code
stringlengths 25
201k
| docstring
stringlengths 19
96.2k
| func_name
stringlengths 0
235
| language
stringclasses 1
value | repo
stringlengths 8
51
| path
stringlengths 11
314
| url
stringlengths 62
377
| license
stringclasses 7
values |
|---|---|---|---|---|---|---|---|
/**
 * Verifies that a BLOCKED node is canceled outright when the dag is killed,
 * while its running parent transitions to KILLING.
 */
@Test
public void blocked_nodes_are_canceled_when_killed() {
  final Node parent = createAndAddNode("a");
  parent.setStatus(Status.RUNNING);

  final Node child = createAndAddNode("b");
  child.addParent(parent);
  child.setStatus(Status.BLOCKED);

  this.testFlow.setStatus(Status.RUNNING);
  this.testFlow.kill();

  // The running parent must be asked to stop; the blocked child never ran, so it is canceled.
  assertThat(parent.getStatus()).isEqualTo(Status.KILLING);
  assertThat(child.getStatus()).isEqualTo(Status.CANCELED);
}
|
Tests blocked nodes are canceled when the dag is killed.
|
blocked_nodes_are_canceled_when_killed
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
Apache-2.0
|
/**
 * Verifies that an already-successful node keeps its SUCCESS status when the
 * dag is killed; only the still-running dependent moves to KILLING.
 *
 * <pre>
 * a (success)
 * |
 * b (running)
 * </pre>
 */
@Test
public void success_node_state_remain_the_same_when_killed() {
  final Node finishedNode = createAndAddNode("a");
  finishedNode.setStatus(Status.SUCCESS);

  final Node runningNode = createAndAddNode("b");
  runningNode.setStatus(Status.RUNNING);
  runningNode.addParent(finishedNode);

  this.testFlow.kill();

  assertThat(finishedNode.getStatus()).isEqualTo(Status.SUCCESS);
  assertThat(runningNode.getStatus()).isEqualTo(Status.KILLING);
}
|
Tests success nodes' states remain the same when the dag is killed.
<pre>
a (success)
/
b (running)
</pre>
|
success_node_state_remain_the_same_when_killed
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
Apache-2.0
|
/**
 * Verifies that a node already in FAILURE keeps its status when the dag is
 * killed. Two independent nodes: a (running), b (failure) — only the running
 * one should transition to KILLING.
 */
@Test
public void failed_node_state_remain_the_same_when_killed() {
  final Node runningNode = createAndAddNode("a");
  runningNode.setStatus(Status.RUNNING);

  final Node failedNode = createAndAddNode("b");
  failedNode.setStatus(Status.FAILURE);

  this.testFlow.kill();

  assertThat(runningNode.getStatus()).isEqualTo(Status.KILLING);
  assertThat(failedNode.getStatus()).isEqualTo(Status.FAILURE);
}
|
Tests failed nodes' states remain the same when the dag is killed.
This can happen when running jobs are allowed to finish when a node fails.
<pre>
a (running) b (failure)
</pre>
|
failed_node_state_remain_the_same_when_killed
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
Apache-2.0
|
/**
 * Creates a node with the given name and registers it with the test dag.
 *
 * @param name node name
 * @return the newly created node
 */
private Node createAndAddNode(final String name) {
  final Node created = TestUtil.createNodeWithNullProcessor(name, this.testFlow);
  this.testFlow.addNode(created);
  return created;
}
|
Creates a node and adds it to the test dag.
@param name node name
@return Node object
|
createAndAddNode
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
Apache-2.0
|
/**
 * Asserts that the given map has exactly one entry per value of the
 * {@link Status} enum (i.e. no status was forgotten in the map).
 *
 * @param map a map keyed by status with an associated boolean value
 */
private void assertMapSizeMatchEnumSize(
    final Map<Status, Boolean> map) {
  assertThat(Status.values().length).isEqualTo(map.size());
}
|
Asserts the given map contains the same number of entries as the number of values of the {@link
Status} has.
@param map a map that contains status and its associated boolean value
|
assertMapSizeMatchEnumSize
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/StatusTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/StatusTest.java
|
Apache-2.0
|
/**
 * Records every node status change; simulates a long-running job on RUNNING
 * (never marks it finished, only signals the latch) and completes the kill
 * handshake on KILLING by reporting the node as killed to the dag service.
 */
@Override
public void changeStatus(final Node node, final Status status) {
System.out.println(node);
this.statusChangeRecorder.recordNode(node);
// NOTE(review): switches on node.getStatus() instead of the 'status' parameter,
// which is unused here — presumably they are identical at call time, but the
// sibling processors switch on 'status'; confirm this is intentional.
switch (node.getStatus()) {
case RUNNING:
// Don't mark the job finished. Simulate a long running job.
this.nodeRunningLatch.countDown();
break;
case KILLING:
// Acknowledge the kill so the dag can transition the node to KILLED.
this.dagService.markNodeKilled(node);
break;
default:
// All other transitions are recorded above but need no reaction.
break;
}
}
|
A node processor that tests killing a node.
@param nodeRunningLatch signal that the node has started running
|
changeStatus
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/TestKillNodeProcessor.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/TestKillNodeProcessor.java
|
Apache-2.0
|
/**
 * Relays a status change of the sub-DAG node in the parent DAG to the sub-DAG
 * itself: starts the sub-DAG when the node starts running, kills it when the
 * node is being killed.
 *
 * @param node the node whose status changed
 * @param status the new status
 */
@Override
public void changeStatus(final Node node, final Status status) {
  System.out.println(node);
  this.statusChangeRecorder.recordNode(node);
  if (status == Status.RUNNING) {
    // Record which node in the parent DAG this sub-DAG is associated with, so the
    // sub-DAG processor can inform the parent DAG of status changes later.
    this.testSubDagProcessor.setNode(node);
    this.dagService.startDag(this.dag);
  } else if (status == Status.KILLING) {
    this.dagService.killDag(this.dag);
  }
  // Other statuses are only recorded; no action is required.
}
|
Triggers the sub DAG state change when the sub DAG node in the parent DAG's status changes.
@param node the node to change
@param status the new status
|
changeStatus
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/TestSubDagNodeProcessor.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/TestSubDagNodeProcessor.java
|
Apache-2.0
|
/**
 * Propagates the sub-DAG's terminal status back to its node in the parent DAG:
 * SUCCESS marks the parent node successful, FAILURE marks it failed.
 *
 * @param dag the sub dag whose status changed
 * @param status the new status
 */
@Override
public void changeStatus(final Dag dag, final Status status) {
  System.out.println(dag);
  this.statusChangeRecorder.recordDag(dag);
  // setNode must have been called before any status change arrives.
  requireNonNull(this.node, "Node for the subDag in the parent DAG can't be null.");
  if (status == Status.SUCCESS) {
    this.dagService.markNodeSuccess(this.node);
  } else if (status == Status.FAILURE) {
    this.dagService.markNodeFailed(this.node);
  }
  // Non-terminal statuses are recorded only.
}
|
Transfers the node state in the parent DAG when the sub DAG status changes.
@param dag the dag to change
@param status the new status
|
changeStatus
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/TestSubDagProcessor.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/TestSubDagProcessor.java
|
Apache-2.0
|
/**
 * Sets the node in the parent DAG that this sub-DAG is attached to.
 *
 * <p>Can't be passed via the constructor since that would create a circular
 * dependency between the node and this processor.
 *
 * @param node the node, part of the parent flow, that represents this sub-DAG
 */
public void setNode(final Node node) {
this.node = node;
}
|
Sets the node that this subflow belongs to.
<p>
Can't pass this information in the constructor since it will cause a circular dependency
problem.
@param node the node as part of the parent flow
|
setNode
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/TestSubDagProcessor.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/TestSubDagProcessor.java
|
Apache-2.0
|
/**
 * Creates a node whose processor is a no-op mock (does nothing on status changes).
 *
 * @param name node name
 * @param dag the dag the node belongs to
 * @return the newly created node
 */
static Node createNodeWithNullProcessor(final String name, final Dag dag) {
  final NodeProcessor noOpProcessor = mock(NodeProcessor.class);
  return new Node(name, noOpProcessor, dag);
}
|
Creates a node with a processor that does nothing.
@param name node name
|
createNodeWithNullProcessor
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/TestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/TestUtil.java
|
Apache-2.0
|
/**
 * Creates a fresh test harness backed by the executions/embedded2 project
 * before each test. The tests mainly drive the flow named 'jobf' and control
 * job success/failure explicitly so no timing assumptions are needed.
 */
@Before
public void setUp() throws Exception {
this.testUtil = new FlowRunnerTestUtil("embedded2", this.temporaryFolder);
}
|
Test the flow run, especially with embedded flows.
This test uses executions/embedded2. It also mainly uses the flow named jobf. The test is
designed to control success/failures explicitly so we don't have to time the flow exactly.
Flow jobf looks like the following:
<pre>
joba joba1
/ | \ |
/ | \ |
jobb jobd jobc |
\ | / /
\ | / /
jobe /
| /
| /
jobf
</pre>
The job 'jobb' is an embedded flow:
jobb:innerFlow
<pre>
innerJobA
/ \
innerJobB innerJobC
\ /
innerFlow
</pre>
The job 'jobd' is a simple embedded flow:
jobd:innerFlow2
<pre>
innerJobA
|
innerFlow2
</pre>
The following tests checks each stage of the flow run by forcing jobs to succeed or fail.
|
setUp
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests the basic successful run of flow 'jobf' end to end, and verifies the
 * output properties produced by each job are propagated into its dependents'
 * input properties, along with the generated Azkaban UI link properties.
 */
@Test
public void testBasicRun() throws Exception {
final Map<String, String> flowParams = new HashMap<>();
flowParams.put("param4", "override.4");
flowParams.put("param10", "override.10");
flowParams.put("param11", "override.11");
final ExecutionOptions options = new ExecutionOptions();
options.setFailureAction(FailureAction.FINISH_CURRENTLY_RUNNING);
Props props = new Props();
props.put(AZKABAN_WEBSERVER_URL, "http://localhost:8443");
this.runner = this.testUtil.createFromFlowMap("jobf", options, flowParams, props);
// 1. START FLOW
FlowRunnerTestUtil.startThread(this.runner);
// After it starts up, only joba should be running
assertStatus("joba", Status.RUNNING);
assertStatus("joba1", Status.RUNNING);
// Verify joba's resolved input properties: flow params override job/shared props.
final Props joba = this.runner.getExecutableFlow().getExecutableNodePath("joba")
.getInputProps();
assertEquals("joba.1", joba.get("param1"));
assertEquals("test1.2", joba.get("param2"));
assertEquals("test1.3", joba.get("param3"));
assertEquals("override.4", joba.get("param4"));
assertEquals("test2.5", joba.get("param5"));
assertEquals("test2.6", joba.get("param6"));
assertEquals("test2.7", joba.get("param7"));
assertEquals("test2.8", joba.get("param8"));
// Verify the generated Azkaban UI link properties for joba.
assertThat(joba.get(CommonJobProperties.JOB_ID)).isEqualTo("joba");
assertThat(joba.get(CommonJobProperties.JOB_ATTEMPT)).isEqualTo("0");
assertThat(joba.get(CommonJobProperties.EXECUTION_LINK))
.matches("http://localhost:8443/executor\\?execid=\\d\\d\\d");
assertThat(joba.get(CommonJobProperties.WORKFLOW_LINK))
.matches("http://localhost:8443/manager\\?project=testProject&flow=jobf");
assertThat(joba.get(CommonJobProperties.JOBEXEC_LINK))
.matches("http://localhost:8443/executor\\?execid=\\d\\d\\d&job=joba");
assertThat(joba.get(CommonJobProperties.JOB_LINK))
.matches("http://localhost:8443/manager\\?project=testProject&flow=jobf&job=joba");
assertThat(joba.get(CommonJobProperties.ATTEMPT_LINK))
.matches("http://localhost:8443/executor\\?execid=\\d\\d\\d&job=joba&attempt=0");
final Props joba1 = this.runner.getExecutableFlow().getExecutableNodePath("joba1")
.getInputProps();
assertEquals("test1.1", joba1.get("param1"));
assertEquals("test1.2", joba1.get("param2"));
assertEquals("test1.3", joba1.get("param3"));
assertEquals("override.4", joba1.get("param4"));
assertEquals("test2.5", joba1.get("param5"));
assertEquals("test2.6", joba1.get("param6"));
assertEquals("test2.7", joba1.get("param7"));
assertEquals("test2.8", joba1.get("param8"));
// 2. JOB A COMPLETES SUCCESSFULLY
InteractiveTestJob.getTestJob("joba").succeedJob(
Props.of("output.joba", "joba", "output.override", "joba"));
assertStatus("joba", Status.SUCCEEDED);
assertStatus("joba1", Status.RUNNING);
assertStatus("jobb", Status.RUNNING);
assertStatus("jobc", Status.RUNNING);
assertStatus("jobd", Status.RUNNING);
assertStatus("jobd:innerJobA", Status.RUNNING);
assertStatus("jobb:innerJobA", Status.RUNNING);
final ExecutableNode node = this.runner.getExecutableFlow().getExecutableNodePath("jobb");
assertEquals(Status.RUNNING, node.getStatus());
final Props jobb = node.getInputProps();
assertEquals("override.4", jobb.get("param4"));
// Test that jobb properties overwrites the output properties
assertEquals("moo", jobb.get("testprops"));
assertEquals("jobb", jobb.get("output.override"));
assertEquals("joba", jobb.get("output.joba"));
// joba's output should be visible inside the embedded flow's first job too.
final Props jobbInnerJobA = this.runner.getExecutableFlow()
.getExecutableNodePath("jobb:innerJobA")
.getInputProps();
assertEquals("test1.1", jobbInnerJobA.get("param1"));
assertEquals("test1.2", jobbInnerJobA.get("param2"));
assertEquals("test1.3", jobbInnerJobA.get("param3"));
assertEquals("override.4", jobbInnerJobA.get("param4"));
assertEquals("test2.5", jobbInnerJobA.get("param5"));
assertEquals("test2.6", jobbInnerJobA.get("param6"));
assertEquals("test2.7", jobbInnerJobA.get("param7"));
assertEquals("test2.8", jobbInnerJobA.get("param8"));
assertEquals("joba", jobbInnerJobA.get("output.joba"));
// The embedded job's links use the nested "jobb:innerJobA" id and innerFlow name.
assertThat(jobbInnerJobA.get(CommonJobProperties.JOB_ID)).isEqualTo("innerJobA");
assertThat(jobbInnerJobA.get(CommonJobProperties.JOB_ATTEMPT)).isEqualTo("0");
assertThat(jobbInnerJobA.get(CommonJobProperties.EXECUTION_LINK))
.matches("http://localhost:8443/executor\\?execid=\\d\\d\\d");
assertThat(jobbInnerJobA.get(CommonJobProperties.WORKFLOW_LINK))
.matches("http://localhost:8443/manager\\?project=testProject&flow=innerFlow");
assertThat(jobbInnerJobA.get(CommonJobProperties.JOBEXEC_LINK))
.matches("http://localhost:8443/executor\\?execid=\\d\\d\\d&job=jobb:innerJobA");
assertThat(jobbInnerJobA.get(CommonJobProperties.JOB_LINK))
.matches("http://localhost:8443/manager\\?project=testProject&flow=innerFlow&job=innerJobA");
assertThat(jobbInnerJobA.get(CommonJobProperties.ATTEMPT_LINK))
.matches("http://localhost:8443/executor\\?execid=\\d\\d\\d&job=jobb:innerJobA&attempt=0");
// 3. jobb:Inner completes
/// innerJobA completes
InteractiveTestJob.getTestJob("jobb:innerJobA").succeedJob(
Props.of("output.jobb.innerJobA", "jobb.innerJobA"));
assertStatus("jobb:innerJobA", Status.SUCCEEDED);
assertStatus("jobb:innerJobB", Status.RUNNING);
assertStatus("jobb:innerJobC", Status.RUNNING);
final Props jobbInnerJobB = this.runner.getExecutableFlow()
.getExecutableNodePath("jobb:innerJobB")
.getInputProps();
assertEquals("test1.1", jobbInnerJobB.get("param1"));
assertEquals("override.4", jobbInnerJobB.get("param4"));
assertEquals("jobb.innerJobA",
jobbInnerJobB.get("output.jobb.innerJobA"));
assertEquals("moo", jobbInnerJobB.get("testprops"));
/// innerJobB, C completes
InteractiveTestJob.getTestJob("jobb:innerJobB").succeedJob(
Props.of("output.jobb.innerJobB", "jobb.innerJobB"));
InteractiveTestJob.getTestJob("jobb:innerJobC").succeedJob(
Props.of("output.jobb.innerJobC", "jobb.innerJobC"));
assertStatus("jobb:innerJobB", Status.SUCCEEDED);
assertStatus("jobb:innerJobC", Status.SUCCEEDED);
assertStatus("jobb:innerFlow", Status.RUNNING);
// innerFlow receives the merged outputs of both of its parents.
final Props jobbInnerJobD = this.runner.getExecutableFlow()
.getExecutableNodePath("jobb:innerFlow")
.getInputProps();
assertEquals("test1.1", jobbInnerJobD.get("param1"));
assertEquals("override.4", jobbInnerJobD.get("param4"));
assertEquals("jobb.innerJobB",
jobbInnerJobD.get("output.jobb.innerJobB"));
assertEquals("jobb.innerJobC",
jobbInnerJobD.get("output.jobb.innerJobC"));
// 4. Finish up on inner flow for jobb
InteractiveTestJob.getTestJob("jobb:innerFlow").succeedJob(
Props.of("output1.jobb", "test1", "output2.jobb", "test2"));
assertStatus("jobb:innerFlow", Status.SUCCEEDED);
assertStatus("jobb", Status.SUCCEEDED);
// The embedded flow's last job's output becomes jobb's output.
final Props jobbOutput = this.runner.getExecutableFlow().getExecutableNodePath("jobb")
.getOutputProps();
assertEquals("test1", jobbOutput.get("output1.jobb"));
assertEquals("test2", jobbOutput.get("output2.jobb"));
// 5. Finish jobc, jobd
InteractiveTestJob.getTestJob("jobc").succeedJob(
Props.of("output.jobc", "jobc"));
assertStatus("jobc", Status.SUCCEEDED);
InteractiveTestJob.getTestJob("jobd:innerJobA").succeedJob();
InteractiveTestJob.getTestJob("jobd:innerFlow2").succeedJob();
assertStatus("jobd:innerJobA", Status.SUCCEEDED);
assertStatus("jobd:innerFlow2", Status.SUCCEEDED);
assertStatus("jobd", Status.SUCCEEDED);
assertStatus("jobe", Status.RUNNING);
// jobe sees the outputs of both jobb (via its inner flow) and jobc.
final Props jobd = this.runner.getExecutableFlow().getExecutableNodePath("jobe")
.getInputProps();
assertEquals("test1", jobd.get("output1.jobb"));
assertEquals("jobc", jobd.get("output.jobc"));
// 6. Finish off flow
InteractiveTestJob.getTestJob("joba1").succeedJob();
InteractiveTestJob.getTestJob("jobe").succeedJob();
assertStatus("joba1", Status.SUCCEEDED);
assertStatus("jobe", Status.SUCCEEDED);
assertStatus("jobf", Status.RUNNING);
InteractiveTestJob.getTestJob("jobf").succeedJob();
assertStatus("jobf", Status.SUCCEEDED);
waitForAndAssertFlowStatus(Status.SUCCEEDED);
assertThreadShutDown();
// The flow's log appenders must be detached after the run completes.
Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests the basic successful flow run, and also tests all output variables from each job.
|
testBasicRun
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests a flow with disabled jobs and a disabled embedded-flow job. Disabled
 * nodes must be SKIPPED (their subtrees never leave READY) while the rest of
 * the flow runs to completion normally.
 */
@Test
public void testDisabledNormal() throws Exception {
this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.FINISH_CURRENTLY_RUNNING);
final ExecutableFlow flow = this.runner.getExecutableFlow();
// Disable the whole jobb embedded flow and one job inside jobd's embedded flow.
flow.getExecutableNode("jobb").setStatus(Status.DISABLED);
((ExecutableFlowBase) flow.getExecutableNode("jobd")).getExecutableNode(
"innerJobA").setStatus(Status.DISABLED);
// 1. START FLOW
FlowRunnerTestUtil.startThread(this.runner);
// After it starts up, only joba should be running
assertStatus("joba", Status.RUNNING);
assertStatus("joba1", Status.RUNNING);
// 2. JOB A COMPLETES SUCCESSFULLY, others should be skipped
InteractiveTestJob.getTestJob("joba").succeedJob();
assertStatus("joba", Status.SUCCEEDED);
assertStatus("joba1", Status.RUNNING);
assertStatus("jobb", Status.SKIPPED);
assertStatus("jobc", Status.RUNNING);
assertStatus("jobd", Status.RUNNING);
assertStatus("jobd:innerJobA", Status.SKIPPED);
assertStatus("jobd:innerFlow2", Status.RUNNING);
// The skipped jobb subtree is never started — it stays READY.
assertStatus("jobb:innerJobA", Status.READY);
assertStatus("jobb:innerJobB", Status.READY);
assertStatus("jobb:innerJobC", Status.READY);
assertStatus("jobb:innerFlow", Status.READY);
// 3. Finish the remaining enabled jobs; the flow proceeds past the skipped jobb.
InteractiveTestJob.getTestJob("jobc").succeedJob();
InteractiveTestJob.getTestJob("jobd:innerFlow2").succeedJob();
assertStatus("jobd:innerFlow2", Status.SUCCEEDED);
assertStatus("jobd", Status.SUCCEEDED);
assertStatus("jobc", Status.SUCCEEDED);
assertStatus("jobe", Status.RUNNING);
InteractiveTestJob.getTestJob("jobe").succeedJob();
InteractiveTestJob.getTestJob("joba1").succeedJob();
assertStatus("jobe", Status.SUCCEEDED);
assertStatus("joba1", Status.SUCCEEDED);
assertStatus("jobf", Status.RUNNING);
// 4. Finish the final job; the whole flow should succeed despite the skips.
InteractiveTestJob.getTestJob("jobf").succeedJob();
assertStatus("jobf", Status.SUCCEEDED);
waitForAndAssertFlowStatus(Status.SUCCEEDED);
assertThreadShutDown();
Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests a flow with Disabled jobs and flows. They should properly SKIP executions
|
testDisabledNormal
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests a failure with the default FINISH_CURRENTLY_RUNNING action. After the
 * first failure, jobs already started are allowed to complete and all
 * not-yet-started jobs are CANCELLED; the flow finishes as FAILED.
 */
@Test
public void testNormalFailure1() throws Exception {
// Test propagation of KILLED status to embedded flows.
this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.FINISH_CURRENTLY_RUNNING);
// 1. START FLOW
FlowRunnerTestUtil.startThread(this.runner);
// After it starts up, only joba should be running
assertStatus("joba", Status.RUNNING);
assertStatus("joba1", Status.RUNNING);
// 2. JOB A FAILS; all of joba's not-yet-started descendants are cancelled,
// while the independently running joba1 continues.
InteractiveTestJob.getTestJob("joba").failJob();
waitForAndAssertFlowStatus(Status.FAILED_FINISHING);
assertStatus("joba", Status.FAILED);
assertStatus("joba1", Status.RUNNING);
assertStatus("jobb", Status.CANCELLED);
assertStatus("jobc", Status.CANCELLED);
assertStatus("jobd", Status.CANCELLED);
// Children of cancelled embedded flows were never dispatched — they stay READY.
assertStatus("jobd:innerJobA", Status.READY);
assertStatus("jobd:innerFlow2", Status.READY);
assertStatus("jobb:innerJobA", Status.READY);
assertStatus("jobb:innerFlow", Status.READY);
assertStatus("jobe", Status.CANCELLED);
// 3. Let the still-running joba1 finish; the flow can then terminate as FAILED.
InteractiveTestJob.getTestJob("joba1").succeedJob();
assertStatus("jobf", Status.CANCELLED);
waitForAndAssertFlowStatus(Status.FAILED);
assertThreadShutDown();
Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests a failure with the default FINISH_CURRENTLY_RUNNING. After the first failure, every job
that started should complete, and the rest of the jobs should be skipped.
|
testNormalFailure1
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests failures with the FINISH_ALL_POSSIBLE action: every job whose
 * prerequisites succeeded keeps running after a failure; only the failed
 * branch is cut short. Finishes when the failure has propagated to the last
 * node of the flow.
 */
@Test
public void testFailedFinishingFailure3() throws Exception {
// Test propagation of KILLED status to embedded flows different branch
this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.FINISH_ALL_POSSIBLE);
// 1. START FLOW
FlowRunnerTestUtil.startThread(this.runner);
// After it starts up, only joba should be running
assertStatus("joba", Status.RUNNING);
assertStatus("joba1", Status.RUNNING);
// 2. JOB in subflow FAILS
InteractiveTestJob.getTestJob("joba").succeedJob();
assertStatus("joba", Status.SUCCEEDED);
assertStatus("jobb", Status.RUNNING);
assertStatus("jobc", Status.RUNNING);
assertStatus("jobd", Status.RUNNING);
assertStatus("jobb:innerJobA", Status.RUNNING);
assertStatus("jobd:innerJobA", Status.RUNNING);
InteractiveTestJob.getTestJob("joba1").succeedJob();
InteractiveTestJob.getTestJob("jobb:innerJobA").succeedJob();
assertStatus("jobb:innerJobA", Status.SUCCEEDED);
assertStatus("joba1", Status.SUCCEEDED);
assertStatus("jobb:innerJobB", Status.RUNNING);
assertStatus("jobb:innerJobC", Status.RUNNING);
// innerJobB fails: jobb enters FAILED_FINISHING but its running sibling continues.
InteractiveTestJob.getTestJob("jobb:innerJobB").failJob();
assertStatus("jobb", Status.FAILED_FINISHING);
assertStatus("jobb:innerJobB", Status.FAILED);
waitForAndAssertFlowStatus(Status.FAILED_FINISHING);
InteractiveTestJob.getTestJob("jobb:innerJobC").succeedJob();
InteractiveTestJob.getTestJob("jobd:innerJobA").succeedJob();
assertStatus("jobb", Status.FAILED);
assertStatus("jobd:innerJobA", Status.SUCCEEDED);
assertStatus("jobd:innerFlow2", Status.RUNNING);
assertStatus("jobb:innerJobC", Status.SUCCEEDED);
// innerFlow depends on the failed innerJobB, so it is cancelled.
assertStatus("jobb:innerFlow", Status.CANCELLED);
InteractiveTestJob.getTestJob("jobd:innerFlow2").succeedJob();
assertStatus("jobd:innerFlow2", Status.SUCCEEDED);
assertStatus("jobd", Status.SUCCEEDED);
// 3. jobc completes, everything is killed
InteractiveTestJob.getTestJob("jobc").succeedJob();
assertStatus("jobc", Status.SUCCEEDED);
assertStatus("jobe", Status.CANCELLED);
assertStatus("jobf", Status.CANCELLED);
waitForAndAssertFlowStatus(Status.FAILED);
assertThreadShutDown();
Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests failures when the fail behaviour is FINISH_ALL_POSSIBLE. In this case, all jobs which
have had its pre-requisite met can continue to run. Finishes when the failure is propagated to
the last node of the flow.
|
testFailedFinishingFailure3
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests the CANCEL_ALL failure action: when a job fails, running jobs are
 * assigned KILLED and jobs that never started are assigned CANCELLED; the
 * whole flow finishes as KILLED.
 */
@Test
public void testCancelOnFailure() throws Exception {
// Test propagation of KILLED status to embedded flows different branch
this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.CANCEL_ALL);
// 1. START FLOW
FlowRunnerTestUtil.startThread(this.runner);
// After it starts up, only joba should be running
assertStatus("joba", Status.RUNNING);
assertStatus("joba1", Status.RUNNING);
// 2. JOB in subflow FAILS
InteractiveTestJob.getTestJob("joba").succeedJob();
assertStatus("joba", Status.SUCCEEDED);
assertStatus("jobb", Status.RUNNING);
assertStatus("jobc", Status.RUNNING);
assertStatus("jobd", Status.RUNNING);
assertStatus("jobb:innerJobA", Status.RUNNING);
assertStatus("jobd:innerJobA", Status.RUNNING);
InteractiveTestJob.getTestJob("joba1").succeedJob();
InteractiveTestJob.getTestJob("jobb:innerJobA").succeedJob();
assertStatus("jobb:innerJobA", Status.SUCCEEDED);
assertStatus("joba1", Status.SUCCEEDED);
assertStatus("jobb:innerJobB", Status.RUNNING);
assertStatus("jobb:innerJobC", Status.RUNNING);
// The failure triggers an immediate cancel of the whole flow:
// running jobs -> KILLED, never-started jobs -> CANCELLED.
InteractiveTestJob.getTestJob("jobb:innerJobB").failJob();
assertStatus("jobb", Status.FAILED);
assertStatus("jobb:innerJobB", Status.FAILED);
assertStatus("jobb:innerJobC", Status.KILLED);
assertStatus("jobb:innerFlow", Status.CANCELLED);
assertStatus("jobc", Status.KILLED);
assertStatus("jobd", Status.KILLED);
assertStatus("jobd:innerJobA", Status.KILLED);
assertStatus("jobd:innerFlow2", Status.CANCELLED);
assertStatus("jobe", Status.CANCELLED);
assertStatus("jobf", Status.CANCELLED);
waitForAndAssertFlowStatus(Status.KILLED);
assertThreadShutDown();
Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests the failure condition when a failure invokes a cancel (or killed) on the flow.
Any jobs that are running will be assigned a KILLED state, and any nodes which were skipped due
to prior errors will be given a CANCELLED state.
|
testCancelOnFailure
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests the manual killing of a healthy flow: running jobs transition to
 * KILLED, not-yet-started jobs to CANCELLED, and the flow ends as KILLED.
 */
@Test
public void testCancel() throws Exception {
// Test propagation of KILLED status to embedded flows different branch
this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.CANCEL_ALL);
// 1. START FLOW
FlowRunnerTestUtil.startThread(this.runner);
// After it starts up, only joba should be running
assertStatus("joba", Status.RUNNING);
assertStatus("joba1", Status.RUNNING);
// 2. JOB in subflow FAILS
InteractiveTestJob.getTestJob("joba").succeedJob();
assertStatus("joba", Status.SUCCEEDED);
assertStatus("jobb", Status.RUNNING);
assertStatus("jobc", Status.RUNNING);
assertStatus("jobd", Status.RUNNING);
assertStatus("jobb:innerJobA", Status.RUNNING);
assertStatus("jobd:innerJobA", Status.RUNNING);
InteractiveTestJob.getTestJob("joba1").succeedJob();
InteractiveTestJob.getTestJob("jobb:innerJobA").succeedJob();
assertStatus("jobb:innerJobA", Status.SUCCEEDED);
assertStatus("joba1", Status.SUCCEEDED);
assertStatus("jobb:innerJobB", Status.RUNNING);
assertStatus("jobb:innerJobC", Status.RUNNING);
// Manually kill the flow mid-run; the flow was healthy up to this point.
this.runner.kill("me");
assertStatus("jobb", Status.KILLED);
assertStatus("jobb:innerJobB", Status.KILLED);
assertStatus("jobb:innerJobC", Status.KILLED);
assertStatus("jobb:innerFlow", Status.CANCELLED);
assertStatus("jobc", Status.KILLED);
assertStatus("jobd", Status.KILLED);
assertStatus("jobd:innerJobA", Status.KILLED);
assertStatus("jobd:innerFlow2", Status.CANCELLED);
assertStatus("jobe", Status.CANCELLED);
assertStatus("jobf", Status.CANCELLED);
waitForAndAssertFlowStatus(Status.KILLED);
assertThreadShutDown();
Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests the manual Killing of a flow. In this case, the flow is just fine before the cancel is
called.
|
testCancel
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests manually killing a flow that is already FAILED_FINISHING: the kill
 * supersedes FINISH_CURRENTLY_RUNNING, killing running jobs and cancelling
 * pending ones, and the flow ends as KILLED.
 */
@Test
public void testManualCancelOnFailure() throws Exception {
// Test propagation of KILLED status to embedded flows different branch
this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.FINISH_CURRENTLY_RUNNING);
// 1. START FLOW
FlowRunnerTestUtil.startThread(this.runner);
// After it starts up, only joba should be running
assertStatus("joba", Status.RUNNING);
assertStatus("joba1", Status.RUNNING);
// 2. JOB in subflow FAILS
InteractiveTestJob.getTestJob("joba").succeedJob();
assertStatus("joba", Status.SUCCEEDED);
assertStatus("jobb", Status.RUNNING);
assertStatus("jobc", Status.RUNNING);
assertStatus("jobd", Status.RUNNING);
assertStatus("jobb:innerJobA", Status.RUNNING);
assertStatus("jobd:innerJobA", Status.RUNNING);
InteractiveTestJob.getTestJob("joba1").succeedJob();
InteractiveTestJob.getTestJob("jobb:innerJobA").succeedJob();
assertStatus("jobb:innerJobA", Status.SUCCEEDED);
assertStatus("joba1", Status.SUCCEEDED);
assertStatus("jobb:innerJobB", Status.RUNNING);
assertStatus("jobb:innerJobC", Status.RUNNING);
// A job failure puts the flow into FAILED_FINISHING (finish-currently-running).
InteractiveTestJob.getTestJob("jobb:innerJobB").failJob();
assertStatus("jobb:innerJobB", Status.FAILED);
assertStatus("jobb", Status.FAILED_FINISHING);
waitForAndAssertFlowStatus(Status.FAILED_FINISHING);
// 3. Manually kill the FAILED_FINISHING flow.
this.runner.kill("me");
assertStatus("jobb", Status.FAILED);
assertStatus("jobb:innerJobC", Status.KILLED);
assertStatus("jobb:innerFlow", Status.CANCELLED);
assertStatus("jobc", Status.KILLED);
assertStatus("jobd", Status.KILLED);
assertStatus("jobd:innerJobA", Status.KILLED);
assertStatus("jobd:innerFlow2", Status.CANCELLED);
assertStatus("jobe", Status.CANCELLED);
assertStatus("jobf", Status.CANCELLED);
waitForAndAssertFlowStatus(Status.KILLED);
assertThreadShutDown();
Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests the manual invocation of cancel on a flow that is FAILED_FINISHING
|
testManualCancelOnFailure
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests pause and resume: while PAUSED, jobs that finish are recorded but no
 * new jobs are dispatched; resuming releases the queued transitions and the
 * flow completes successfully.
 */
@Test
public void testPause() throws Exception {
this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.FINISH_CURRENTLY_RUNNING);
// 1. START FLOW
FlowRunnerTestUtil.startThread(this.runner);
// After it starts up, only joba should be running
assertStatus("joba", Status.RUNNING);
assertStatus("joba1", Status.RUNNING);
this.runner.pause("test");
InteractiveTestJob.getTestJob("joba").succeedJob();
// 2.1 JOB A COMPLETES SUCCESSFULLY AFTER PAUSE
assertStatus("joba", Status.SUCCEEDED);
waitForAndAssertFlowStatus(Status.PAUSED);
// 2.2 Flow is unpaused
this.runner.resume("test");
waitForAndAssertFlowStatus(Status.RUNNING);
// After resume, joba's dependents are finally dispatched.
assertStatus("joba", Status.SUCCEEDED);
assertStatus("joba1", Status.RUNNING);
assertStatus("jobb", Status.RUNNING);
assertStatus("jobc", Status.RUNNING);
assertStatus("jobd", Status.RUNNING);
assertStatus("jobd:innerJobA", Status.RUNNING);
assertStatus("jobb:innerJobA", Status.RUNNING);
// 3. jobb:Inner completes
this.runner.pause("test");
/// innerJobA completes, but paused
InteractiveTestJob.getTestJob("jobb:innerJobA").succeedJob(
Props.of("output.jobb.innerJobA", "jobb.innerJobA"));
assertStatus("jobb:innerJobA", Status.SUCCEEDED);
this.runner.resume("test");
assertStatus("jobb:innerJobB", Status.RUNNING);
assertStatus("jobb:innerJobC", Status.RUNNING);
/// innerJobB, C completes
InteractiveTestJob.getTestJob("jobb:innerJobB").succeedJob(
Props.of("output.jobb.innerJobB", "jobb.innerJobB"));
InteractiveTestJob.getTestJob("jobb:innerJobC").succeedJob(
Props.of("output.jobb.innerJobC", "jobb.innerJobC"));
assertStatus("jobb:innerJobB", Status.SUCCEEDED);
assertStatus("jobb:innerJobC", Status.SUCCEEDED);
assertStatus("jobb:innerFlow", Status.RUNNING);
// 4. Finish up on inner flow for jobb
InteractiveTestJob.getTestJob("jobb:innerFlow").succeedJob(
Props.of("output1.jobb", "test1", "output2.jobb", "test2"));
assertStatus("jobb:innerFlow", Status.SUCCEEDED);
assertStatus("jobb", Status.SUCCEEDED);
// 5. Finish jobc, jobd
InteractiveTestJob.getTestJob("jobc").succeedJob(
Props.of("output.jobc", "jobc"));
assertStatus("jobc", Status.SUCCEEDED);
InteractiveTestJob.getTestJob("jobd:innerJobA").succeedJob();
InteractiveTestJob.getTestJob("jobd:innerFlow2").succeedJob();
assertStatus("jobd:innerJobA", Status.SUCCEEDED);
assertStatus("jobd:innerFlow2", Status.SUCCEEDED);
assertStatus("jobd", Status.SUCCEEDED);
assertStatus("jobe", Status.RUNNING);
// 6. Finish off flow
InteractiveTestJob.getTestJob("joba1").succeedJob();
InteractiveTestJob.getTestJob("jobe").succeedJob();
assertStatus("joba1", Status.SUCCEEDED);
assertStatus("jobe", Status.SUCCEEDED);
assertStatus("jobf", Status.RUNNING);
InteractiveTestJob.getTestJob("jobf").succeedJob();
assertStatus("jobf", Status.SUCCEEDED);
waitForAndAssertFlowStatus(Status.SUCCEEDED);
assertThreadShutDown();
Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests that pause and resume work
|
testPause
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests a manual KILL (cancel) invoked on a flow that has been paused. The flow should
 * unpause and be killed immediately: running jobs end as KILLED and jobs that never
 * started end as CANCELLED, leaving the flow itself in KILLED status.
 */
@Test
public void testPauseKill() throws Exception {
  this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.FINISH_CURRENTLY_RUNNING);
  // 1. START FLOW
  FlowRunnerTestUtil.startThread(this.runner);
  // After it starts up, only joba should be running
  assertStatus("joba", Status.RUNNING);
  assertStatus("joba1", Status.RUNNING);
  // 2. JOB A COMPLETES SUCCESSFULLY
  InteractiveTestJob.getTestJob("joba").succeedJob();
  assertStatus("joba", Status.SUCCEEDED);
  assertStatus("joba1", Status.RUNNING);
  assertStatus("jobb", Status.RUNNING);
  assertStatus("jobc", Status.RUNNING);
  assertStatus("jobd", Status.RUNNING);
  assertStatus("jobd:innerJobA", Status.RUNNING);
  assertStatus("jobb:innerJobA", Status.RUNNING);
  // 3. Pause the flow; jobs that were already running are allowed to finish while paused.
  this.runner.pause("me");
  waitForAndAssertFlowStatus(Status.PAUSED);
  InteractiveTestJob.getTestJob("jobb:innerJobA").succeedJob();
  InteractiveTestJob.getTestJob("jobd:innerJobA").succeedJob();
  assertStatus("jobb:innerJobA", Status.SUCCEEDED);
  assertStatus("jobd:innerJobA", Status.SUCCEEDED);
  // 4. Kill while paused: running jobs become KILLED, never-started jobs become CANCELLED.
  this.runner.kill("me");
  assertStatus("joba1", Status.KILLED);
  assertStatus("jobb:innerJobB", Status.CANCELLED);
  assertStatus("jobb:innerJobC", Status.CANCELLED);
  assertStatus("jobb:innerFlow", Status.CANCELLED);
  assertStatus("jobb", Status.KILLED);
  assertStatus("jobc", Status.KILLED);
  assertStatus("jobd:innerFlow2", Status.CANCELLED);
  assertStatus("jobd", Status.KILLED);
  assertStatus("jobe", Status.CANCELLED);
  assertStatus("jobf", Status.CANCELLED);
  // The flow itself finishes as KILLED and its runner thread shuts down cleanly.
  waitForAndAssertFlowStatus(Status.KILLED);
  assertThreadShutDown();
  // The flow log appenders must be closed/removed after the run finishes.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Test the condition for a manual invocation of a KILL (cancel) on a flow that has been paused.
The flow should unpause and be killed immediately.
|
testPauseKill
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests the case where a failure occurs on a paused flow with the FINISH_CURRENTLY_RUNNING
 * failure action. While paused, already-running jobs may finish (including as failures) but
 * no new jobs are started; the recorded failure is only acted upon once the flow is resumed,
 * at which point the dependent jobs are cancelled and the flow ends FAILED.
 */
@Test
public void testPauseFail() throws Exception {
  this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.FINISH_CURRENTLY_RUNNING);
  final EventCollectorListener eventCollector = new EventCollectorListener();
  this.runner.addListener(eventCollector);
  // 1. START FLOW
  FlowRunnerTestUtil.startThread(this.runner);
  // After it starts up, only joba should be running
  assertStatus("joba", Status.RUNNING);
  assertStatus("joba1", Status.RUNNING);
  // 2. JOB A COMPLETES SUCCESSFULLY
  InteractiveTestJob.getTestJob("joba").succeedJob();
  assertStatus("joba", Status.SUCCEEDED);
  assertStatus("joba1", Status.RUNNING);
  assertStatus("jobb", Status.RUNNING);
  assertStatus("jobc", Status.RUNNING);
  assertStatus("jobd", Status.RUNNING);
  assertStatus("jobd:innerJobA", Status.RUNNING);
  assertStatus("jobb:innerJobA", Status.RUNNING);
  // 3. Pause, then let one running job succeed and another fail while paused.
  this.runner.pause("me");
  waitForAndAssertFlowStatus(Status.PAUSED);
  InteractiveTestJob.getTestJob("jobb:innerJobA").succeedJob();
  InteractiveTestJob.getTestJob("jobd:innerJobA").failJob();
  assertStatus("jobd:innerJobA", Status.FAILED);
  assertStatus("jobb:innerJobA", Status.SUCCEEDED);
  // When flow is paused, no new jobs are started. So these two jobs that were already running
  // are allowed to finish, but their dependencies aren't started.
  // Now, ensure that jobd:innerJobA has completely finished as failed before resuming.
  // If we would resume before the job failure has been completely processed, FlowRunner would be
  // able to start some new jobs instead of cancelling everything.
  FlowRunnerTestUtil.waitEventFired(eventCollector, "jobd:innerJobA", Status.FAILED);
  waitForAndAssertFlowStatus(Status.PAUSED);
  // 4. Resume: the pending failure is processed now, cancelling the failed branch.
  this.runner.resume("me");
  assertStatus("jobb:innerJobB", Status.CANCELLED);
  assertStatus("jobb:innerJobC", Status.CANCELLED);
  assertStatus("jobb:innerFlow", Status.CANCELLED);
  assertStatus("jobb", Status.KILLED);
  assertStatus("jobd:innerFlow2", Status.CANCELLED);
  assertStatus("jobd", Status.FAILED);
  // 5. Jobs that were already running are still allowed to finish successfully.
  InteractiveTestJob.getTestJob("jobc").succeedJob();
  InteractiveTestJob.getTestJob("joba1").succeedJob();
  assertStatus("jobc", Status.SUCCEEDED);
  assertStatus("joba1", Status.SUCCEEDED);
  assertStatus("jobf", Status.CANCELLED);
  assertStatus("jobe", Status.CANCELLED);
  // The flow ends FAILED and its runner thread shuts down cleanly.
  waitForAndAssertFlowStatus(Status.FAILED);
  assertThreadShutDown();
  // The flow log appenders must be closed/removed after the run finishes.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests the case where a failure occurs on a Paused flow. In this case, the flow should stay
paused.
|
testPauseFail
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests the FINISH_ALL_POSSIBLE failure action triggered during a pause. The failure is not
 * acted upon until the flow is resumed; after resume, branches not blocked by the failed job
 * continue to completion while the failed branch is cancelled, and the flow ends FAILED.
 */
@Test
public void testPauseFailFinishAll() throws Exception {
  this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.FINISH_ALL_POSSIBLE);
  // 1. START FLOW
  FlowRunnerTestUtil.startThread(this.runner);
  // After it starts up, only joba should be running
  assertStatus("joba", Status.RUNNING);
  assertStatus("joba1", Status.RUNNING);
  // 2. JOB A COMPLETES SUCCESSFULLY
  InteractiveTestJob.getTestJob("joba").succeedJob();
  assertStatus("joba", Status.SUCCEEDED);
  assertStatus("joba1", Status.RUNNING);
  assertStatus("jobb", Status.RUNNING);
  assertStatus("jobc", Status.RUNNING);
  assertStatus("jobd", Status.RUNNING);
  assertStatus("jobd:innerJobA", Status.RUNNING);
  assertStatus("jobb:innerJobA", Status.RUNNING);
  // 3. Pause, then let one running job succeed and another fail while paused.
  this.runner.pause("me");
  waitForAndAssertFlowStatus(Status.PAUSED);
  InteractiveTestJob.getTestJob("jobb:innerJobA").succeedJob();
  InteractiveTestJob.getTestJob("jobd:innerJobA").failJob();
  assertStatus("jobd:innerJobA", Status.FAILED);
  assertStatus("jobb:innerJobA", Status.SUCCEEDED);
  // 4. Resume: with FINISH_ALL_POSSIBLE, unrelated branches keep running while the
  // failed branch (jobd) is cancelled.
  this.runner.resume("me");
  assertStatus("jobb:innerJobB", Status.RUNNING);
  assertStatus("jobb:innerJobC", Status.RUNNING);
  assertStatus("jobd:innerFlow2", Status.CANCELLED);
  assertStatus("jobd", Status.FAILED);
  // 5. Finish all remaining runnable jobs.
  InteractiveTestJob.getTestJob("jobc").succeedJob();
  InteractiveTestJob.getTestJob("joba1").succeedJob();
  InteractiveTestJob.getTestJob("jobb:innerJobB").succeedJob();
  InteractiveTestJob.getTestJob("jobb:innerJobC").succeedJob();
  InteractiveTestJob.getTestJob("jobb:innerFlow").succeedJob();
  assertStatus("jobc", Status.SUCCEEDED);
  assertStatus("joba1", Status.SUCCEEDED);
  assertStatus("jobb:innerJobB", Status.SUCCEEDED);
  assertStatus("jobb:innerJobC", Status.SUCCEEDED);
  assertStatus("jobb:innerFlow", Status.SUCCEEDED);
  assertStatus("jobb", Status.SUCCEEDED);
  // Jobs downstream of the failed branch can never run and are cancelled.
  assertStatus("jobe", Status.CANCELLED);
  assertStatus("jobf", Status.CANCELLED);
  // The flow ends FAILED and its runner thread shuts down cleanly.
  waitForAndAssertFlowStatus(Status.FAILED);
  assertThreadShutDown();
  // The flow log appenders must be closed/removed after the run finishes.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Test the condition when a Finish all possible is called during a pause. The Failure is not
acted upon until the flow is resumed.
|
testPauseFailFinishAll
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests the case when a job is killed by SLA, causing the flow to fail. The flow should end
 * in KILLED status and the runner thread should shut down cleanly.
 */
@Test
public void testFlowKilledByJobLevelSLA() throws Exception {
  this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.CANCEL_ALL);
  FlowRunnerTestUtil.startThread(this.runner);
  assertStatus("joba", Status.RUNNING);
  assertStatus("joba1", Status.RUNNING);
  // Locate the active runner for "joba" and simulate an SLA kill on it.
  this.runner.getActiveJobRunners().stream()
      .filter(activeRunner -> "joba".equals(activeRunner.getJobId()))
      .findFirst()
      .ifPresent(JobRunner::killBySLA);
  waitForAndAssertFlowStatus(Status.KILLED);
  assertThreadShutDown();
  // The flow log appenders must be closed/removed after the run finishes.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests the case when a job is killed by SLA causing a flow to fail. The flow should be in
"killed" status.
|
testFlowKilledByJobLevelSLA
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests the case when a flow is paused and a failure triggers the CANCEL_ALL action. The flow
 * should die immediately regardless of the 'paused' status: every running job is killed, every
 * not-yet-started job is cancelled, and the flow ends KILLED.
 */
@Test
public void testPauseFailKill() throws Exception {
  this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.CANCEL_ALL);
  // 1. START FLOW
  FlowRunnerTestUtil.startThread(this.runner);
  // After it starts up, only joba should be running
  assertStatus("joba", Status.RUNNING);
  assertStatus("joba1", Status.RUNNING);
  // 2. JOB A COMPLETES SUCCESSFULLY
  InteractiveTestJob.getTestJob("joba").succeedJob();
  assertStatus("joba", Status.SUCCEEDED);
  assertStatus("joba1", Status.RUNNING);
  assertStatus("jobb", Status.RUNNING);
  assertStatus("jobc", Status.RUNNING);
  assertStatus("jobd", Status.RUNNING);
  assertStatus("jobd:innerJobA", Status.RUNNING);
  assertStatus("jobb:innerJobA", Status.RUNNING);
  // 3. Pause the flow, then fail a running job. CANCEL_ALL kills the whole flow at once,
  // even though it is paused.
  this.runner.pause("me");
  waitForAndAssertFlowStatus(Status.PAUSED);
  InteractiveTestJob.getTestJob("jobd:innerJobA").failJob();
  assertStatus("jobd:innerJobA", Status.FAILED);
  assertStatus("jobd:innerFlow2", Status.CANCELLED);
  assertStatus("jobd", Status.FAILED);
  assertStatus("jobb:innerJobA", Status.KILLED);
  assertStatus("jobb:innerJobB", Status.CANCELLED);
  assertStatus("jobb:innerJobC", Status.CANCELLED);
  assertStatus("jobb:innerFlow", Status.CANCELLED);
  assertStatus("jobb", Status.KILLED);
  assertStatus("jobc", Status.KILLED);
  assertStatus("jobe", Status.CANCELLED);
  assertStatus("jobf", Status.CANCELLED);
  assertStatus("joba1", Status.KILLED);
  // The flow ends KILLED and its runner thread shuts down cleanly.
  waitForAndAssertFlowStatus(Status.KILLED);
  assertThreadShutDown();
  // The flow log appenders must be closed/removed after the run finishes.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests the case when a flow is paused and a failure causes a kill. The flow should die
immediately regardless of the 'paused' status.
|
testPauseFailKill
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests the case when an execution is killed before any job has started. The final execution
 * status should be KILLED and none of the child jobs should ever leave READY state.
 */
@Test
public void testKillBeforeStart() throws Exception {
  this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.FINISH_ALL_POSSIBLE);
  this.runner.addListener((event) -> {
    if (event.getType().equals(EventType.FLOW_STARTED)) {
      // kill interrupts the current thread which would cause an exception if called directly,
      // so do it from another thread.
      final Thread killThread = new Thread(() -> this.runner.kill());
      killThread.start();
      try {
        // give the thread a chance to kill the execution
        killThread.join();
      } catch (final InterruptedException e) {
        // Restore the interrupt status instead of swallowing it (printStackTrace alone would
        // hide the interruption from the caller).
        Thread.currentThread().interrupt();
      }
    }
  });
  FlowRunnerTestUtil.startThread(this.runner).join();
  // children jobs shouldn't start
  assertStatus("joba", Status.READY);
  assertStatus("joba1", Status.READY);
  waitForAndAssertFlowStatus(Status.KILLED);
  assertThreadShutDown();
  // The flow log appenders must be closed/removed after the run finishes.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests the case when an execution is killed before it has started. The final execution
status should be "KILLED".
|
testKillBeforeStart
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Deleting everything in the cache to accommodate the new item.
 *
 * <p>With only 30% of the cache usable and a 7,000,000-byte item incoming, no existing
 * project dir can be kept, so the cache directory ends up empty.
 */
@Test
public void testDeletingAll() {
  // NOTE: the doc comment must precede @Test; a comment placed between the annotation and the
  // method declaration is not picked up as Javadoc.
  final ProjectCacheCleaner cleaner = new ProjectCacheCleaner(this.cacheDir, 0.3);
  cleaner.deleteProjectDirsIfNecessary(7000000);
  cleaner.finishPendingCleanup();
  assertThat(this.cacheDir.list()).hasSize(0);
}
|
Deleting everything in the cache to accommodate the new item.
|
testDeletingAll
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/ProjectCacheCleanerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/ProjectCacheCleanerTest.java
|
Apache-2.0
|
/**
 * Deleting the two least recently used items in the cache to accommodate the new item.
 *
 * <p>After cleanup only project dir "3.1" (the most recently used) survives.
 */
@Test
public void testDeletingTwoLRUItems() {
  // NOTE: the doc comment must precede @Test; a comment placed between the annotation and the
  // method declaration is not picked up as Javadoc.
  final ProjectCacheCleaner cleaner = new ProjectCacheCleaner(this.cacheDir, 0.7);
  cleaner.deleteProjectDirsIfNecessary(3000000);
  cleaner.finishPendingCleanup();
  assertThat(this.cacheDir.list()).hasSize(1);
  assertThat(this.cacheDir.list()).contains("3.1");
}
|
Deleting the two least recently used items in the cache to accommodate the new item.
|
testDeletingTwoLRUItems
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/ProjectCacheCleanerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/ProjectCacheCleanerTest.java
|
Apache-2.0
|
/**
 * Deleting the least recently used item in the cache to accommodate the new item.
 *
 * <p>After cleanup project dirs "3.1" and "2.1" remain; only the oldest entry was evicted.
 */
@Test
public void testDeletingOneLRUItem() {
  // NOTE: the doc comment must precede @Test; a comment placed between the annotation and the
  // method declaration is not picked up as Javadoc.
  final ProjectCacheCleaner cleaner = new ProjectCacheCleaner(this.cacheDir, 0.7);
  cleaner.deleteProjectDirsIfNecessary(2000000);
  cleaner.finishPendingCleanup();
  assertThat(this.cacheDir.list()).hasSize(2);
  assertThat(this.cacheDir.list()).contains("3.1");
  assertThat(this.cacheDir.list()).contains("2.1");
}
|
Deleting the least recently used item in the cache to accommodate the new item.
|
testDeletingOneLRUItem
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/ProjectCacheCleanerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/ProjectCacheCleanerTest.java
|
Apache-2.0
|
/**
 * Puts enough items in the cache to invoke the throttle condition.
 *
 * <p>Uses the three-argument cleaner constructor (0.65, 0.7) and, unlike the other tests, does
 * not call finishPendingCleanup(): the throttled path deletes synchronously, leaving only "3.1".
 */
@Test
public void testThrottleCondition() {
  // NOTE: the doc comment must precede @Test; a comment placed between the annotation and the
  // method declaration is not picked up as Javadoc.
  final ProjectCacheCleaner cleaner = new ProjectCacheCleaner(this.cacheDir, 0.65, 0.7);
  cleaner.deleteProjectDirsIfNecessary(3000000);
  assertThat(this.cacheDir.list()).hasSize(1);
  assertThat(this.cacheDir.list()).contains("3.1");
}
|
Put enough items in the cache to invoke the throttle condition.
|
testThrottleCondition
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/ProjectCacheCleanerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/ProjectCacheCleanerTest.java
|
Apache-2.0
|
/**
 * Disables the Hadoop FileSystem cache: always for the hdfs, failover and local
 * implementations, and additionally for the scheme of the configured default file system
 * when one is configured and carries a scheme.
 */
private void disableFSCache() {
  this.conf.setBoolean(FS_HDFS_IMPL_DISABLE_CACHE, true);
  this.conf.setBoolean(FS_FAILOVER_IMPL_DISABLE_CACHE, true);
  this.conf.setBoolean(FS_LOCAL_IMPL_DISABLE_CACHE, true);
  // Determine the default file-system scheme, if any; bail out when it cannot be derived.
  final String defaultFsUri = this.conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
  if (defaultFsUri == null) {
    return;
  }
  final String defaultScheme = new Path(defaultFsUri).toUri().getScheme();
  if (defaultScheme == null) {
    return;
  }
  // Build the per-scheme disable-cache property name, e.g. "fs.<scheme>.impl.disable.cache".
  final String disableCacheKey = "fs." + defaultScheme + IMPL_DISABLE_CACHE_SUFFIX;
  this.conf.setBoolean(disableCacheKey, true);
  logger.info("Disable cache for scheme " + disableCacheKey);
}
|
This class is used as abstract class for all the HadoopSecurityManager versions.
|
disableFSCache
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
/**
 * Reads a required property from {@code props}, failing when the value resolves to null.
 *
 * @param props the property set to read from
 * @param s the name of the property to fetch
 * @return the non-null property value
 * @throws HadoopSecurityManagerException if the property is not set
 */
private String verifySecureProperty(final Props props, final String s)
    throws HadoopSecurityManagerException {
  final String propertyValue = props.getString(s);
  if (propertyValue != null) {
    return propertyValue;
  }
  throw new HadoopSecurityManagerException(s + " not set in properties.");
}
|
This method is used to get property from props object. It will throw an exception when property
doesn't exist.
@param props
@param s
@return
@throws HadoopSecurityManagerException
|
verifySecureProperty
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
/**
 * Creates (or returns a cached) proxied user for {@code userToProxy}. When proxying is enabled
 * the UGI is created on behalf of the current login user; otherwise a plain remote user is
 * created. Results are cached per user in {@code userUgiMap}.
 *
 * <p>The method is {@code synchronized}, so the check-then-put on the cache is race-free.
 *
 * @param realIdentity real identity of the caller, taken for audit purposes only — it is not
 *     used in the UGI construction in this implementation
 * @param userToProxy the user to create a proxy UGI for; must not be null
 * @return the (possibly cached) UGI for {@code userToProxy}
 * @throws HadoopSecurityManagerException if {@code userToProxy} is null or proxy creation fails
 */
@Override
public synchronized UserGroupInformation getProxiedUser(final String realIdentity, final String userToProxy)
    throws HadoopSecurityManagerException {
  if (userToProxy == null) {
    throw new HadoopSecurityManagerException("userToProxy can't be null");
  }
  UserGroupInformation ugi = this.userUgiMap.get(userToProxy);
  if (ugi == null) {
    logger.info("Proxy user " + userToProxy
        + " does not exist. Creating new proxy user");
    if (this.shouldProxy) {
      try {
        // Proxy on behalf of the Azkaban login user.
        ugi =
            UserGroupInformation.createProxyUser(userToProxy,
                UserGroupInformation.getLoginUser());
      } catch (final IOException e) {
        throw new HadoopSecurityManagerException(
            "Failed to create proxy user", e);
      }
    } else {
      // Proxying disabled: fall back to a simple remote user.
      ugi = UserGroupInformation.createRemoteUser(userToProxy);
    }
    // putIfAbsent is belt-and-braces here; the method is synchronized so no other thread can
    // have inserted a mapping in between.
    this.userUgiMap.putIfAbsent(userToProxy, ugi);
  }
  return ugi;
}
|
Create a proxied user based on the explicit user name, taking other parameters necessary from
properties file. It is also taking readIdentity for audit purpose.
|
getProxiedUser
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
/**
 * Gets a {@link FileSystem} instance as the given user, delegating to the two-argument
 * overload with the same value for both the real and proxied identity.
 *
 * @param user the user to obtain the file system as
 * @return a file system bound to {@code user}
 * @throws HadoopSecurityManagerException if the file system cannot be obtained
 */
@Override
public FileSystem getFSAsUser(final String user)
    throws HadoopSecurityManagerException {
  return getFSAsUser(user, user);
}
|
Get file system as User passed in parameter.
@param user
@return
@throws HadoopSecurityManagerException
|
getFSAsUser
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
/**
 * Returns whether user proxying is enabled for this security manager.
 *
 * @return true if proxy users should be created, false to use plain remote users
 */
public boolean shouldProxy() {
  return this.shouldProxy;
}
|
This method will verify whether proxy is allowed or not.
@return
|
shouldProxy
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
/**
 * Loads and instantiates the custom {@link CredentialProvider} whose class name is configured
 * under the {@code customCredentialProviderName} property in {@code props}.
 *
 * <p>The configured class must declare a public constructor accepting
 * ({@code Credentials}, {@code Props}, {@code org.apache.log4j.Logger}) in that order.
 *
 * @param props job properties; must contain the provider class name under the given key
 * @param hadoopCred Hadoop credentials handed to the provider's constructor
 * @param jobLogger logger handed to the provider so it can write to the job log
 * @param customCredentialProviderName property key holding the provider class name
 * @return the instantiated credential provider
 * @throws IllegalStateException if the class cannot be loaded or instantiated
 */
protected CredentialProvider getCustomCredentialProvider(final Props props,
    final Credentials hadoopCred,
    final Logger jobLogger, final String customCredentialProviderName) {
  String credentialClassName = "unknown class";
  try {
    credentialClassName = props.getString(customCredentialProviderName);
    logger.info("custom credential class name: " + credentialClassName);
    // Use parameterized reflection types (Class<?>/Constructor<?>) instead of raw types.
    final Class<?> credentialClass = Class.forName(credentialClassName);
    // The credential class must have a constructor accepting 3 parameters, Credentials,
    // Props, and Logger in order.
    final Constructor<?> constructor = credentialClass
        .getConstructor(Credentials.class, Props.class, org.apache.log4j.Logger.class);
    return (CredentialProvider) constructor.newInstance(hadoopCred, props, jobLogger);
  } catch (final Exception e) {
    logger.error("Encountered error while loading and instantiating "
        + credentialClassName, e);
    throw new IllegalStateException("Encountered error while loading and instantiating "
        + credentialClassName, e);
  }
}
|
This method is used to get custom credential provider.
@param props
@param hadoopCred
@param jobLogger
@param customCredentialProviderName
@return
|
getCustomCredentialProvider
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
/**
 * Instantiates the configured custom credential provider and registers credentials for
 * {@code userToProxy}. When an in-memory {@link KeyStore} has been prepopulated via
 * {@link KeyStoreManager}, the provider must implement
 * {@code CredentialProviderWithKeyStore} so the key store can be injected before registering.
 *
 * @param props job properties used to locate and construct the provider
 * @param hadoopCred Hadoop credentials handed to the provider
 * @param userToProxy the user to register credentials for
 * @param jobLogger logger the provider writes job-log output to
 * @param customCredentialProviderName property key holding the provider class name
 * @throws IllegalStateException if the provider does not support a key store or injection fails
 */
protected void registerCustomCredential(final Props props, final Credentials hadoopCred,
    final String userToProxy, final org.apache.log4j.Logger jobLogger,
    final String customCredentialProviderName) {
  final CredentialProvider customCredential = getCustomCredentialProvider(
      props, hadoopCred, jobLogger, customCredentialProviderName);
  final KeyStore keyStore = KeyStoreManager.getInstance().getKeyStore();
  if (keyStore != null) {
    // KeyStore is prepopulated to be used by Credential Provider.
    // This KeyStore is expected especially in case of containerized execution when it is preferred
    // to keep it in-memory of Azkaban user rather than on the file-system of container. This ensures
    // that the user can't access it.
    try {
      // The provider must support key-store injection; a plain CredentialProvider fails here.
      ((CredentialProviderWithKeyStore) customCredential).setKeyStore(keyStore);
    } catch (ClassCastException e) {
      logger.error("Encountered error while casting to CredentialProviderWithKeyStore", e);
      throw new IllegalStateException(
          "Encountered error while casting to CredentialProviderWithKeyStore", e);
    } catch (final Exception e) {
      logger.error("Unknown error occurred while setting keyStore", e);
      throw new IllegalStateException("Unknown error occurred while setting keyStore", e);
    }
  }
  customCredential.register(userToProxy);
}
|
This method is used to register custom credentials which will be used when doPrefetch method is
called.
@param props
@param hadoopCred
@param userToProxy
@param jobLogger
@param customCredentialProviderName
|
registerCustomCredential
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
/**
 * Prefetches all required tokens for a job and writes them to {@code tokenFile}.
 *
 * <p>Builds the fully-qualified proxy user name (user plus optional kerberos suffix), fetches
 * all Hadoop tokens into a fresh {@link Credentials} object, registers any custom credentials
 * while running as the proxied user, then writes the credentials to the token file owned by
 * the plain (non-FQN) user.
 *
 * @param tokenFile destination file for the serialized tokens
 * @param props job properties driving which tokens to fetch
 * @param logger job logger for progress/diagnostics
 * @param userToProxy the user to fetch tokens for
 * @throws HadoopSecurityManagerException if any step of token fetching or file writing fails
 */
protected void doPrefetch(final File tokenFile, final Props props, final Logger logger,
    final String userToProxy) throws HadoopSecurityManagerException {
  // Create suffix to be added to kerberos principal
  final String suffix = getFQNSuffix(props);
  final String userToProxyFQN = userToProxy + suffix;
  // NOTE(review): this logs the full property set to the job log — verify no secrets end up here.
  logger.info(tokenFile.toString() + props.toAllProperties().toString());
  logger.info("Getting hadoop tokens based on props for " + userToProxyFQN);
  final Credentials cred = new Credentials();
  try {
    // cred is being populated
    fetchAllHadoopTokens(userToProxyFQN, userToProxy, props, logger, cred);
    // Custom credentials are registered while impersonating the FQN proxy user.
    getProxiedUser(userToProxyFQN).doAs((PrivilegedExceptionAction<Void>) () -> {
      registerAllCustomCredentials(userToProxy, props, cred, logger);
      return null;
    });
    logger.info("fetched cred = " + cred);
    cred.getAllTokens().forEach(t -> {
      logger.info(String.format("Token = %s, %s, %s ", t.getKind(), t.getService(),
          Arrays.toString(t.getIdentifier())));
    });
    logger.info("cred end");
    logger.info("Preparing token file " + tokenFile.getAbsolutePath());
    // assign userToProxy to the owner of the token file, not the FQN user
    prepareTokenFile(userToProxy, cred, tokenFile, logger,
        props.getString(Constants.ConfigurationKeys.SECURITY_USER_GROUP, "azkaban"));
    // stash them to cancel after use.
    logger.info("Tokens loaded in " + tokenFile.getAbsolutePath());
  } catch (final Exception e) {
    throw new HadoopSecurityManagerException("Failed to get hadoop tokens! "
        + e.getMessage() + e.getCause(), e);
  } catch (final Throwable t) {
    // Separate Throwable catch so even Errors surface as a HadoopSecurityManagerException.
    throw new HadoopSecurityManagerException("Failed to get hadoop tokens! "
        + t.getMessage() + t.getCause(), t);
  }
}
|
This method is used to prefetch all required tokens for a job.
@param tokenFile
@param props
@param logger
@param userToProxy
@throws HadoopSecurityManagerException
|
doPrefetch
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
/**
 * Builds the fully-qualified-name suffix that is appended to the proxy user when a domain
 * name is configured.
 *
 * @param props job properties to inspect for {@code HadoopSecurityManager.DOMAIN_NAME}
 * @return the delimiter plus kerberos suffix, or an empty string when no domain is configured
 */
protected String getFQNSuffix(Props props) {
  if (props.getString(HadoopSecurityManager.DOMAIN_NAME, null) == null) {
    return "";
  }
  return FQN_SUFFIX_DELIMITER + kerberosSuffix(props);
}
|
This method is used to get FQN suffix which will be added to proxy user.
@param props
@return
|
getFQNSuffix
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
/**
 * Prepares the token file: writes {@code credentials} to {@code tokenFile} and sets
 * appropriate ownership/permissions to keep the file secure. On any error while assigning
 * permissions the partially-prepared token file is deleted before the error is propagated,
 * so a mis-permissioned token file is never left behind.
 *
 * @param user user to be proxied; becomes the owner of the token file
 * @param credentials credentials to be written to the file
 * @param tokenFile file to be written
 * @param logger logger to use
 * @param group user group to own the token file
 * @throws IOException if there are issues in writing or securing the token file
 */
private void prepareTokenFile(final String user,
    final Credentials credentials,
    final File tokenFile,
    final Logger logger,
    final String group) throws IOException {
  writeCredentialsToFile(credentials, tokenFile, logger);
  try {
    assignPermissions(user, tokenFile, group);
  } catch (final IOException e) {
    // On any error managing the token file, delete it. Log when the delete itself fails
    // instead of silently ignoring File.delete()'s return value.
    if (!tokenFile.delete()) {
      logger.error("Failed to delete token file " + tokenFile.getAbsolutePath()
          + " after an error assigning its permissions");
    }
    throw e;
  }
}
|
Prepare token file. Writes credentials to a token file and sets appropriate permissions to keep
the file secure
@param user user to be proxied
@param credentials Credentials to be written to file
@param tokenFile file to be written
@param logger logger to use
@param group user group to own the token file
@throws IOException If there are issues in reading / updating the token file
|
prepareTokenFile
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
/**
 * Serializes all credentials into {@code tokenFile} so the file can be shared with the user's
 * job process.
 *
 * <p>Rewritten with try-with-resources instead of the manual nested-close dance. Note a
 * deliberate behavior tightening: a failure while closing the stream now propagates as an
 * {@link IOException} instead of being logged and swallowed — an unflushed token file would be
 * silently unusable otherwise.
 *
 * @param credentials credentials to serialize
 * @param tokenFile destination file
 * @param logger job logger (retained for interface compatibility)
 * @throws IOException if the file cannot be written or closed
 */
private void writeCredentialsToFile(final Credentials credentials, final File tokenFile,
    final Logger logger)
    throws IOException {
  try (final FileOutputStream fos = new FileOutputStream(tokenFile);
      final DataOutputStream dos = new DataOutputStream(fos)) {
    credentials.writeTokenStorageToStream(dos);
  }
}
|
This method is used to write all the credentials into file so that the file can be shared with
user job process.
@param credentials
@param tokenFile
@param logger
@throws IOException
|
writeCredentialsToFile
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
/**
 * Reads the comma-separated list of additional name nodes configured under
 * {@code OTHER_NAMENODES_TO_GET_TOKEN}.
 *
 * @param props job properties
 * @return the configured name nodes split into an array, or empty when the property is
 *     absent or blank
 */
protected Optional<String[]> getOtherNameNodes(final Props props) {
  // getting additional name nodes tokens
  final String configuredNameNodes = props.get(OTHER_NAMENODES_TO_GET_TOKEN);
  if (configuredNameNodes == null || configuredNameNodes.isEmpty()) {
    return Optional.empty();
  }
  logger.info("Fetching token(s) for other namenode(s): " + configuredNameNodes);
  return Optional.of(configuredNameNodes.split(","));
}
|
This method is used to fetch other NameNodes.
@param props
@return
|
getOtherNameNodes
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
/**
 * Fetches an HCat (Hive metastore) delegation token for {@code userToProxy} using the given
 * hive configuration, retrying via the configured {@code retryPolicy}.
 *
 * <p>If the decoded token has no service set and a signature override is supplied, the
 * override (trimmed, lower-cased) is written into the token's service field.
 *
 * @param userToProxy name of the user the token will be fetched for
 * @param hiveConf the configuration the hive metastore client is initialized from
 * @param tokenSignatureOverwrite optional value written into the token's service field when
 *     the token did not already carry one; may be null or blank to skip
 * @param logger job logger for diagnostics
 * @return the decoded hive metastore delegation token
 */
private Token<DelegationTokenIdentifier> fetchHcatToken(final String userToProxy,
    final HiveConf hiveConf, final String tokenSignatureOverwrite, final Logger logger)
    throws IOException, MetaException, TException {
  // Log the metastore connection settings actually in effect.
  logger.info(HiveConf.ConfVars.METASTOREURIS.varname + ": "
      + hiveConf.get(HiveConf.ConfVars.METASTOREURIS.varname));
  logger.info(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname + ": "
      + hiveConf.get(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname));
  logger.info(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname + ": "
      + hiveConf.get(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname));
  final IMetaStoreClient hiveClient = createRetryingMetaStoreClient(hiveConf);
  // The login user's short name is used as the token renewer.
  final String hcatTokenStr =
      Failsafe.with(retryPolicy)
          .get(() -> hiveClient.getDelegationToken(userToProxy, UserGroupInformation
              .getLoginUser().getShortUserName()));
  final Token<DelegationTokenIdentifier> hcatToken =
      new Token<>();
  hcatToken.decodeFromUrlString(hcatTokenStr);
  // overwrite the value of the service property of the token if the signature
  // override is specified.
  // If the service field is set, do not overwrite that
  if (hcatToken.getService().getLength() <= 0 && tokenSignatureOverwrite != null
      && tokenSignatureOverwrite.trim().length() > 0) {
    hcatToken.setService(new Text(tokenSignatureOverwrite.trim()
        .toLowerCase()));
    logger.info(HIVE_TOKEN_SIGNATURE_KEY + ":" + tokenSignatureOverwrite);
  }
  logger.info("Created hive metastore token.");
  logger.info("Token kind: " + hcatToken.getKind());
  logger.info("Token service: " + hcatToken.getService());
  return hcatToken;
}
|
This method is to fetch hcat token as per the specified hive configuration and then store the
token in to the credential store specified .
@param userToProxy String value indicating the name of the user the token will be
fetched for.
@param hiveConf the configuration based off which the hive client will be
initialized.
@param tokenSignatureOverwrite
@param logger the logger instance which writes the logging content to the job
logs.
|
fetchHcatToken
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
Apache-2.0
|
/**
 * Fetches HDFS delegation tokens into {@code cred} when OBTAIN_NAMENODE_TOKEN is enabled:
 * first for the default name node, then for each additional name node listed under
 * OTHER_NAMENODES_TO_GET_TOKEN (if any).
 *
 * @param userToProxyFQN fully-qualified proxy user the tokens are fetched as
 * @param userToProxy plain proxy user name (not used directly here; kept for symmetry with
 *     sibling fetch methods)
 * @param props job properties controlling which tokens to fetch
 * @param logger job logger for diagnostics
 * @param cred credentials object the fetched tokens are added to
 * @throws IOException if token fetching fails at the I/O level
 * @throws HadoopSecurityManagerException if token fetching fails at the security level
 */
private void fetchNameNodeToken(final String userToProxyFQN,
    final String userToProxy, final Props props,
    final Logger logger,
    final Credentials cred) throws IOException, HadoopSecurityManagerException {
  logger.info("Here is the props for " + HadoopSecurityManager.OBTAIN_NAMENODE_TOKEN +
      ": " + props.getBoolean(HadoopSecurityManager.OBTAIN_NAMENODE_TOKEN));
  if (props.getBoolean(HadoopSecurityManager.OBTAIN_NAMENODE_TOKEN, false)) {
    final String renewer = getMRTokenRenewerInternal(new JobConf()).toString();
    logger.info("Renewer is " + renewer);
    // Get the tokens name node (null URI means the default file system).
    fetchNameNodeTokenInternal(renewer, cred, userToProxyFQN, null);
    // Then fetch tokens for any additionally configured name nodes.
    Optional<String[]> otherNameNodes = getOtherNameNodes(props);
    if (otherNameNodes.isPresent()) {
      String[] nameNodeArr = otherNameNodes.get();
      for (String nameNode : nameNodeArr) {
        fetchNameNodeTokenInternal(renewer, cred, userToProxyFQN,
            new Path(nameNode.trim()).toUri());
        logger.info("Successfully fetched tokens for: " + nameNode);
      }
    }
  } else {
    logger.info(
        HadoopSecurityManager_H_2_0.OTHER_NAMENODES_TO_GET_TOKEN + " was not configured");
  }
}
|
This method is used to fetch delegation token for NameNode and add it in cred object.
@param userToProxyFQN
@param userToProxy
@param props
@param logger
@param cred
@throws IOException
@throws HadoopSecurityManagerException
|
fetchNameNodeToken
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
Apache-2.0
|
/**
 * Fetches DFS delegation tokens from one name node into {@code cred}, retrying via the
 * configured {@code retryPolicy}.
 *
 * <p>With the modified UGI format ({@code <userToProxy>/az_<host name>_<exec_id>}) the
 * FileSystem cache would create an entry per execution instead of per proxy user, which could
 * blow up the cache on a busy Executor and cause OOM — and the cached entry is never reused.
 * The FileSystem cache must therefore be disabled before calling this method; FileSystem.get()
 * honors that setting while newInstance() does not.
 *
 * @param renewer token renewer
 * @param cred credentials object the fetched tokens are added to
 * @param userToProxyFQN fully-qualified proxy user the tokens are fetched for
 * @param uri name node URI, or null for the default file system
 * @throws IOException if the file system cannot be opened or closed
 * @throws HadoopSecurityManagerException if fetching the delegation tokens fails
 */
private void fetchNameNodeTokenInternal(final String renewer, final Credentials cred,
    final String userToProxyFQN, final URI uri)
    throws IOException, HadoopSecurityManagerException {
  FileSystem fs = null;
  try {
    // Use FileSystem.get() instead of newInstance() to ensure cache is not used.
    // .get() method checks if cache is enabled or not, newInstance() does not.
    if (uri == null) {
      fs = Failsafe.with(retryPolicy)
          .get(() -> FileSystem.get(conf));
    } else {
      fs = Failsafe.with(retryPolicy)
          .get(() -> FileSystem.get(uri, conf));
    }
    // check if we get the correct FS, and most importantly, the conf
    logger.info("Getting DFS token from " + fs.getUri());
    try {
      final FileSystem finalFs = fs;
      final Token<?>[] fsTokens = Failsafe.with(retryPolicy)
          .get(() -> finalFs.addDelegationTokens(renewer, cred));
      for (final Token<?> fsToken : fsTokens) {
        logger.info(String.format(
            "DFS token from namenode pre-fetched, token kind: %s, token service: %s",
            fsToken.getKind(), fsToken.getService()));
      }
    } catch (final Exception e) {
      // Adding logging of configuration on when exception is encountered.
      logger.info("Hadoop Configuration Values used:\n");
      conf.forEach(s -> {
        logger.info("key:" + s.getKey() + " value:" + s.getValue());
      });
      // Fixed: log with the exception attached (the old message also lacked a space before
      // "because of"), and preserve the original exception as the cause when rethrowing
      // instead of dropping it.
      logger.error("Failed to fetch DFS token for " + userToProxyFQN, e);
      throw new HadoopSecurityManagerException(
          "Failed to fetch DFS token for " + userToProxyFQN, e);
    }
  } finally {
    if (fs != null) {
      fs.close();
    }
  }
}
|
fetchNameNodeTokenInternal - With modified UGI which is of the format,
<userToProxy>/az_<host name>_<exec_id>
Due to this change, the FileSystem cache creates an entry per execution instead of an entry per
proxy user. This could blow up the cache very quickly on a busy Executor and cause OOM. To make
this worse, the entry in Cache is never used as it is specific to an execution. To avoid this,
the FileSystem Cache should be disabled before calling this method.
@param renewer
@param cred
@param userToProxyFQN
@param uri
@throws IOException
@throws HadoopSecurityManagerException
|
fetchNameNodeTokenInternal
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
Apache-2.0
|
/**
 * Fetches a delegation token from the MapReduce Job History Server (if configured via
 * {@code OBTAIN_JOBHISTORYSERVER_TOKEN}) and adds it to {@code cred}.
 *
 * @param userToProxyFQN fully-qualified proxy user (used in error reporting)
 * @param userToProxy short proxy user name (unused here; kept for signature compatibility)
 * @param props job properties controlling which tokens to obtain
 * @param logger job logger
 * @param cred credentials the fetched token is added to
 * @throws HadoopSecurityManagerException when the token cannot be fetched
 * @throws IOException on proxy close failure
 */
private void fetchJHSToken(final String userToProxyFQN,
    final String userToProxy, final Props props, final Logger logger, final Credentials cred)
    throws HadoopSecurityManagerException, IOException {
  if (props.getBoolean(OBTAIN_JOBHISTORYSERVER_TOKEN, false)) {
    logger.info("Pre-fetching JH token from job history server");
    final YarnRPC rpc = YarnRPC.create(this.conf);
    final String serviceAddr = this.conf.get(JHAdminConfig.MR_HISTORY_ADDRESS);
    logger.info("Connecting to HistoryServer at: " + serviceAddr);
    final HSClientProtocol hsProxy =
        (HSClientProtocol) rpc.getProxy(HSClientProtocol.class,
            NetUtils.createSocketAddr(serviceAddr), this.conf);
    Token<?> jhsdt = null;
    try {
      jhsdt = getDelegationTokenFromHS(hsProxy);
    } catch (final Exception e) {
      logger.error("Failed to fetch JH token" + e.getMessage());
      throw new HadoopSecurityManagerException(
          "Failed to fetch JH token for " + userToProxyFQN);
    } finally {
      // Close on all paths: the previous code leaked the proxy when the fetch threw.
      // HSClientProtocol is not closable, but its only implementation,
      // HSClientProtocolPBClientImpl, is.
      if (hsProxy instanceof Closeable) {
        ((Closeable) hsProxy).close();
      }
    }
    if (jhsdt == null) {
      logger.error("getDelegationTokenFromHS() returned null");
      throw new HadoopSecurityManagerException(
          "Unable to fetch JH token for " + userToProxyFQN);
    }
    logger.info(String
        .format("JH token from job history server pre-fetched, token Kind: %s, token service: %s",
            jhsdt.getKind(), jhsdt.getService()));
    cred.addToken(jhsdt.getService(), jhsdt);
  }
}
|
This method is used to fetch delegation token for JHS and add it in cred object.
@param userToProxyFQN
@param userToProxy
@param props
@param logger
@param cred
@throws HadoopSecurityManagerException
@throws IOException
|
fetchJHSToken
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
Apache-2.0
|
/**
 * Makes a copy of this dependency.
 *
 * @return a new {@code Dependency} with identical fields
 */
public Dependency copy() {
  try {
    return new Dependency(getFileName(), getDestination(), getType(), getIvyCoordinates(),
        getSHA1());
  } catch (final InvalidHashException e) {
    // This should never happen because we already validated the hash when creating this
    // dependency. Preserve the cause so an impossible failure is still diagnosable.
    throw new RuntimeException("InvalidHashException when copying dependency.", e);
  }
}
|
Make a copy of this dependency
@return a copy of this dependency
|
copy
|
java
|
azkaban/azkaban
|
azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
Apache-2.0
|
/**
 * Two {@code DependencyFile}s are equal when all {@code Dependency} fields match and the
 * associated local files are equal.
 */
@Override
public boolean equals(final Object o) {
  // Identity short-circuit.
  if (this == o) {
    return true;
  }
  // Require the exact runtime class and equality of all superclass state.
  if (o == null || getClass() != o.getClass() || !super.equals(o)) {
    return false;
  }
  final DependencyFile other = (DependencyFile) o;
  return Objects.equals(this.file, other.file);
}
|
Representation of startup dependency with an associated local file. Usually a DependencyFile will never be
directly instantiated (except maybe in tests), but rather will be generated from an instance of a Dependency
using Dependency::makeDependencyFile(File f)
|
equals
|
java
|
azkaban/azkaban
|
azkaban-spi/src/main/java/azkaban/spi/DependencyFile.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-spi/src/main/java/azkaban/spi/DependencyFile.java
|
Apache-2.0
|
/**
 * Returns true when this event type describes a flow-level lifecycle event
 * (as opposed to a job-level one).
 */
public boolean isFlowEventType() {
  switch (this) {
    case FLOW_STARTED:
    case FLOW_FINISHED:
    case FLOW_STATUS_CHANGED:
      return true;
    default:
      return false;
  }
}
|
Enum class defining the list of supported event types.
|
isFlowEventType
|
java
|
azkaban/azkaban
|
azkaban-spi/src/main/java/azkaban/spi/EventType.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-spi/src/main/java/azkaban/spi/EventType.java
|
Apache-2.0
|
/**
 * Persists a status update of a dependency instance via the trigger-instance loader.
 *
 * @param depInst the dependency instance whose execution status changed
 */
public void processStatusUpdate(final DependencyInstance depInst) {
  //this is blocking call, might offload it to another thread if necessary.
  this.flowTriggerInstanceLoader.updateDependencyExecutionStatus(depInst);
}
|
Process status update of dependency instance
|
processStatusUpdate
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/DependencyInstanceProcessor.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/DependencyInstanceProcessor.java
|
Apache-2.0
|
/**
 * Handles a trigger instance whose status became SUCCEEDED: asynchronously starts the
 * associated flow and records its execution id.
 *
 * @param triggerInst the successfully completed trigger instance
 */
public void processSucceed(final TriggerInstance triggerInst) {
  //todo chengren311: publishing Trigger events to Azkaban Project Events page
  this.executorService.submit(() -> executeFlowAndUpdateExecID(triggerInst));
}
|
Process the case where status of trigger instance becomes success
|
processSucceed
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/TriggerInstanceProcessor.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/TriggerInstanceProcessor.java
|
Apache-2.0
|
/**
 * Handles a trigger instance whose status became CANCELLED: asynchronously sends a failure
 * email if one is configured for the flow.
 *
 * @param triggerInst the cancelled trigger instance
 */
public void processTermination(final TriggerInstance triggerInst) {
  //sendFailureEmailIfConfigured takes 1/3 secs
  this.executorService.submit(() -> sendFailureEmailIfConfigured(triggerInst));
}
|
Process the case where status of trigger instance becomes cancelled
|
processTermination
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/TriggerInstanceProcessor.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/TriggerInstanceProcessor.java
|
Apache-2.0
|
/**
 * Shuts down the internal executor service, attempting to stop in-flight tasks.
 */
public void shutdown() {
  // shutdownNow() already implies shutdown(): it rejects new tasks, drains the queue,
  // and interrupts running tasks — the previous shutdown();shutdownNow(); pair was
  // redundant. Queued email/exec-id tasks are intentionally dropped on shutdown.
  this.executorService.shutdownNow();
}
|
Shut down the internal executor service used for processing trigger instances
|
shutdown
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/TriggerInstanceProcessor.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/TriggerInstanceProcessor.java
|
Apache-2.0
|
/**
 * Deletes finalized trigger-execution records older than the given timestamp.
 *
 * <p>A record is eligible when the trigger instance is CANCELLED, or SUCCEEDED with an
 * associated flow execution (flowExecId != -1), and its end time is at or before
 * {@code timestamp}. Returns 0 and logs on SQL errors (via handleSQLException).
 *
 * @param timestamp cutoff epoch time; only executions finishing at or before this are removed
 * @return the number of deleted dependency-instance rows
 */
@Override
public int deleteTriggerExecutionsFinishingOlderThan(final long timestamp) {
  try {
    final Collection<TriggerInstance> res = this.dbOperator
        .query(SELECT_EXECUTION_OLDER_THAN,
            new TriggerInstanceHandler(SORT_MODE.SORT_ON_START_TIME_DESC), timestamp);
    final Set<String> toBeDeleted = new HashSet<>();
    for (final TriggerInstance inst : res) {
      // Only delete instances that have reached a terminal, fully-recorded state.
      if ((inst.getStatus() == Status.CANCELLED || (inst.getStatus() == Status.SUCCEEDED && inst
          .getFlowExecId() != -1)) && inst.getEndTime() <= timestamp) {
        toBeDeleted.add(inst.getId());
      }
    }
    int numDeleted = 0;
    if (!toBeDeleted.isEmpty()) {
      // NOTE(review): the DELETE statement is built by splicing quoted ids into the SQL
      // string. The ids are internally generated (not user input), but this presumably
      // relies on them never containing quotes — confirm before reusing this pattern.
      final String ids = toBeDeleted.stream().map(s -> "'" + s + "'")
          .collect(Collectors.joining(", "));
      numDeleted = this.dbOperator.update(DELETE_EXECUTIONS.replace("?", ids));
    }
    logger.info("{} dependency instance record(s) deleted", numDeleted);
    return numDeleted;
  } catch (final SQLException ex) {
    handleSQLException(ex);
    return 0;
  }
}
|
Delete finalized trigger executions older than the given timestamp. An execution is
eligible when it is cancelled, or succeeded with an associated flow execution, and its
end time is at or before the cutoff.
@param timestamp cutoff time; only executions finishing at or before this are deleted
@return the number of dependency instance records deleted
|
deleteTriggerExecutionsFinishingOlderThan
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/database/JdbcFlowTriggerInstanceLoaderImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/database/JdbcFlowTriggerInstanceLoaderImpl.java
|
Apache-2.0
|
/**
 * Parent-last class loading: attempts to resolve the class from the child classloader's
 * URLs first; only on failure delegates to the normal (parent-first) lookup.
 *
 * @param name the binary name of the class
 * @param resolve whether to resolve the class after loading
 * @throws ClassNotFoundException when neither child nor parent can find the class
 */
@Override
protected synchronized Class<?> loadClass(final String name, final boolean resolve)
    throws ClassNotFoundException {
  try {
    // first we try to find a class inside the child classloader
    return this.childClassLoader.findClass(name);
  } catch (final ClassNotFoundException e) {
    // didn't find it, try the parent
    return super.loadClass(name, resolve);
  }
}
|
A parent-last classloader that will try the child classloader first and then the parent.
Adopted from https://stackoverflow.com/questions/5445511/how-do-i-create-a-parent-last-child-first-classloader-in-java-or-how-to-overr
|
loadClass
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/plugin/ParentLastURLClassLoader.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/plugin/ParentLastURLClassLoader.java
|
Apache-2.0
|
/**
 * Exposes the protected {@link ClassLoader#findClass} so callers outside the classloader
 * hierarchy can invoke it; simply delegates to the superclass implementation.
 *
 * @param name the binary name of the class
 * @throws ClassNotFoundException when the class cannot be found
 */
@Override
public Class<?> findClass(final String name) throws ClassNotFoundException {
  return super.findClass(name);
}
|
This class allows me to call findClass on a classloader
|
findClass
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/plugin/ParentLastURLClassLoader.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/plugin/ParentLastURLClassLoader.java
|
Apache-2.0
|
/**
 * Child-first class lookup: returns an already-defined class if present, then tries this
 * loader's own URLs, and finally falls back to the real parent classloader.
 *
 * @param name the binary name of the class
 * @throws ClassNotFoundException when no loader in the chain can find the class
 */
@Override
public Class<?> findClass(final String name) throws ClassNotFoundException {
  // findLoadedClass never throws ClassNotFoundException, so it can live outside the try.
  final Class<?> alreadyDefined = super.findLoadedClass(name);
  if (alreadyDefined != null) {
    return alreadyDefined;
  }
  try {
    // Attempt to resolve from this URLClassLoader's own URLs first.
    return super.findClass(name);
  } catch (final ClassNotFoundException e) {
    // Give up on child-first resolution and ask the real parent.
    return this.realParent.loadClass(name);
  }
}
|
This class delegates (child then parent) for the findClass method for a URLClassLoader.
We need this because findClass is protected in URLClassLoader
|
findClass
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/plugin/ParentLastURLClassLoader.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/plugin/ParentLastURLClassLoader.java
|
Apache-2.0
|
/**
 * Retrieves the list of scheduled flow triggers from the Quartz scheduler.
 *
 * <p>Returns an empty list when the scheduler is not initialized or listing fails. Entries
 * that cannot be resolved individually are added as {@code null} (preserving the original
 * per-group behavior relied on by callers).
 *
 * @return one entry per Quartz job group; possibly containing nulls for broken entries
 */
public List<ScheduledFlowTrigger> getScheduledFlowTriggerJobs() {
  try {
    final Scheduler quartzScheduler = this.scheduler.getScheduler();
    if (quartzScheduler == null) {
      logger.warn("Unable to get scheduled flow triggers - Quartz scheduler has not been initialized");
      return new ArrayList<>();
    }
    final List<String> groupNames = quartzScheduler.getJobGroupNames();
    final List<ScheduledFlowTrigger> flowTriggerJobDetails = new ArrayList<>();
    for (final String groupName : groupNames) {
      final JobKey jobKey = new JobKey(FlowTriggerQuartzJob.JOB_NAME, groupName);
      ScheduledFlowTrigger scheduledFlowTrigger = null;
      try {
        final JobDetail job = quartzScheduler.getJobDetail(jobKey);
        final JobDataMap jobDataMap = job.getJobDataMap();
        final String flowId = jobDataMap.getString(FlowTriggerQuartzJob.FLOW_ID);
        final int projectId = jobDataMap.getInt(FlowTriggerQuartzJob.PROJECT_ID);
        final FlowTrigger flowTrigger = (FlowTrigger) jobDataMap
            .get(FlowTriggerQuartzJob.FLOW_TRIGGER);
        final String submitUser = jobDataMap.getString(FlowTriggerQuartzJob.SUBMIT_USER);
        final List<? extends Trigger> quartzTriggers = quartzScheduler.getTriggersOfJob(jobKey);
        final boolean isPaused = this.scheduler
            .isJobPaused(FlowTriggerQuartzJob.JOB_NAME, groupName);
        final Project project = projectManager.getProject(projectId);
        final Flow flow = project.getFlow(flowId);
        // Reuse the project fetched above instead of a second, redundant
        // projectManager.getProject(projectId) lookup just for the name.
        scheduledFlowTrigger = new ScheduledFlowTrigger(projectId,
            project.getName(),
            flowId, flowTrigger, submitUser, quartzTriggers.isEmpty() ? null
            : quartzTriggers.get(0), isPaused, flow.isLocked());
      } catch (final Exception ex) {
        // A single broken entry must not break the whole listing.
        logger.error("Unable to get flow trigger by job key {}", jobKey, ex);
        scheduledFlowTrigger = null;
      }
      flowTriggerJobDetails.add(scheduledFlowTrigger);
    }
    return flowTriggerJobDetails;
  } catch (final Exception ex) {
    logger.error("Unable to get scheduled flow triggers", ex);
    return new ArrayList<>();
  }
}
|
Retrieve the list of scheduled flow triggers from quartz database
|
getScheduledFlowTriggerJobs
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/quartz/FlowTriggerScheduler.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/quartz/FlowTriggerScheduler.java
|
Apache-2.0
|
/**
 * Deletes an image version via the common DAO and, when the deletion is rejected because
 * the version is in use, converts the attached usage data into API response DTOs.
 *
 * @param imageType name of the image type
 * @param versionId id of the version to delete
 * @param forceDelete whether to delete even when the version is referenced
 * @return the (possibly DTO-enriched) delete response
 * @throws ImageMgmtException on deletion failure
 */
@Override
public DeleteResponse deleteImageVersion(
    final String imageType, final int versionId, final Boolean forceDelete)
    throws ImageMgmtException {
  final DeleteResponse response = imageMgmtCommonDao.deleteImageVersion(imageType,
      versionId, forceDelete);
  // Only translate the payload when the delete failed AND usage data was attached.
  if (response.hasErrors() && response.getData().isPresent()) {
    final ImageVersionUsageData usageData = (ImageVersionUsageData) response.getData().get();
    ImageVersionDTO versionDTO = null;
    if (usageData.getImageVersion() != null) {
      versionDTO = versionConverter.convertToApiResponseDTO(usageData.getImageVersion());
    }
    List<ImageRampupPlanResponseDTO> rampupPlanDTOs = null;
    if (usageData.getImageRampupPlans() != null) {
      rampupPlanDTOs = rampupPlanConverter
          .convertToApiResponseDTOs(usageData.getImageRampupPlans());
    }
    response.setData(new ImageVersionUsageDataDTO(versionDTO, rampupPlanDTOs));
  }
  return response;
}
|
Implementation for image management common service
|
deleteImageVersion
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageMgmtCommonServiceImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageMgmtCommonServiceImpl.java
|
Apache-2.0
|
/**
 * Creates a normal (non-HP) ramp rule from the request after validating the image type,
 * the image version, and the caller's permission, then persists it via {@link RampRuleDao}.
 *
 * @param rampRuleRequest the incoming rule definition
 * @param ldapUser the requesting user
 * @throws ImageMgmtInvalidInputException on invalid image type/version
 * @throws ImageMgmtDaoException on DB insertion failure
 * @throws ImageMgmtInvalidPermissionException when the user lacks permission
 */
@Override
public void createRule(final ImageRampRuleRequestDTO rampRuleRequest, final User ldapUser){
  // The image type must exist.
  final ImageType imageType = imageTypeDao
      .getImageTypeByName(rampRuleRequest.getImageName())
      .orElseThrow(() -> new ImageMgmtInvalidInputException(ErrorCode.NOT_FOUND, String.format("Unable to"
          + " fetch image type metadata. Invalid image type: %s.", rampRuleRequest.getImageName())));
  // The image version must exist for that type.
  if (this.imageVersionDao.isInvalidVersion(rampRuleRequest.getImageName(), rampRuleRequest.getImageVersion())) {
    log.error("fail to validate image version: " + rampRuleRequest.getImageVersion());
    throw new ImageMgmtInvalidInputException(ErrorCode.NOT_FOUND, String.format(
        "Unable to fetch image version metadata. Invalid image version: %s.", rampRuleRequest.getImageVersion()));
  }
  final Set<String> ruleOwnerships;
  final String requestedOwners = rampRuleRequest.getOwnerships();
  if (requestedOwners != null && !requestedOwners.isEmpty()) {
    // Owners supplied explicitly: validate each ldap identity.
    final List<String> explicitOwners = Arrays.asList(requestedOwners.split(","));
    permissionManager.validateIdentity(explicitOwners);
    ruleOwnerships = new HashSet<>(explicitOwners);
  } else {
    // No owners given: fall back to the image type's owners; this call also verifies
    // the caller's permission on the image type.
    ruleOwnerships = permissionManager.validatePermissionAndGetOwnerships(imageType.getName(), ldapUser);
  }
  // Build and persist the new non-HP ramp rule.
  final ImageRampRule newRule = new ImageRampRule.Builder()
      .setRuleName(rampRuleRequest.getRuleName())
      .setImageName(rampRuleRequest.getImageName())
      .setImageVersion(rampRuleRequest.getImageVersion())
      .setOwners(ruleOwnerships)
      .setHPRule(false)
      .setCreatedBy(rampRuleRequest.getCreatedBy())
      .setModifiedBy(rampRuleRequest.getModifiedBy())
      .build();
  rampRuleDao.addRampRule(newRule);
}
|
Create ramp rule converted from ramp rule request, validate image version and user permission.
Then call for {@link RampRuleDao} to insert the entry into DB.
@param rampRuleRequest
@param ldapUser
@throws ImageMgmtInvalidInputException when failing on invalid image metadata
@throws ImageMgmtDaoException when DB insertion fail
@throws ImageMgmtInvalidPermissionException when user does not have permission
|
createRule
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageRampRuleServiceImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageRampRuleServiceImpl.java
|
Apache-2.0
|
/**
 * Creates a high-priority (HP) flow rule from the request after validating its ownerships,
 * then persists it via {@link RampRuleDao}.
 *
 * @param hpFlowRuleOwnershipRequestDTO the incoming HP rule definition
 * @param user the requesting user
 * @throws ImageMgmtInvalidInputException when ownerships are missing
 * @throws ImageMgmtDaoException on DB insertion failure
 */
@Override
public void createHpFlowRule(final RampRuleOwnershipDTO hpFlowRuleOwnershipRequestDTO, final User user) {
  final String ownerships = hpFlowRuleOwnershipRequestDTO.getOwnerships();
  // HP flow rules must carry an explicit, non-empty owner list.
  if (ownerships == null || ownerships.isEmpty()) {
    throw new ImageMgmtInvalidInputException(ErrorCode.BAD_REQUEST,
        "missing ownerships, please specify valid ldap user");
  }
  // Every listed owner must be a valid ldap identity.
  final List<String> owners = Arrays.asList(ownerships.split(","));
  permissionManager.validateIdentity(owners);
  // Persist as an HP rule; image name/version are intentionally unset for HP rules.
  rampRuleDao.addRampRule(new ImageRampRule.Builder()
      .setRuleName(hpFlowRuleOwnershipRequestDTO.getRuleName())
      .setOwners(new HashSet<>(owners))
      .setHPRule(true)
      .setCreatedBy(hpFlowRuleOwnershipRequestDTO.getCreatedBy())
      .setModifiedBy(hpFlowRuleOwnershipRequestDTO.getModifiedBy())
      .build());
}
|
Create HP Flow rule converted from HPFlowRule request, validate input ownerships.
Then call for {@link RampRuleDao} to insert the entry into DB.
@param hpFlowRuleOwnershipRequestDTO
@param user
@throws ImageMgmtDaoException when DB insertion fail
@throws ImageMgmtInvalidInputException when ownerships are missing or invalid
|
createHpFlowRule
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageRampRuleServiceImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageRampRuleServiceImpl.java
|
Apache-2.0
|
/**
 * Adds or removes owners on a ramp rule based on the request, returning the resulting
 * comma-separated owner list. Only existing owners (or admins) may change ownership.
 *
 * @param ruleOwnershipDTO DTO from the request body carrying rule name and delta owners
 * @param user the requesting user
 * @param operationType ADD or REMOVE
 * @return the new comma-separated owner list, or empty for an unknown operation
 * @throws ImageMgmtDaoException on DB update failure
 * @throws ImageMgmtInvalidPermissionException when the user lacks permission
 */
@Override
public String updateRuleOwnership(final RampRuleOwnershipDTO ruleOwnershipDTO, final User user,
    final OperationType operationType) {
  final Set<String> existingOwners = rampRuleDao.getOwners(ruleOwnershipDTO.getRuleName());
  // validate current user has permission to change owner
  if (!permissionManager.hasPermission(user, existingOwners)) {
    throw new ImageMgmtInvalidPermissionException(ErrorCode.UNAUTHORIZED,
        "current user "+ user.getUserId() + " does not have permission to change ownership");
  }
  // validate input ldaps
  final List<String> deltaOwners = Arrays.asList(ruleOwnershipDTO.getOwnerships().split(","));
  permissionManager.validateIdentity(deltaOwners);
  switch (operationType) {
    case ADD:
      // Build the union as a set and join once. The previous string concatenation always
      // appended a ",", producing a trailing comma (and possible duplicates) when every
      // requested owner was already present.
      final Set<String> union = new HashSet<>(existingOwners);
      union.addAll(deltaOwners);
      final String newOwners = String.join(",", union);
      rampRuleDao.updateOwnerships(newOwners, ruleOwnershipDTO.getRuleName(), ruleOwnershipDTO.getModifiedBy());
      return newOwners;
    case REMOVE:
      final Set<String> alteredOwnership = existingOwners.stream()
          .filter(owner -> !deltaOwners.contains(owner)).collect(Collectors.toSet());
      final String newOwnership = String.join(",", alteredOwnership);
      rampRuleDao.updateOwnerships(newOwnership, ruleOwnershipDTO.getRuleName(), ruleOwnershipDTO.getModifiedBy());
      return newOwnership;
  }
  return Strings.EMPTY;
}
|
Update Ramp Rule ownership based on {@link RampRuleOwnershipDTO} from user request to add/remove owners,
generate new owner list and update at DB.
Only azkaban admin or existing owners has the permission.
@param ruleOwnershipDTO DTO from requestBody
@param user
@param operationType Add/Remove owners
@throws ImageMgmtDaoException when DB update fail
@throws ImageMgmtValidationException when user does not have permission
@return newOwners
|
updateRuleOwnership
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageRampRuleServiceImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageRampRuleServiceImpl.java
|
Apache-2.0
|
/**
 * Deletes the ramp rule with the given name. Only the rule's owners (or admins) may delete.
 *
 * @param ruleName the rule to delete
 * @param user the requesting user
 * @throws ImageMgmtInvalidPermissionException when the user lacks permission
 */
@Override
public void deleteRule(final String ruleName, final User user) {
  // validate permission
  final Set<String> owners = rampRuleDao.getOwners(ruleName);
  if (!permissionManager.hasPermission(user, owners)) {
    log.error("current user "+ user.getUserId() + " does not have permission to delete ramp rule");
    // Fixed message: the original omitted the verb ("permission to ramp rule").
    throw new ImageMgmtInvalidPermissionException(ErrorCode.UNAUTHORIZED,
        "current user "+ user.getUserId() + " does not have permission to delete ramp rule");
  }
  rampRuleDao.deleteRampRule(ruleName);
}
|
delete ramp rule's metadata based on given ruleName
@param ruleName - ruleName in {@link ImageRampRule}
@param user - user to operate
|
deleteRule
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageRampRuleServiceImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageRampRuleServiceImpl.java
|
Apache-2.0
|
/**
 * Attaches flows to a ramp rule. Validates the caller against the rule's owner list and
 * checks each flow belongs to an existing active project, then persists flow-deny records.
 *
 * @param flowIds flows to attach
 * @param ruleName target rule
 * @param user the requesting user
 * @throws ImageMgmtInvalidPermissionException when the user lacks permission
 * @throws ImageMgmtInvalidInputException when a flow or its project is invalid
 */
@Override
public void addFlowsToRule(final List<ProjectFlow> flowIds, final String ruleName, final User user) {
  // Only the rule's owners (or admins) may attach flows.
  final Set<String> ruleOwners = rampRuleDao.getOwners(ruleName);
  if (!permissionManager.hasPermission(user, ruleOwners)) {
    log.error("current user "+ user.getUserId() + " does not have permission to add flows to Ramp rule");
    throw new ImageMgmtInvalidPermissionException(ErrorCode.UNAUTHORIZED,
        "current user "+ user.getUserId() + " does not have permission to add flows to Ramp rule");
  }
  try {
    // Each flow must exist inside an active project.
    for (final ProjectFlow projectFlow : flowIds) {
      if (projectLoader.isFlowInProject(projectFlow.projectName, projectFlow.flowName)) {
        continue;
      }
      log.error("flowId " + projectFlow + " invalid, either project or flow not exist or active.");
      throw new ImageMgmtInvalidInputException(ErrorCode.BAD_REQUEST,
          "flowId " + projectFlow + " invalid, either project or flow not exist or active.");
    }
  } catch (ProjectManagerException e) {
    log.error("failed to validate inputs" + e);
    throw new ImageMgmtException(ErrorCode.BAD_REQUEST, "failed to validate inputs: " + e.getMessage());
  }
  // insert into flow.deny.list table with record {flowId, denyMode, denyVersions, ruleName}
  rampRuleDao.addFlowDenyInfo(flowIds, ruleName);
}
|
add flows into ramp rules. Validation will be performed based on owner list, active project and valid flows.
call Dao layer to insert flow to image deny metadata into DB.
@param flowIds
@param ruleName
@param user
|
addFlowsToRule
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageRampRuleServiceImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageRampRuleServiceImpl.java
|
Apache-2.0
|
/**
 * Updates the image version on a normal (non-HP) ramp rule after verifying the caller's
 * permission, that the rule is not an HP rule, and that the new version exists.
 *
 * @param newVersion the version to set on the rule
 * @param ruleName the rule to update
 * @param user the requesting user
 * @throws ImageMgmtException on permission or validation failure
 */
@Override
public void updateVersionOnRule(final String newVersion, final String ruleName, final User user) {
  final ImageRampRule rule = rampRuleDao.getRampRule(ruleName);
  // The owner list is stored as a comma-separated string on the rule.
  final Set<String> ruleOwners = new HashSet<>(Arrays.asList(rule.getOwners().split(",")));
  if (!permissionManager.hasPermission(user, ruleOwners)) {
    log.error("current user "+ user.getUserId() + " does not have permission to add flows to Ramp rule");
    throw new ImageMgmtInvalidPermissionException(ErrorCode.UNAUTHORIZED,
        "current user "+ user.getUserId() + " does not have permission to add flows to Ramp rule");
  }
  // HP rules deny all versions; a version update is meaningless for them.
  if (rule.isHPRule()) {
    log.error("Can't update version on a HP flow rule");
    throw new ImageMgmtInvalidInputException(ErrorCode.BAD_REQUEST, "Can't update version on a HP flow rule");
  }
  // The new version must exist for the rule's image type.
  if (this.imageVersionDao.isInvalidVersion(rule.getImageName(), newVersion)) {
    log.error("fail to validate image version: " + newVersion);
    throw new ImageMgmtInvalidInputException(ErrorCode.NOT_FOUND, String.format(
        "Unable to fetch image version metadata. Invalid image version: %s.", newVersion));
  }
  rampRuleDao.updateVersionOnRule(newVersion, ruleName, user.getUserId());
}
|
Update normal ramp rule's version based on given ruleName, validated based on current user.
@param ruleName - ruleName in {@link ImageRampRule}
@param newVersion - new version to be updated
@param user - user must have the permission to operate
@throws ImageMgmtException
|
updateVersionOnRule
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageRampRuleServiceImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageRampRuleServiceImpl.java
|
Apache-2.0
|
/**
 * Validates a rampup request: (1) rampup details must be present, (2) percentages must sum
 * to exactly 100, (3) image versions must be unique, (4) versions tagged UNSTABLE must have
 * a rampup percentage of 0.
 *
 * <p>NOTE(review): the method name carries a typo ("vaidate") but is kept because private
 * callers elsewhere in the file depend on it.
 *
 * @param imageRampupPlanRequest the rampup plan to validate
 * @return true when valid (otherwise an exception is thrown)
 * @throws ImageMgmtValidationException describing the first violated rule
 */
private boolean vaidateRampup(final ImageRampupPlanRequestDTO imageRampupPlanRequest)
    throws ImageMgmtValidationException {
  final List<ImageRampupDTO> imageRampupRequests = imageRampupPlanRequest.getImageRampups();
  log.info("vaidateRampup imageRampupRequests: {} ", imageRampupRequests);
  if (CollectionUtils.isEmpty(imageRampupRequests)) {
    throw new ImageMgmtValidationException(ErrorCode.BAD_REQUEST, "Missing rampup details");
  }
  // Rule 2: total rampup percentage must be exactly 100.
  int totalRampupPercentage = 0;
  for (final ImageRampupDTO imageRampupRequest : imageRampupRequests) {
    totalRampupPercentage += imageRampupRequest.getRampupPercentage();
  }
  if (totalRampupPercentage != 100) {
    throw new ImageMgmtValidationException(ErrorCode.BAD_REQUEST, "Total rampup percentage for "
        + "all the version must be 100");
  }
  // Rule 3: no duplicate image versions. Set.add returns false for an already-seen value,
  // replacing the previous contains-then-add pair.
  final Set<String> versions = new HashSet<>();
  for (final ImageRampupDTO imageRampupRequest : imageRampupRequests) {
    if (!versions.add(imageRampupRequest.getImageVersion())) {
      throw new ImageMgmtValidationException(ErrorCode.BAD_REQUEST, String.format("Duplicate "
              + "image version: %s.",
          imageRampupRequest.getImageVersion()));
    }
  }
  // Rule 4: UNSTABLE versions must not receive any traffic.
  for (final ImageRampupDTO imageRampupRequest : imageRampupRequests) {
    if (StabilityTag.UNSTABLE.equals(imageRampupRequest.getStabilityTag())
        && imageRampupRequest.getRampupPercentage() != 0) {
      throw new ImageMgmtValidationException(ErrorCode.BAD_REQUEST, String.format("The image "
              + "version: %s is marked as UNSTABLE in the input and hence the rampup percentage "
              + "must be 0.",
          imageRampupRequest.getImageVersion()));
    }
  }
  return true;
}
|
Validate input provided as part of rampup request. Here are the validations - 1. Total rampup
percentage must add up to 100. 2. Rampup details must not have duplicate image versions. 3. If a
specific version is marked as UNSTABLE in the rampup, the corresponding rampup percentage must
be zero.
@param imageRampupPlanRequest
@return boolean
@throws ImageMgmtValidationException
|
vaidateRampup
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageRampupServiceImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageRampupServiceImpl.java
|
Apache-2.0
|
/**
 * Validates image-type ownership metadata: at least two owners, at least one ADMIN owner,
 * and no duplicate owners.
 *
 * @param imageType the image type whose ownerships are checked
 * @return true when valid (otherwise an exception is thrown)
 * @throws ImageMgmtValidationException describing the first violated rule
 */
private boolean validateOwnership(final ImageTypeDTO imageType)
    throws ImageMgmtValidationException {
  final String typeName = imageType.getName();
  // Rule 1: at least two owners must be registered.
  final boolean tooFewOwners = CollectionUtils.isEmpty(imageType.getOwnerships())
      || imageType.getOwnerships().size() < 2;
  if (tooFewOwners) {
    log.error("Please specify at least two owners for the image type: {} ", typeName);
    throw new ImageMgmtValidationException(ErrorCode.BAD_REQUEST, String.format("Please specify"
        + " at least two owners for the image type: %s. ", typeName));
  }
  // Rule 2: at least one owner must hold the ADMIN role.
  if (!hasAdminRole(imageType)) {
    log.error("Please specify at least one ADMIN owner for image type: {} ", typeName);
    throw new ImageMgmtValidationException(ErrorCode.BAD_REQUEST, String.format("Please specify"
        + " at least one ADMIN owner for image type: %s. ", typeName));
  }
  // Rule 3: no owner may be listed twice.
  if (hasDuplicateOwner(imageType)) {
    log.error("The ownership data contains duplicate owners.");
    throw new ImageMgmtValidationException(ErrorCode.BAD_REQUEST,
        "The ownership data contains duplicate owners.");
  }
  return true;
}
|
Validate image type ownership metadata.
@param imageType
@return boolean
@throws ImageMgmtValidationException
|
validateOwnership
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageTypeServiceImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageTypeServiceImpl.java
|
Apache-2.0
|
/**
 * Returns true when at least one listed owner carries the ADMIN role.
 *
 * @param imageType the image type whose ownerships are inspected
 */
private boolean hasAdminRole(final ImageTypeDTO imageType) {
  for (final ImageOwnershipDTO ownership : imageType.getOwnerships()) {
    if (Role.ADMIN.equals(ownership.getRole())) {
      // Early exit: one ADMIN owner is sufficient.
      return true;
    }
  }
  return false;
}
|
Validate if ownership information contains ADMIN role.
@param imageType
@return boolean
|
hasAdminRole
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageTypeServiceImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageTypeServiceImpl.java
|
Apache-2.0
|
/**
 * Returns true when the ownership list names the same owner more than once.
 *
 * @param imageType the image type whose ownerships are inspected
 */
private boolean hasDuplicateOwner(final ImageTypeDTO imageType) {
  final Set<String> seen = new HashSet<>();
  for (final ImageOwnershipDTO ownership : imageType.getOwnerships()) {
    // Set.add returns false when the owner was already present — i.e. a duplicate.
    if (!seen.add(ownership.getOwner())) {
      return true;
    }
  }
  return false;
}
|
Validate if ownership information contains duplicate owners.
@param imageType
@return boolean
|
hasDuplicateOwner
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageTypeServiceImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/imagemgmt/services/ImageTypeServiceImpl.java
|
Apache-2.0
|
/**
 * Handles creation of a ramp rule for a specific image version. Responds 201 (CREATED)
 * on success; on an ImageMgmtException, responds with the exception's error code/message.
 *
 * @param req the servlet request carrying the JSON rule definition in its body
 * @param resp the servlet response
 * @param user the authenticated user; recorded as creator/modifier
 * @throws ServletException on servlet-level failure
 */
private void handleCreateRampRule(final HttpServletRequest req,
    final HttpServletResponse resp,
    final User user)
    throws ServletException {
  final String requestBody = HttpRequestUtils.getBody(req);
  ImageRampRuleRequestDTO rampRuleRequestDTO;
  try {
    // while converting to requestDTO, validation on json/required parameters would be performed.
    rampRuleRequestDTO = utils.convertToDTO(requestBody, ImageRampRuleRequestDTO.class);
    rampRuleRequestDTO.setCreatedBy(user.getUserId());
    rampRuleRequestDTO.setModifiedBy(user.getUserId());
    imageRampRuleService.createRule(rampRuleRequestDTO, user);
    resp.setStatus(HttpStatus.SC_CREATED);
  } catch (ImageMgmtException e) {
    // Pass the exception to the logger so the stack trace is preserved
    // (the original call logged only the request body).
    LOG.error("failed to create a rampRule: " + requestBody, e);
    resp.setStatus(e.getErrorCode().getCode(), e.getMessage());
  }
}
|
create an exclusive rule for a certain version of the image,
Successful call would return CREATED(201).
@throws ImageMgmtException with different ErrorCode, and the detailed error message.
|
handleCreateRampRule
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/imagemgmt/servlets/ImageRampRuleServlet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/imagemgmt/servlets/ImageRampRuleServlet.java
|
Apache-2.0
|
/**
 * Creates an exclusive rule for high-priority flows which will deny all image
 * ramp versions. A successful call responds with CREATED (201); on failure the
 * error code and message of the {@link ImageMgmtException} are written to the
 * response.
 *
 * @param req  HTTP request carrying the JSON rule definition in its body
 * @param resp HTTP response to write the status to
 * @param user authenticated user creating the rule
 * @throws ServletException on servlet-level failures
 */
private void handleCreateHPFlowRule(final HttpServletRequest req,
    final HttpServletResponse resp,
    final User user)
    throws ServletException {
  final String requestBody = HttpRequestUtils.getBody(req);
  try {
    // while converting to requestDTO, validation on json/required parameters would be performed.
    final RampRuleOwnershipDTO hpFlowRuleRequestDTO =
        utils.convertToDTO(requestBody, RampRuleOwnershipDTO.class);
    hpFlowRuleRequestDTO.setCreatedBy(user.getUserId());
    hpFlowRuleRequestDTO.setModifiedBy(user.getUserId());
    imageRampRuleService.createHpFlowRule(hpFlowRuleRequestDTO, user);
    resp.setStatus(HttpStatus.SC_CREATED);
  } catch (final ImageMgmtException e) {
    // Fixed copy-paste: this handler creates an HP flow rule, not a ramp rule.
    LOG.error("failed to create a high priority flow rule: " + requestBody);
    resp.setStatus(e.getErrorCode().getCode(), e.getMessage());
  }
}
|
create an exclusive rule for High priority flows which will deny all image Ramp Versions,
Successful call would return CREATED(201).
@throws ImageMgmtException with different ErrorCode, and the detailed error message.
|
handleCreateHPFlowRule
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/imagemgmt/servlets/ImageRampRuleServlet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/imagemgmt/servlets/ImageRampRuleServlet.java
|
Apache-2.0
|
/**
 * Adds or removes owners for a ramp rule (the direction is selected by
 * {@code type}). A successful call responds with OK (200) and echoes the
 * updated owner list; on failure the error code and message of the
 * {@link ImageMgmtException} are written to the response.
 *
 * @param req  HTTP request carrying the ownership delta in its body
 * @param resp HTTP response to write the result to
 * @param user authenticated user performing the update
 * @param type operation type (add or remove)
 * @throws ServletException on servlet-level failures
 * @throws IOException      if writing the response fails
 */
private void handleUpdateOwnerships(final HttpServletRequest req,
    final HttpServletResponse resp,
    final User user,
    final ImageRampRuleService.OperationType type)
    throws ServletException, IOException {
  final String requestBody = HttpRequestUtils.getBody(req);
  try {
    // Conversion to the DTO also validates the JSON payload and its required fields.
    final RampRuleOwnershipDTO ownershipDelta =
        utils.convertToDTO(requestBody, RampRuleOwnershipDTO.class);
    ownershipDelta.setModifiedBy(user.getUserId());
    final String updatedOwners =
        imageRampRuleService.updateRuleOwnership(ownershipDelta, user, type);
    // Echo the resulting full owner list and the rule name back to the caller.
    final RampRuleOwnershipDTO responseModel = new RampRuleOwnershipDTO();
    responseModel.setOwnerships(updatedOwners);
    responseModel.setRuleName(ownershipDelta.getRuleName());
    sendResponse(resp, HttpServletResponse.SC_OK, responseModel);
  } catch (final ImageMgmtException e) {
    LOG.error("failed to update ownerships: " + requestBody);
    resp.setStatus(e.getErrorCode().getCode(), e.getMessage());
  }
}
|
Add/Remove owners for a ramp rule.
Successful call would return OK(200).
@throws ImageMgmtException with different ErrorCode, and the detailed error message.
|
handleUpdateOwnerships
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/imagemgmt/servlets/ImageRampRuleServlet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/imagemgmt/servlets/ImageRampRuleServlet.java
|
Apache-2.0
|
/**
 * Adds a list of flowIds (each carrying projectName and flowName) to a ramp
 * rule. A successful call responds with OK (200); on failure the error code
 * and message of the {@link ImageMgmtException} are written to the response.
 *
 * @param req  HTTP request carrying the flow list and rule name in its body
 * @param resp HTTP response to write the status to
 * @param user authenticated user performing the update
 * @throws ServletException on servlet-level failures
 */
private void handleAddFlowsToRule(final HttpServletRequest req,
    final HttpServletResponse resp,
    final User user) throws ServletException {
  final String requestBody = HttpRequestUtils.getBody(req);
  try {
    final RampRuleFlowsDTO flowsRequest =
        utils.convertToDTO(requestBody, RampRuleFlowsDTO.class);
    imageRampRuleService.addFlowsToRule(flowsRequest.getFlowIds(), flowsRequest.getRuleName(), user);
    resp.setStatus(HttpServletResponse.SC_OK);
  } catch (final ImageMgmtException e) {
    LOG.error("fail to add flow to the rule: " + requestBody);
    resp.setStatus(e.getErrorCode().getCode(), e.getMessage());
  }
}
|
Add a list of flowIds to the rule.
flowIds will come with projectName and flowName.
Successful call would return OK(200).
@throws ImageMgmtException with different ErrorCode, and the detailed error message.
|
handleAddFlowsToRule
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/imagemgmt/servlets/ImageRampRuleServlet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/imagemgmt/servlets/ImageRampRuleServlet.java
|
Apache-2.0
|
/**
 * Updates a ramp rule's version based on the {@code ruleName} and
 * {@code version} request parameters. A successful call responds with OK
 * (200); on failure the error code and message of the
 * {@link ImageMgmtException} are written to the response.
 *
 * @param req  HTTP request carrying the ruleName and version parameters
 * @param resp HTTP response to write the status to
 * @param user authenticated user performing the update
 */
private void handleUpdateVersionOnRule(final HttpServletRequest req,
    final HttpServletResponse resp,
    final User user) {
  try {
    // getReqParam throws ImageMgmtInvalidInputException (an ImageMgmtException)
    // when a required parameter is missing, so it is handled by the catch below.
    final String ruleName = getReqParam(req, RULE_NAME);
    final String version = getReqParam(req, VERSION);
    imageRampRuleService.updateVersionOnRule(version, ruleName, user);
    resp.setStatus(HttpServletResponse.SC_OK);
  } catch (ImageMgmtException e) {
    // NOTE(review): this logs the raw request object via its toString(), which
    // may not include the parameters — consider logging ruleName/version instead.
    LOG.error("fail to update version by rule " + req);
    resp.setStatus(e.getErrorCode().getCode(), e.getMessage());
  }
}
|
Updates a ramp rule's version based on given ruleName and version in parameters.
Successful call would return OK(200).
@throws ImageMgmtException with different ErrorCode, and the detailed error message.
|
handleUpdateVersionOnRule
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/imagemgmt/servlets/ImageRampRuleServlet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/imagemgmt/servlets/ImageRampRuleServlet.java
|
Apache-2.0
|
/**
 * Deletes a ramp rule identified by the {@code ruleName} request parameter. A
 * successful call responds with OK (200); on failure the error code and
 * message of the {@link ImageMgmtException} are written to the response.
 *
 * @param req  HTTP request carrying the ruleName parameter
 * @param resp HTTP response to write the status to
 * @param user authenticated user performing the deletion
 */
private void handleDeleteRule(final HttpServletRequest req,
    final HttpServletResponse resp,
    final User user) {
  try {
    // getReqParam throws ImageMgmtInvalidInputException (an ImageMgmtException)
    // when the parameter is missing, so it is handled by the catch below.
    final String ruleName = getReqParam(req, RULE_NAME);
    imageRampRuleService.deleteRule(ruleName, user);
    resp.setStatus(HttpServletResponse.SC_OK);
  } catch (ImageMgmtException e) {
    // NOTE(review): this logs the raw request object via its toString() —
    // consider logging the rule name instead.
    LOG.error("fail to delete rule " + req);
    resp.setStatus(e.getErrorCode().getCode(), e.getMessage());
  }
}
|
Delete a ramp rule.
Successful call would return OK(200).
@throws ImageMgmtException with different ErrorCode, and the detailed error message.
|
handleDeleteRule
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/imagemgmt/servlets/ImageRampRuleServlet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/imagemgmt/servlets/ImageRampRuleServlet.java
|
Apache-2.0
|
/**
 * Fetches a required parameter from the request.
 *
 * @param req           HTTP request {@link HttpServletRequest}
 * @param requiredParam name of the parameter that must be present
 * @return the parameter's value
 * @throws ImageMgmtInvalidInputException if the parameter is missing or empty
 */
private String getReqParam(final HttpServletRequest req, final String requiredParam) {
  final String value = req.getParameter(requiredParam);
  if (value == null || value.isEmpty()) {
    LOG.error("{} must not be null or empty", requiredParam);
    throw new ImageMgmtInvalidInputException(ErrorCode.BAD_REQUEST,
        requiredParam + " must not be null or empty");
  }
  return value;
}
|
Fetch a required parameter from the request.
@param req - Http request {@link HttpServletRequest}
@param requiredParam - name of the required parameter
@return the parameter's value
@throws ImageMgmtInvalidInputException if the required param is not found
|
getReqParam
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/imagemgmt/servlets/ImageRampRuleServlet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/imagemgmt/servlets/ImageRampRuleServlet.java
|
Apache-2.0
|
/**
 * Validates the supplied object and collects any constraint violations.
 * Returns true if there is no violation; otherwise appends each violation
 * message to {@code validationErrors}, logs it, and returns false.
 *
 * @param obj              object to validate
 * @param validationErrors output list that receives violation messages
 * @param <T>              type of the validated object
 * @return true if the object is valid, false otherwise
 */
public static <T> boolean validateObject(final T obj, final List<String> validationErrors) {
  final Validator validator = validatorFactory.getValidator();
  final Set<ConstraintViolation<T>> violations = validator.validate(obj);
  if (!violations.isEmpty()) {
    log.error("Object validation failed for: " + obj.toString());
    for (final ConstraintViolation<T> violation : violations) {
      validationErrors.add(violation.getMessage());
      log.error(violation.getPropertyPath().toString() + " " + violation.getMessage());
    }
    return false;
  }
  return true;
}
|
Performs validation on the supplied object and creates a set of violations. Returns true if there
is no violation. Returns false if there exists any violation.
@param obj
@param <T>
@param validationErrors
@return boolean
|
validateObject
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/imagemgmt/utils/ValidatorUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/imagemgmt/utils/ValidatorUtils.java
|
Apache-2.0
|
/**
 * Validates the supplied object against the given validation group and
 * collects any constraint violations. Returns true if there is no violation;
 * otherwise appends each violation message to {@code validationErrors}, logs
 * it, and returns false.
 *
 * @param obj              object to validate
 * @param validationErrors output list that receives violation messages
 * @param validationGroup  validation group class selecting which constraints apply
 * @param <T>              type of the validated object
 * @return true if the object is valid for the group, false otherwise
 */
public static <T> boolean validateObject(final T obj, final List<String> validationErrors,
    final Class<?> validationGroup) {
  final Validator validator = validatorFactory.getValidator();
  final Set<ConstraintViolation<T>> violations = validator.validate(obj, validationGroup);
  if (!violations.isEmpty()) {
    log.error("Object validation failed for: " + obj.toString());
    for (final ConstraintViolation<T> violation : violations) {
      validationErrors.add(violation.getMessage());
      log.error(violation.getPropertyPath().toString() + " " + violation.getMessage());
    }
    return false;
  }
  return true;
}
|
Performs validation on the supplied object and creates a set of violations. Returns true if there
is no violation. Returns false if there exists any violation. This method performs validation for
the annotations belonging to the given validation group class.
@param obj
@param <T>
@return boolean
|
validateObject
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/imagemgmt/utils/ValidatorUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/imagemgmt/utils/ValidatorUtils.java
|
Apache-2.0
|
/**
 * Pauses a job if it exists in the scheduler.
 *
 * @param jobName   name of the job
 * @param groupName group the job belongs to
 * @return true if the job was paused, false if it does not exist
 * @throws SchedulerException on scheduler failures
 */
public synchronized boolean pauseJobIfPresent(final String jobName, final String groupName)
    throws SchedulerException {
  if (!ifJobExist(jobName, groupName)) {
    return false;
  }
  this.scheduler.pauseJob(new JobKey(jobName, groupName));
  return true;
}
|
Pause a job if it's present.
@param jobName
@param groupName
@return true if the job has been paused, false if the job doesn't exist.
@throws SchedulerException
|
pauseJobIfPresent
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/scheduler/QuartzScheduler.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/scheduler/QuartzScheduler.java
|
Apache-2.0
|
/**
 * Checks whether a job is paused, i.e. whether any of its triggers is in the
 * PAUSED state.
 *
 * @param jobName   name of the job
 * @param groupName group the job belongs to
 * @return true if at least one trigger of the job is paused, false otherwise
 * @throws SchedulerException if the job does not exist or on scheduler failures
 */
public synchronized boolean isJobPaused(final String jobName, final String groupName)
    throws SchedulerException {
  if (!ifJobExist(jobName, groupName)) {
    throw new SchedulerException(String.format("Job (job name %s, group name %s) doesn't "
        + "exist'", jobName, groupName));
  }
  final JobDetail jobDetail = this.scheduler.getJobDetail(new JobKey(jobName, groupName));
  for (final Trigger trigger : this.scheduler.getTriggersOfJob(jobDetail.getKey())) {
    final TriggerState state = this.scheduler.getTriggerState(trigger.getKey());
    if (TriggerState.PAUSED.equals(state)) {
      return true;
    }
  }
  return false;
}
|
Check if job is paused.
@return true if job is paused, false otherwise.
|
isJobPaused
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/scheduler/QuartzScheduler.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/scheduler/QuartzScheduler.java
|
Apache-2.0
|
/**
 * Resumes a job if it exists in the scheduler.
 *
 * @param jobName   name of the job
 * @param groupName group the job belongs to
 * @return true if the job was resumed, false if it does not exist
 * @throws SchedulerException on scheduler failures
 */
public synchronized boolean resumeJobIfPresent(final String jobName, final String groupName)
    throws SchedulerException {
  if (!ifJobExist(jobName, groupName)) {
    return false;
  }
  this.scheduler.resumeJob(new JobKey(jobName, groupName));
  return true;
}
|
Resume a job.
@param jobName
@param groupName
@return true if the job has been resumed, false if the job doesn't exist.
@throws SchedulerException
|
resumeJobIfPresent
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/scheduler/QuartzScheduler.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/scheduler/QuartzScheduler.java
|
Apache-2.0
|
/**
 * Unschedules (deletes) a job from the scheduler.
 *
 * @param jobName   name of the job
 * @param groupName group the job belongs to
 * @return true if the job was found and unscheduled
 * @throws SchedulerException on scheduler failures
 */
public synchronized boolean unscheduleJob(final String jobName, final String groupName) throws
    SchedulerException {
  final JobKey jobKey = new JobKey(jobName, groupName);
  return this.scheduler.deleteJob(jobKey);
}
|
Unschedule a job.
@param jobName
@param groupName
@return true if job is found and unscheduled.
@throws SchedulerException
|
unscheduleJob
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/scheduler/QuartzScheduler.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/scheduler/QuartzScheduler.java
|
Apache-2.0
|
/**
 * Produces a Guice-injected Quartz {@link Job} in this custom job factory, so
 * that Quartz jobs can receive their dependencies via the injector. Every
 * Quartz job scheduled through this factory is constructed here.
 *
 * @param bundle    trigger-fired bundle carrying the job detail
 * @param scheduler the Quartz scheduler (unused)
 * @return a freshly injected job instance
 * @throws SchedulerException on construction failures
 */
@Override
public Job newJob(final TriggerFiredBundle bundle, final Scheduler scheduler)
    throws SchedulerException {
  final Class<?> jobClass = bundle.getJobDetail().getJobClass();
  return (Job) this.injector.getInstance(jobClass);
}
|
Produce Guice-able Job in this custom defined Job Factory.
In order to allow Quartz jobs to easily inject dependencies, we create this factory. Every Quartz
job will be constructed by the newJob method.
|
newJob
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/scheduler/SchedulerJobFactory.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/scheduler/SchedulerJobFactory.java
|
Apache-2.0
|
/**
 * Builds the routing map from URL path to the servlet that handles it.
 * The root path "/" redirects to the configurable default servlet path
 * ({@code azkaban.default.servlet.path}, defaulting to "/index"). Image
 * management routes are only registered when containerized dispatch is
 * enabled.
 *
 * @return the routing map of path to servlets
 */
@VisibleForTesting
protected Map<String, AbstractAzkabanServlet> getRoutesMap() {
  final String defaultServletPath =
      this.props.getString("azkaban.default.servlet.path", "/index");
  final Map<String, AbstractAzkabanServlet> routesMap = new HashMap<>();
  routesMap.put("/index", new ProjectServlet());
  routesMap.put("/manager", new ProjectManagerServlet());
  routesMap.put("/executor", new ExecutorServlet());
  routesMap.put("/schedule", new ScheduleServlet());
  routesMap.put("/triggers", new TriggerManagerServlet());
  routesMap.put("/flowtrigger", new FlowTriggerServlet());
  routesMap.put("/flowtriggerinstance", new FlowTriggerInstanceServlet());
  routesMap.put("/history", new HistoryServlet());
  routesMap.put("/jmx", new JMXHttpServlet());
  routesMap.put("/stats", new StatsServlet());
  routesMap.put("/notes", new NoteServlet());
  routesMap.put("/", new IndexRedirectServlet(defaultServletPath));
  routesMap.put("/status", new StatusServlet(this.statusService))
  // Image management endpoints are only meaningful for containerized dispatch.
  if (isContainerizedDispatchMethodEnabled()) {
    routesMap.put("/imageTypes/*", new ImageTypeServlet());
    routesMap.put("/imageVersions/*", new ImageVersionServlet());
    routesMap.put("/imageRampup/*", new ImageRampupServlet());
    routesMap.put("/imageRampRule/*", new ImageRampRuleServlet());
  }
  return routesMap;
}
|
@return The routing map of path to servlets.
|
getRoutesMap
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/webapp/AzkabanWebServer.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/webapp/AzkabanWebServer.java
|
Apache-2.0
|
/**
 * Returns the map of executor id to executor, used by GSON to render the
 * cluster status JSON object.
 *
 * @return map of executor id to {@link Executor}
 */
public Map<Integer, Executor> getExecutorStatusMap() {
  return executorStatusMap;
}
|
This POJO is used by GSON library to create a status JSON object. This class represents status
for azkaban cluster.
|
getExecutorStatusMap
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/webapp/ClusterStatus.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/webapp/ClusterStatus.java
|
Apache-2.0
|
/**
 * Returns the map of image type name to version metadata, used by GSON to
 * render the containerized cluster status JSON object.
 *
 * @return map of image type to {@link ImageVersionMetadataResponseDTO}
 */
public Map<String, ImageVersionMetadataResponseDTO> getImageTypeVersionMap() {
  return imageTypeVersionMap;
}
|
This POJO is used by GSON library to create a status JSON object. This class represents status
for containerized cluster.
|
getImageTypeVersionMap
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/webapp/ContainerizedClusterStatus.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/webapp/ContainerizedClusterStatus.java
|
Apache-2.0
|
/**
 * Derives the CSRF token for the given session.
 *
 * @param session session containing the session-id
 * @return CSRF token derived from the session-id, or null if the session or
 *     its id is missing
 */
public String getCSRFTokenFromSession(Session session) {
  if (null == session || StringUtils.isEmpty(session.getSessionId())) {
    return null;
  }
  return getCSRFTokenFromSessionId(session.getSessionId());
}
|
@param session containing session-id
@return CSRF token derived from session-id
|
getCSRFTokenFromSession
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/webapp/CSRFTokenUtility.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/webapp/CSRFTokenUtility.java
|
Apache-2.0
|
/**
 * Derives the CSRF token for the given session-id by hashing it and
 * Base64-encoding the result.
 *
 * @param sessionId the session identifier to derive the token from
 * @return CSRF token derived from the session-id
 */
private String getCSRFTokenFromSessionId(String sessionId) {
  // UTF-8 is guaranteed to be supported by every JVM; using the Charset
  // overload removes the checked UnsupportedEncodingException and the
  // unreachable null-returning catch branch of getBytes(String).
  final byte[] bytes = sessionId.getBytes(java.nio.charset.StandardCharsets.UTF_8);
  // NOTE(review): this.hashFunction.doFinal looks stateful (Mac/MessageDigest
  // style) and such objects are not thread-safe — confirm callers serialize
  // access to this utility.
  final byte[] hashedSessionId = this.hashFunction.doFinal(bytes);
  return Base64.getEncoder().encodeToString(hashedSessionId);
}
|
@param sessionId
@return CSRF token derived from session-id
|
getCSRFTokenFromSessionId
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/webapp/CSRFTokenUtility.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/webapp/CSRFTokenUtility.java
|
Apache-2.0
|
/**
 * Validates a CSRF token received with a request against the token derived
 * from the session-id.
 *
 * @param sessionId            session-id the expected token is derived from
 * @param csrfTokenFromRequest token supplied by the request (may be null)
 * @return true if {@code csrfTokenFromRequest} matches the CSRF token
 *     generated from the session-id, otherwise false
 */
public boolean validateCSRFToken(String sessionId, String csrfTokenFromRequest) {
  String csrfTokenFromSessionId = getCSRFTokenFromSessionId(sessionId);
  // Guard both sides: previously a null request token caused an NPE here.
  if (StringUtils.isEmpty(csrfTokenFromSessionId) || StringUtils.isEmpty(csrfTokenFromRequest)) {
    return false;
  }
  // Constant-time comparison to avoid leaking token contents via timing.
  return java.security.MessageDigest.isEqual(
      csrfTokenFromRequest.getBytes(java.nio.charset.StandardCharsets.UTF_8),
      csrfTokenFromSessionId.getBytes(java.nio.charset.StandardCharsets.UTF_8));
}
|
@param sessionId
@param csrfTokenFromRequest
@return True if the csrfTokenFromRequest matches with the CSRFToken generated from session-id,
otherwise false
|
validateCSRFToken
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/webapp/CSRFTokenUtility.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/webapp/CSRFTokenUtility.java
|
Apache-2.0
|
/**
 * Gets the status of the Azkaban cluster, choosing the containerized variant
 * when containerized dispatch is enabled.
 *
 * @return the cluster {@link Status}
 */
public Status getStatus() {
  return isContainerizedDispatchMethodEnabled()
      ? getContainerizedStatus()
      : getClusterStatus();
}
|
Gets the status of the azkaban cluster.
@return Status
|
getStatus
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/webapp/StatusService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/webapp/StatusService.java
|
Apache-2.0
|
/**
 * Builds the {@link Status} for a non-containerized Azkaban cluster: web
 * server version and pid, installation path, memory usage, DB status, and the
 * active executors.
 *
 * @return the assembled cluster {@link Status}
 */
private Status getClusterStatus() {
  final ClusterStatus status = new ClusterStatus(
      getVersion(),
      getPid(),
      getInstallationPath(),
      getUsedMemory(),
      getMaxMemory(),
      getDbStatus(),
      getActiveExecutors());
  return status;
}
|
This returns implementation instance for Status containing status information for Azkaban
cluster pertaining to web server, memory, active executors etc.
@return Status
|
getClusterStatus
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/webapp/StatusService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/webapp/StatusService.java
|
Apache-2.0
|
/**
 * Stores a value as a session attribute on the request, creating the session
 * if it does not yet exist.
 *
 * @param request the HTTP request whose session is used
 * @param key     attribute name
 * @param value   attribute value
 */
protected void setSessionValue(final HttpServletRequest request, final String key, final Object value) {
  request.getSession(true).setAttribute(key, value);
}
|
Sets a session attribute value on the request, creating the session if necessary.
|
setSessionValue
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/webapp/servlet/AbstractAzkabanServlet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/webapp/servlet/AbstractAzkabanServlet.java
|
Apache-2.0
|
/**
 * Appends a value to a list stored as a session attribute under {@code key},
 * creating the session and the list if they do not yet exist.
 *
 * @param request the HTTP request whose session is used
 * @param key     attribute name holding the list
 * @param value   value to append
 */
protected void addSessionValue(final HttpServletRequest request, final String key, final Object value) {
  // Parameterized instead of raw List. The cast is safe as long as only this
  // method writes the attribute; other writers storing a non-List would have
  // broken the raw version the same way.
  @SuppressWarnings("unchecked")
  List<Object> values = (List<Object>) request.getSession(true).getAttribute(key);
  if (values == null) {
    values = new ArrayList<>();
  }
  values.add(value);
  request.getSession(true).setAttribute(key, values);
}
|
Adds a session value to the request
|
addSessionValue
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/webapp/servlet/AbstractAzkabanServlet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/webapp/servlet/AbstractAzkabanServlet.java
|
Apache-2.0
|
/**
 * Sets an error message in the azkaban.failure.message cookie so the web
 * client's javascript can display it.
 *
 * @param response HTTP response the cookie is added to
 * @param errorMsg message to store (null clears the message)
 */
protected void setErrorMessageInCookie(final HttpServletResponse response, final String errorMsg) {
  final Cookie errorCookie = new Cookie(AZKABAN_FAILURE_MESSAGE, errorMsg);
  errorCookie.setPath("/");
  response.addCookie(errorCookie);
}
|
Sets an error message in azkaban.failure.message in the cookie. This will be used by the web
client javascript to somehow display the message
|
setErrorMessageInCookie
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/webapp/servlet/AbstractAzkabanServlet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/webapp/servlet/AbstractAzkabanServlet.java
|
Apache-2.0
|
/**
 * Sets a warning message in the azkaban.warn.message cookie so the web
 * client's javascript can display it.
 *
 * @param response HTTP response the cookie is added to
 * @param errorMsg message to store (null clears the message)
 */
protected void setWarnMessageInCookie(final HttpServletResponse response, final String errorMsg) {
  final Cookie warnCookie = new Cookie(AZKABAN_WARN_MESSAGE, errorMsg);
  warnCookie.setPath("/");
  response.addCookie(warnCookie);
}
|
Sets a warning message in azkaban.warn.message in the cookie. This will be used by the web
client javascript to somehow display the message
|
setWarnMessageInCookie
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/webapp/servlet/AbstractAzkabanServlet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/webapp/servlet/AbstractAzkabanServlet.java
|
Apache-2.0
|
/**
 * Sets a success message in the azkaban.success.message cookie so the web
 * client's javascript can display it.
 *
 * @param response HTTP response the cookie is added to
 * @param message  message to store (null clears the message)
 */
protected void setSuccessMessageInCookie(final HttpServletResponse response, final String message) {
  final Cookie successCookie = new Cookie(AZKABAN_SUCCESS_MESSAGE, message);
  successCookie.setPath("/");
  response.addCookie(successCookie);
}
|
Sets a message in azkaban.success.message in the cookie. This will be used by the web client
javascript to somehow display the message
|
setSuccessMessageInCookie
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/webapp/servlet/AbstractAzkabanServlet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/webapp/servlet/AbstractAzkabanServlet.java
|
Apache-2.0
|
/**
 * Retrieves the success message from the azkaban.success.message cookie.
 *
 * @param request HTTP request to read the cookie from
 * @return the cookie value, or null if the cookie is absent
 */
protected String getSuccessMessageFromCookie(final HttpServletRequest request) {
  final Cookie cookie = getCookieByName(request, AZKABAN_SUCCESS_MESSAGE);
  return cookie == null ? null : cookie.getValue();
}
|
Retrieves a success message from a cookie. azkaban.success.message
|
getSuccessMessageFromCookie
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/webapp/servlet/AbstractAzkabanServlet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/webapp/servlet/AbstractAzkabanServlet.java
|
Apache-2.0
|
/**
 * Retrieves the warning message from the azkaban.warn.message cookie.
 *
 * @param request HTTP request to read the cookie from
 * @return the cookie value, or null if the cookie is absent
 */
protected String getWarnMessageFromCookie(final HttpServletRequest request) {
  final Cookie cookie = getCookieByName(request, AZKABAN_WARN_MESSAGE);
  return cookie == null ? null : cookie.getValue();
}
|
Retrieves a warn message from a cookie. azkaban.warn.message
|
getWarnMessageFromCookie
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/webapp/servlet/AbstractAzkabanServlet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/webapp/servlet/AbstractAzkabanServlet.java
|
Apache-2.0
|
/**
 * Retrieves the error message from the azkaban.failure.message cookie.
 *
 * @param request HTTP request to read the cookie from
 * @return the cookie value, or null if the cookie is absent
 */
protected String getErrorMessageFromCookie(final HttpServletRequest request) {
  final Cookie cookie = getCookieByName(request, AZKABAN_FAILURE_MESSAGE);
  return cookie == null ? null : cookie.getValue();
}
|
Retrieves an error message from a cookie. azkaban.failure.message
|
getErrorMessageFromCookie
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/webapp/servlet/AbstractAzkabanServlet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/webapp/servlet/AbstractAzkabanServlet.java
|
Apache-2.0
|
/**
 * Retrieves a cookie by name via a linear scan of the request's cookies.
 * Potential performance concern only if many cookie lookups are performed per
 * request.
 *
 * @param request HTTP request to read cookies from
 * @param name    cookie name to look for
 * @return the first matching cookie, or null if none matches
 */
protected Cookie getCookieByName(final HttpServletRequest request, final String name) {
  final Cookie[] cookies = request.getCookies();
  if (cookies == null) {
    return null;
  }
  for (final Cookie candidate : cookies) {
    if (name.equals(candidate.getName())) {
      return candidate;
    }
  }
  return null;
}
|
Retrieves a cookie by name. Potential issue in performance if a lot of cookie variables are
used.
|
getCookieByName
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/webapp/servlet/AbstractAzkabanServlet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/webapp/servlet/AbstractAzkabanServlet.java
|
Apache-2.0
|
/**
 * Creates a new velocity page populated with the common template context
 * (branding, notes, timezone, helper classes) and, when a session is present,
 * the current user id. One-shot error/warn/success messages are read from
 * their cookies, exposed to the template, and then cleared.
 *
 * @param req      HTTP request
 * @param resp     HTTP response (message cookies are cleared on it)
 * @param session  current session, may be null
 * @param template velocity template name
 * @return the populated {@link Page}
 */
protected Page newPage(final HttpServletRequest req, final HttpServletResponse resp, final Session session,
    final String template) {
  final Page page = new Page(req, resp, getApplication().getVelocityEngine(), template);
  page.add("version", jarVersion);
  page.add("azkaban_name", this.name);
  page.add("azkaban_label", this.label);
  page.add("azkaban_color", this.color);
  page.add("azkaban_depth", this.depth);
  page.add("note_type", NoteServlet.type);
  page.add("note_message", NoteServlet.message);
  page.add("note_url", NoteServlet.url);
  page.add("timezone", TimeZone.getDefault().getID());
  page.add("currentTime", (new DateTime()).getMillis());
  page.add("size", getDisplayExecutionPageSize());
  // Helper classes exposed to the templates for static-method calls.
  page.add("System", System.class);
  page.add("TimeUtils", TimeUtils.class);
  page.add("WebUtils", WebUtils.class);
  page.add("HTMLFormElementType_TEXTAREA", HTMLFormElementType.TEXTAREA);
  if (session != null && session.getUser() != null) {
    page.add("user_id", session.getUser().getUserId());
  }
  // Each message cookie is one-shot: read it, pass it to the template (the
  // literal string "null" signals "no message" to the javascript), then clear.
  final String errorMsg = getErrorMessageFromCookie(req);
  page.add("error_message", errorMsg == null || errorMsg.isEmpty() ? "null" : errorMsg);
  setErrorMessageInCookie(resp, null);
  final String warnMsg = getWarnMessageFromCookie(req);
  page.add("warn_message", warnMsg == null || warnMsg.isEmpty() ? "null" : warnMsg);
  setWarnMessageInCookie(resp, null);
  final String successMsg = getSuccessMessageFromCookie(req);
  page.add("success_message", successMsg == null || successMsg.isEmpty() ? "null" : successMsg);
  setSuccessMessageInCookie(resp, null);
  // @TODO, allow more than one type of viewer. For time sake, I only install
  // the first one
  if (this.viewerPlugins != null && !this.viewerPlugins.isEmpty()) {
    page.add("viewers", this.viewerPlugins);
  }
  if (this.triggerPlugins != null && !this.triggerPlugins.isEmpty()) {
    page.add("triggerPlugins", this.triggerPlugins);
  }
  return page;
}
|
Creates a new velocity page to use. With session.
|
newPage
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/webapp/servlet/AbstractAzkabanServlet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/webapp/servlet/AbstractAzkabanServlet.java
|
Apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.