code
stringlengths 23
201k
| docstring
stringlengths 17
96.2k
| func_name
stringlengths 0
235
| language
stringclasses 1
value | repo
stringlengths 8
72
| path
stringlengths 11
317
| url
stringlengths 57
377
| license
stringclasses 7
values |
|---|---|---|---|---|---|---|---|
// Builds the URI this test expects for a reverse-proxied containerized dispatch.
// Delegates to buildExecutorUri with a null host, port 1 and path "container";
// the varargs are passed as a null array, i.e. no extra query parameters.
public URI getExpectedReverseProxyContainerizedURI() throws IOException {
return buildExecutorUri(null, 1, "container", false, DispatchMethod.CONTAINERIZED, (Pair<String, String>[]) null);
}
|
This test tries to verify that the flow is finalized and restarted if the dispatch fails.
@throws Exception
|
getExpectedReverseProxyContainerizedURI
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ContainerizedDispatchManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ContainerizedDispatchManagerTest.java
|
Apache-2.0
|
/**
 * Test override that records the URI produced by the real implementation so
 * assertions can inspect the last URI built.
 * NOTE(review): the field name "lastBuildExecutorUriRespone" carries a typo
 * ("Respone"); it is kept as-is because its getter uses the same spelling.
 */
@Override
public URI buildExecutorUri(String host, int port, String path,
boolean isHttp, final DispatchMethod dispatchMethod, Pair<String, String>... params) throws IOException {
// Delegate to the production logic, then remember the result for the test.
this.lastBuildExecutorUriRespone = super.buildExecutorUri(host, port, path, isHttp, dispatchMethod, params);
return lastBuildExecutorUriRespone;
}
|
This test tries to verify that the flow is finalized and restarted if the dispatch fails.
@throws Exception
|
buildExecutorUri
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ContainerizedDispatchManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ContainerizedDispatchManagerTest.java
|
Apache-2.0
|
/**
 * Test override that captures the URI and parameters of the last POST and
 * returns the canned response configured via setNextHttpPostResponse, instead
 * of performing any real HTTP call.
 */
@Override
public String httpPost(URI uri, Optional<Integer> httpTimeout,
List<Pair<String, String>> params)
throws IOException {
this.lastHttpPostUri = uri;
this.lastHttpPostParams = params;
return nextHttpPostResponse;
}
|
This test tries to verify that the flow is finalized and restarted if the dispatch fails.
@throws Exception
|
httpPost
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ContainerizedDispatchManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ContainerizedDispatchManagerTest.java
|
Apache-2.0
|
// Configures the canned response that the next httpPost call will return.
public void setNextHttpPostResponse(String nextHttpPostResponse) {
this.nextHttpPostResponse = nextHttpPostResponse;
}
|
This test tries to verify that the flow is finalized and restarted if the dispatch fails.
@throws Exception
|
setNextHttpPostResponse
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ContainerizedDispatchManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ContainerizedDispatchManagerTest.java
|
Apache-2.0
|
// Returns the URI recorded by the last buildExecutorUri call (name keeps the
// original "Respone" typo to match the backing field).
public URI getLastBuildExecutorUriRespone() {
return lastBuildExecutorUriRespone;
}
|
This test tries to verify that the flow is finalized and restarted if the dispatch fails.
@throws Exception
|
getLastBuildExecutorUriRespone
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ContainerizedDispatchManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ContainerizedDispatchManagerTest.java
|
Apache-2.0
|
// Returns the URI captured by the most recent httpPost invocation.
public URI getLastHttpPostUri() {
return lastHttpPostUri;
}
|
This test tries to verify that the flow is finalized and restarted if the dispatch fails.
@throws Exception
|
getLastHttpPostUri
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ContainerizedDispatchManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ContainerizedDispatchManagerTest.java
|
Apache-2.0
|
// Returns the parameter list captured by the most recent httpPost invocation.
public List<Pair<String, String>> getLastHttpPostParams() {
return lastHttpPostParams;
}
|
This test tries to verify that the flow is finalized and restarted if the dispatch fails.
@throws Exception
|
getLastHttpPostParams
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ContainerizedDispatchManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ContainerizedDispatchManagerTest.java
|
Apache-2.0
|
/**
 * Test teardown: shuts down the dispatch manager, when one was created, so it
 * does not leak between test cases.
 */
@After
public void shutdown() {
// Nothing to clean up when the test never instantiated a manager.
if (this.containerizedDispatchManager == null) {
return;
}
this.containerizedDispatchManager.shutdown();
}
|
This test tries to verify that the flow is finalized and restarted if the dispatch fails.
@throws Exception
|
shutdown
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ContainerizedDispatchManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ContainerizedDispatchManagerTest.java
|
Apache-2.0
|
/**
 * Verifies that YARN application ids are parsed from executor log chunks:
 * a single id, multiple ids across lines (attempt ids must be ignored), and
 * an empty result when no new log data is available (zero-length chunk).
 *
 * @throws Exception on unexpected test failure
 */
@Test
public void testGetApplicationIdsFromLog() throws Exception {
when(this.executorLoader.fetchActiveFlowByExecId(this.flow1.getExecutionId()))
.thenReturn(new Pair<>(this.ref1, this.flow1));
// Verify that application ids are obtained successfully from the log data.
final String logData1 = "Submitted application_12345_6789.";
when(this.apiGateway.callWithReference(any(), eq(ConnectorParams.LOG_ACTION), any()))
.then(getLogChunksMock(logData1));
Set<String> appIds = this.controller.getApplicationIds(this.flow1, "job1", 0);
Assert.assertEquals(1, appIds.size());
Assert.assertEquals("12345_6789", appIds.iterator().next());
final String logData2 = " Submitted application_12345_6789.\n AttemptID: attempt_12345_6789. "
+ "Accepted application_98765_4321.";
when(this.apiGateway.callWithReference(any(), eq(ConnectorParams.LOG_ACTION), any()))
.then(getLogChunksMock(logData2));
appIds = this.controller.getApplicationIds(this.flow1, "job1", 0);
Assert.assertEquals(2, appIds.size());
// Use a parameterized iterator (the original raw Iterator defeated type checks).
// The assertion on ordering assumes getApplicationIds preserves insertion order.
final Iterator<String> iterator = appIds.iterator();
Assert.assertEquals("12345_6789", iterator.next());
Assert.assertEquals("98765_4321", iterator.next());
// Verify that an empty list is returned when log data length is 0 (no new data available).
when(this.apiGateway.callWithReference(any(), eq(ConnectorParams.LOG_ACTION), any()))
.then(getLogChunksMock(""));
Assert.assertEquals(0, this.controller.getApplicationIds(this.flow1, "job1", 0).size());
}
|
Test fetching application ids from log data.
@throws Exception the exception
|
testGetApplicationIdsFromLog
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerTest.java
|
Apache-2.0
|
/**
 * Builds a Mockito Answer that serves chunks of {@code logData}, mimicking the
 * executor's log endpoint. It scans the invocation's Pair-typed arguments for
 * the "offset" and "length" parameters and returns a map holding the chunk
 * actually served plus its offset and length.
 *
 * @param logData the full log text to serve in chunks
 * @return an Answer producing {offset, length, data} maps
 */
private Answer<Object> getLogChunksMock(final String logData) {
return invocationOnMock -> {
String offsetStr = null, lengthStr = null;
for (final Object arg : invocationOnMock.getArguments()) {
if (!(arg instanceof Pair)) {
continue;
}
// Wildcard-typed view avoids a raw-type cast; values are read as Strings.
final Pair<?, ?> pairArg = (Pair<?, ?>) arg;
if ("offset".equals(pairArg.getFirst())) {
offsetStr = (String) pairArg.getSecond();
} else if ("length".equals(pairArg.getFirst())) {
lengthStr = (String) pairArg.getSecond();
}
}
Assert.assertNotNull(offsetStr);
Assert.assertNotNull(lengthStr);
final int offset = Integer.parseInt(offsetStr);
final int length = Integer.parseInt(lengthStr);
// Clamp the start index: an offset past the end of the data must yield an
// empty chunk instead of a StringIndexOutOfBoundsException from substring.
final int start = Math.min(offset, logData.length());
final int actualLength = Math.min(length, logData.length() - start);
final String logChunk = logData.substring(start, start + actualLength);
return ImmutableMap.of("offset", offset, "length", actualLength, "data", logChunk);
};
}
|
Test fetching application ids from log data.
@throws Exception the exception
|
getLogChunksMock
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerTest.java
|
Apache-2.0
|
/**
 * Submits the given flow through the controller and registers it as an
 * unfinished flow so the loader mocks reflect the new state.
 *
 * @param flow the flow to submit
 * @param ref  the execution reference paired with the flow
 * @throws Exception if the submission fails
 */
private void submitFlow(final ExecutableFlow flow, final ExecutionReference ref) throws
Exception {
when(this.executorLoader.fetchExecutableFlow(flow.getExecutionId())).thenReturn(flow);
this.controller.submitExecutableFlow(flow, this.user.getUserId());
this.unfinishedFlows.put(flow.getExecutionId(), new Pair<>(ref, flow));
// Re-stub the loader so queries see the newly registered flow.
initializeUnfinishedFlowMock();
}
|
Test fetching application ids from log data.
@throws Exception the exception
|
submitFlow
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerTest.java
|
Apache-2.0
|
/**
 * Seeds the test's unfinished-flow map with the three fixture flows and wires
 * the executor-loader mocks to serve them.
 */
private void initializeUnfinishedFlows() throws Exception {
this.unfinishedFlows = ImmutableMap
.of(this.flow1.getExecutionId(), new Pair<>(this.ref1, this.flow1),
this.flow2.getExecutionId(), new Pair<>(this.ref2, this.flow2),
this.flow3.getExecutionId(), new Pair<>(this.ref3, this.flow3));
initializeUnfinishedFlowMock();
}
|
Test fetching application ids from log data.
@throws Exception the exception
|
initializeUnfinishedFlows
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerTest.java
|
Apache-2.0
|
/**
 * Stubs every executor-loader query about unfinished flows to answer from the
 * in-memory {@code unfinishedFlows} map, keeping the mocks consistent with the
 * test's view of active executions.
 */
private void initializeUnfinishedFlowMock() throws Exception {
when(this.executorLoader.fetchUnfinishedFlows()).thenReturn(this.unfinishedFlows);
// Lookup by execution id: return the matching pair, or null when absent.
when(this.executorLoader.fetchUnfinishedFlow(anyInt())).thenAnswer(
(Answer<Pair<ExecutionReference, ExecutableFlow>>) invocation -> {
Object[] arguments = invocation.getArguments();
int executionId = (Integer) arguments[0];
List<Pair<ExecutionReference, ExecutableFlow>> list = unfinishedFlows.values().stream()
.filter(entry -> entry.getSecond().getExecutionId() == executionId)
.collect(Collectors.toList());
return (list.isEmpty()) ? null : list.get(0);
});
// Lookup by (projectId, flowId): return the execution ids of all matches.
when(this.executorLoader.selectUnfinishedFlows(anyInt(), anyString())).thenAnswer(
(Answer<List<Integer>>) invocation -> {
Object[] arguments = invocation.getArguments();
int projectId = (Integer) arguments[0];
String flowId = (String) arguments[1];
return unfinishedFlows.values().stream()
.filter(entry -> entry.getSecond().getProjectId() == projectId && entry.getSecond().getFlowId().equals(flowId))
.map(entry -> entry.getSecond().getExecutionId())
.collect(Collectors.toList());
});
// No-arg variant: all unfinished execution ids.
when(this.executorLoader.selectUnfinishedFlows()).thenReturn(
new ArrayList<>(this.unfinishedFlows.keySet()));
}
|
Test fetching application ids from log data.
@throws Exception the exception
|
initializeUnfinishedFlowMock
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerTest.java
|
Apache-2.0
|
/**
 * Verifies that for a Spark job never routed to a cluster, the job link falls
 * back to the web-server-configured Spark History Server URL when the
 * Resource Manager link is no longer valid (the mocked RM response says the
 * application could not be read).
 */
@Test
public void jobLinkUrlBasedOnSparkHistoryServerUrlForUnroutedJobs() throws Exception {
mockStatic(AuthenticationUtils.class);
final HttpURLConnection connection = mock(HttpURLConnection.class);
when(connection.getInputStream()).thenReturn(
new ByteArrayInputStream("Failed to read the application".getBytes("UTF-8"))
);
// mock AuthenticationUtils so that RM job link is no longer valid
when(AuthenticationUtils.loginAuthenticatedURL(any(URL.class), anyString(), anyString()))
.thenReturn(connection);
// create a flow that contains one job that was never routed
final ExecutableNode node = createExecutableNode("testJob", "spark", null);
final ExecutableFlow flow = createSingleNodeFlow(node);
// populate azkaban web server properties
final Props azkProps = new Props();
final String resourceManagerUrl =
"http://localhost:8088/cluster/app/application_${application.id}";
azkProps.put(ConfigurationKeys.RESOURCE_MANAGER_JOB_URL, resourceManagerUrl);
final String historyServerUrl =
"http://localhost:19888/jobhistory/job/job_${application.id}";
azkProps.put(ConfigurationKeys.HISTORY_SERVER_JOB_URL, historyServerUrl);
final String sparkHistoryServerUrl =
"http://localhost:18080/history/application_${application.id}/1/jobs";
azkProps.put(ConfigurationKeys.SPARK_HISTORY_SERVER_JOB_URL, sparkHistoryServerUrl);
azkProps.put(ConfigurationKeys.AZKABAN_KEYTAB_PATH, "/fakepath");
azkProps.put(ConfigurationKeys.AZKABAN_KERBEROS_PRINCIPAL, "azkatest");
// final ExecutableFlow exFlow, final String jobId, final String applicationId, final Props azkProps)
final String applicationId = "123456789";
final String jobLinkUrl = ExecutionControllerUtils.createJobLinkUrl(
flow, node.getId(), applicationId, azkProps);
// Unrouted jobs use the legacy ${application.id} placeholder substitution.
final String expectedJobLinkUrl = sparkHistoryServerUrl.replace(
ExecutionControllerUtils.OLD_APPLICATION_ID, applicationId);
Assert.assertEquals(expectedJobLinkUrl, jobLinkUrl);
}
|
Verify for a Spark job that is not routed to any cluster, its job link url is based on Spark
History Server URL when the RM job link is invalid.
|
jobLinkUrlBasedOnSparkHistoryServerUrlForUnroutedJobs
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
Apache-2.0
|
/**
 * Verifies that for a job never routed to a cluster, the job link is built
 * from the Resource Manager URL when the RM job link validates successfully
 * (the mocked RM response is "SUCCESS").
 */
@Test
public void jobLinkUrlBasedOnResourceManagerUrlForUnroutedJobs() throws Exception {
mockStatic(AuthenticationUtils.class);
final HttpURLConnection connection = mock(HttpURLConnection.class);
when(connection.getInputStream()).thenReturn(
new ByteArrayInputStream("SUCCESS".getBytes("UTF-8"))
);
// mock AuthenticationUtils so that RM request to validate job link succeeds
when(AuthenticationUtils.loginAuthenticatedURL(any(URL.class), anyString(), anyString()))
.thenReturn(connection);
// create a flow that contains one node that was never routed, having no any cluster info
final ExecutableNode node = createExecutableNode("testJob", "spark", null);
final ExecutableFlow flow = createSingleNodeFlow(node);
// populate azkaban web server properties
final Props azkProps = new Props();
final String resourceManagerUrl =
"http://localhost:8088/cluster/app/application_${application.id}";
azkProps.put(ConfigurationKeys.RESOURCE_MANAGER_JOB_URL, resourceManagerUrl);
final String historyServerUrl =
"http://localhost:19888/jobhistory/job/job_${application.id}";
azkProps.put(ConfigurationKeys.HISTORY_SERVER_JOB_URL, historyServerUrl);
final String sparkHistoryServerUrl =
"http://localhost:18080/history/application_${application.id}/1/jobs";
azkProps.put(ConfigurationKeys.SPARK_HISTORY_SERVER_JOB_URL, sparkHistoryServerUrl);
azkProps.put(ConfigurationKeys.AZKABAN_KEYTAB_PATH, "/fakepath");
azkProps.put(ConfigurationKeys.AZKABAN_KERBEROS_PRINCIPAL, "azkatest");
// final ExecutableFlow exFlow, final String jobId, final String applicationId, final Props azkProps)
final String applicationId = "123456789";
final String jobLinkUrl = ExecutionControllerUtils.createJobLinkUrl(
flow, node.getId(), applicationId, azkProps);
Assert.assertEquals(resourceManagerUrl.replace(ExecutionControllerUtils.OLD_APPLICATION_ID,
applicationId), jobLinkUrl);
}
|
Verify for a given job that is not routed to any cluster, a job link url based on Resource
Manager URL is returned when the RM job link is still valid.
|
jobLinkUrlBasedOnResourceManagerUrlForUnroutedJobs
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
Apache-2.0
|
/**
 * Verifies that for a job never routed to a cluster, no job link URL is
 * produced when the connection to the Resource Manager (used to validate the
 * RM job link) throws.
 */
@Test
public void noJobLinkUrlForUnroutedJobWhenResourceManagerConnectionFails() throws Exception {
mockStatic(AuthenticationUtils.class);
// mock AuthenticationUtils so that RM request to validate job link fails
when(AuthenticationUtils.loginAuthenticatedURL(any(URL.class), anyString(), anyString()))
.thenThrow(new Exception("Connection failed"));
// create a flow that contains one node that was never routed, having no any cluster info
final ExecutableNode node = createExecutableNode("testJob", "spark", null);
final ExecutableFlow flow = createSingleNodeFlow(node);
// populate azkaban web server properties
final Props azkProps = new Props();
final String resourceManagerUrl =
"http://localhost:8088/cluster/app/application_${application.id}";
azkProps.put(ConfigurationKeys.RESOURCE_MANAGER_JOB_URL, resourceManagerUrl);
final String historyServerUrl =
"http://localhost:19888/jobhistory/job/job_${application.id}";
azkProps.put(ConfigurationKeys.HISTORY_SERVER_JOB_URL, historyServerUrl);
final String sparkHistoryServerUrl =
"http://localhost:18080/history/application_${application.id}/1/jobs";
azkProps.put(ConfigurationKeys.SPARK_HISTORY_SERVER_JOB_URL, sparkHistoryServerUrl);
azkProps.put(ConfigurationKeys.AZKABAN_KEYTAB_PATH, "/fakepath");
azkProps.put(ConfigurationKeys.AZKABAN_KERBEROS_PRINCIPAL, "azkatest");
// final ExecutableFlow exFlow, final String jobId, final String applicationId, final Props azkProps)
final String applicationId = "123456789";
final String jobLinkUrl = ExecutionControllerUtils.createJobLinkUrl(
flow, node.getId(), applicationId, azkProps);
// assertNull is the idiomatic form (matches the sibling tests) and yields a
// clearer failure message than assertEquals(null, ...).
Assert.assertNull(jobLinkUrl);
}
|
Verify for a given job that is not routed to any cluster, no job link URL is returned when the
connection to RM to validate the RM job link fails.
|
noJobLinkUrlForUnroutedJobWhenResourceManagerConnectionFails
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
Apache-2.0
|
/**
 * Verifies that for a Spark job previously routed to a cluster, the job link
 * falls back to the cluster's own Spark History Server URL when the RM job
 * link is invalid. Routed jobs use the NEW_APPLICATION_ID placeholder, taken
 * from the node's ClusterInfo rather than web-server props.
 */
@Test
public void jobLinkUrlBasedOnSparkHistoryServerUrlForRoutedJobs() throws Exception {
mockStatic(AuthenticationUtils.class);
final HttpURLConnection connection = mock(HttpURLConnection.class);
when(connection.getInputStream()).thenReturn(
new ByteArrayInputStream("Failed to read the application".getBytes("UTF-8"))
);
// mock AuthenticationUtils so that RM job link is no longer valid
when(AuthenticationUtils.loginAuthenticatedURL(any(URL.class), anyString(), anyString()))
.thenReturn(connection);
// create a flow that contains one job that was routed to a test cluster
final String resourceManagerUrl =
"http://localhost:8088/cluster/app/application_${application.id}";
final String historyServerUrl =
"http://localhost:19888/jobhistory/job/job_${application.id}";
final String sparkHistoryServerUrl =
"http://localhost:18080/history/application_${application.id}/1/jobs";
final String hadoopClusterUrl = "http://localhost:8088";
final ClusterInfo cluster = new ClusterInfo("testCluster", hadoopClusterUrl,
resourceManagerUrl, historyServerUrl, sparkHistoryServerUrl);
final ExecutableNode node = createExecutableNode("testJob", "spark", cluster);
final ExecutableFlow flow = createSingleNodeFlow(node);
// populate azkaban web server properties
final Props azkProps = new Props();
azkProps.put(ConfigurationKeys.AZKABAN_KEYTAB_PATH, "/fakepath");
azkProps.put(ConfigurationKeys.AZKABAN_KERBEROS_PRINCIPAL, "azkatest");
// final ExecutableFlow exFlow, final String jobId, final String applicationId, final Props azkProps)
final String applicationId = "123456789";
final String jobLinkUrl = ExecutionControllerUtils.createJobLinkUrl(
flow, node.getId(), applicationId, azkProps);
final String expectedJobLinkUrl =
sparkHistoryServerUrl.replace(ExecutionControllerUtils.NEW_APPLICATION_ID, applicationId);
Assert.assertEquals(expectedJobLinkUrl, jobLinkUrl);
}
|
Verify for a given Spark job routed to a cluster previously, its job link url is based on Spark
History Server URL when the RM job link is invalid.
|
jobLinkUrlBasedOnSparkHistoryServerUrlForRoutedJobs
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
Apache-2.0
|
/**
 * Verifies that for a job previously routed to a cluster, no job link URL is
 * produced when the cluster info is incomplete — here the History Server URL
 * is null — even though the other URLs are present.
 */
@Test
public void noJobLinkUrlForRoutedJobsWhenMissingFullClusterInfo() throws Exception {
mockStatic(AuthenticationUtils.class);
final HttpURLConnection connection = mock(HttpURLConnection.class);
when(connection.getInputStream()).thenReturn(
new ByteArrayInputStream("Failed to read the application".getBytes("UTF-8"))
);
// mock AuthenticationUtils so that RM job link is no longer valid
when(AuthenticationUtils.loginAuthenticatedURL(any(URL.class), anyString(), anyString()))
.thenReturn(connection);
final String resourceManagerUrl =
"http://localhost:8088/cluster/app/application_${application.id}";
final String sparkHistoryServerUrl =
"http://localhost:18080/history/application_${application.id}/1/jobs";
final String hadoopClusterUrl = "http://localhost:8088";
// create a cluster that is missing History Server URL
final ClusterInfo cluster = new ClusterInfo("testCluster", hadoopClusterUrl,
resourceManagerUrl, null, sparkHistoryServerUrl);
final ExecutableNode node = createExecutableNode("testJob", "spark", cluster);
// create a flow that contains one job that was routed to a test cluster
final ExecutableFlow flow = createSingleNodeFlow(node);
// populate azkaban web server properties
final Props azkProps = new Props();
azkProps.put(ConfigurationKeys.AZKABAN_KEYTAB_PATH, "/fakepath");
azkProps.put(ConfigurationKeys.AZKABAN_KERBEROS_PRINCIPAL, "azkatest");
// final ExecutableFlow exFlow, final String jobId, final String applicationId, final Props azkProps)
final String applicationId = "123456789";
final String jobLinkUrl = ExecutionControllerUtils.createJobLinkUrl(
flow, node.getId(), applicationId, azkProps);
Assert.assertNull(jobLinkUrl);
}
|
Verify for a given job that is routed to a cluster previously, no job link url is returned if
Resource Manager URL, Spark History Server URL or History Server URL, is missing.
|
noJobLinkUrlForRoutedJobsWhenMissingFullClusterInfo
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
Apache-2.0
|
/**
 * Verifies that for a job previously routed to a cluster, the job link is
 * built from the cluster's Resource Manager URL when the RM job link
 * validates successfully (mocked RM response is "SUCCESS").
 */
@Test
public void jobLinkUrlBasedOnResourceManagerUrlForRoutedJobs() throws Exception {
mockStatic(AuthenticationUtils.class);
final HttpURLConnection connection = mock(HttpURLConnection.class);
when(connection.getInputStream()).thenReturn(
new ByteArrayInputStream("SUCCESS".getBytes("UTF-8"))
);
// mock AuthenticationUtils so that RM request to validate job link succeeds
when(AuthenticationUtils.loginAuthenticatedURL(any(URL.class), anyString(), anyString()))
.thenReturn(connection);
// create a flow that contains one job that was routed to a test cluster
final String resourceManagerUrl =
"http://localhost:8088/cluster/app/application_${application.id}";
final String historyServerUrl =
"http://localhost:19888/jobhistory/job/job_${application.id}";
final String sparkHistoryServerUrl =
"http://localhost:18080/history/application_${application.id}/1/jobs";
final String hadoopClusterUrl = "http://localhost:8088";
final ClusterInfo cluster = new ClusterInfo("testCluster", hadoopClusterUrl,
resourceManagerUrl, historyServerUrl, sparkHistoryServerUrl);
final ExecutableNode node = createExecutableNode("testJob", "spark", cluster);
final ExecutableFlow flow = createSingleNodeFlow(node);
// populate azkaban web server properties
final Props azkProps = new Props();
azkProps.put(ConfigurationKeys.AZKABAN_KEYTAB_PATH, "/fakepath");
azkProps.put(ConfigurationKeys.AZKABAN_KERBEROS_PRINCIPAL, "azkatest");
// final ExecutableFlow exFlow, final String jobId, final String applicationId, final Props azkProps)
final String applicationId = "123456789";
final String jobLinkUrl = ExecutionControllerUtils.createJobLinkUrl(
flow, node.getId(), applicationId, azkProps);
// Routed jobs substitute the NEW_APPLICATION_ID placeholder.
final String expectedJobLinkUrl = resourceManagerUrl.replace(
ExecutionControllerUtils.NEW_APPLICATION_ID, applicationId);
Assert.assertEquals(expectedJobLinkUrl, jobLinkUrl);
}
|
Verify for a given job that is routed to a cluster previously, a job link url based on
Resource Manager URL is returned when the RM job link is still valid.
|
jobLinkUrlBasedOnResourceManagerUrlForRoutedJobs
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
Apache-2.0
|
/**
 * Verifies that for a job previously routed to a cluster, no job link URL is
 * produced when the connection to the RM (used to validate the RM job link)
 * throws.
 */
@Test
public void noJobLinkUrlForRoutedJobWhenResourceManagerConnectionFails() throws Exception {
mockStatic(AuthenticationUtils.class);
// mock AuthenticationUtils so that RM request to validate job link fails
when(AuthenticationUtils.loginAuthenticatedURL(any(URL.class), anyString(), anyString()))
.thenThrow(new Exception("RM Connection failed"));
// create a flow that contains one job that was routed to a test cluster
final String resourceManagerUrl =
"http://localhost:8088/cluster/app/application_${application.id}";
final String historyServerUrl =
"http://localhost:19888/jobhistory/job/job_${application.id}";
final String sparkHistoryServerUrl =
"http://localhost:18080/history/application_${application.id}/1/jobs";
final String hadoopClusterUrl = "http://localhost:8088";
final ClusterInfo cluster = new ClusterInfo("testCluster", hadoopClusterUrl,
resourceManagerUrl, historyServerUrl, sparkHistoryServerUrl);
final ExecutableNode node = createExecutableNode("testJob", "spark", cluster);
final ExecutableFlow flow = createSingleNodeFlow(node);
// populate azkaban web server properties
final Props azkProps = new Props();
azkProps.put(ConfigurationKeys.AZKABAN_KEYTAB_PATH, "/fakepath");
azkProps.put(ConfigurationKeys.AZKABAN_KERBEROS_PRINCIPAL, "azkatest");
// final ExecutableFlow exFlow, final String jobId, final String applicationId, final Props azkProps)
final String applicationId = "123456789";
final String jobLinkUrl = ExecutionControllerUtils.createJobLinkUrl(
flow, node.getId(), applicationId, azkProps);
Assert.assertNull(jobLinkUrl);
}
|
Verify for a given job that is routed to a cluster previously, no job link URL is returned when
the connection to RM to validate the RM job link fails.
|
noJobLinkUrlForRoutedJobWhenResourceManagerConnectionFails
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
Apache-2.0
|
/**
 * Creates a mocked ExecutableNode exposing the given id, job type and cluster
 * routing info.
 *
 * @param id          the node/job id to report
 * @param type        the job type to report (e.g. "spark")
 * @param clusterInfo the cluster the node was routed to, or null if unrouted
 * @return the stubbed node mock
 */
private ExecutableNode createExecutableNode(final String id, final String type,
final ClusterInfo clusterInfo) {
final ExecutableNode mockNode = mock(ExecutableNode.class);
// Stubbing order is irrelevant; grouped by accessor for readability.
when(mockNode.getId()).thenReturn(id);
when(mockNode.getType()).thenReturn(type);
when(mockNode.getClusterInfo()).thenReturn(clusterInfo);
return mockNode;
}
|
Verify for a given job that is routed to a cluster previously, no job link URL is returned when
the connection to RM to validate the RM job link fails.
|
createExecutableNode
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
Apache-2.0
|
/**
 * Creates a mocked ExecutableFlow that resolves every node path to the single
 * supplied node.
 *
 * @param node the node every getExecutableNodePath lookup returns
 * @return the stubbed flow mock
 */
private ExecutableFlow createSingleNodeFlow(final ExecutableNode node) {
final ExecutableFlow mockFlow = mock(ExecutableFlow.class);
when(mockFlow.getExecutableNodePath(anyString())).thenReturn(node);
return mockFlow;
}
|
Verify for a given job that is routed to a cluster previously, no job link URL is returned when
the connection to RM to validate the RM job link fails.
|
createSingleNodeFlow
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
Apache-2.0
|
/**
 * A flow that ends in EXECUTION_STOPPED — an allowed-retry status configured
 * by the user — is restarted as a user-defined retry: user retry count goes
 * to 1 while the system retry count stays 0.
 */
@Test
public void testGetFlowToRestartSuccess_EXECUTION_STOPPED() {
ExecutableFlow testFlow = new ExecutableFlow();
ExecutionOptions options = new ExecutionOptions();
options.addAllFlowParameters(ImmutableMap.of(
FlowParameters.FLOW_PARAM_ALLOWED_RETRY_STATUS, "EXECUTION_STOPPED,FAILED",
FlowParameters.FLOW_PARAM_MAX_RETRIES, "2"
));
testFlow.setExecutionOptions(options);
ExecutableFlow flowToRestart = ExecutionControllerUtils.getFlowToRestart(testFlow,
Status.EXECUTION_STOPPED);
assertNotNull(flowToRestart);
assertEquals(0, flowToRestart.getSystemDefinedRetryCount());
assertEquals(1, flowToRestart.getUserDefinedRetryCount());
}
|
Verify for a given job that is routed to a cluster previously, no job link URL is returned when
the connection to RM to validate the RM job link fails.
|
testGetFlowToRestartSuccess_EXECUTION_STOPPED
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
Apache-2.0
|
/**
 * Same as the EXECUTION_STOPPED retry case but without an explicit
 * max-retries flow parameter: the restart is still produced as a user-defined
 * retry (a default max presumably applies — behavior defined in
 * ExecutionControllerUtils.getFlowToRestart).
 */
@Test
public void testGetFlowToRestartSuccessWithoutMaxRetry_EXECUTION_STOPPED() {
ExecutableFlow testFlow = new ExecutableFlow();
ExecutionOptions options = new ExecutionOptions();
options.addAllFlowParameters(ImmutableMap.of(
FlowParameters.FLOW_PARAM_ALLOWED_RETRY_STATUS, "EXECUTION_STOPPED,FAILED"
));
testFlow.setExecutionOptions(options);
ExecutableFlow flowToRestart = ExecutionControllerUtils.getFlowToRestart(testFlow,
Status.EXECUTION_STOPPED);
assertNotNull(flowToRestart);
assertEquals(0, flowToRestart.getSystemDefinedRetryCount());
assertEquals(1, flowToRestart.getUserDefinedRetryCount());
}
|
Verify for a given job that is routed to a cluster previously, no job link URL is returned when
the connection to RM to validate the RM job link fails.
|
testGetFlowToRestartSuccessWithoutMaxRetry_EXECUTION_STOPPED
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
Apache-2.0
|
/**
 * A flow with no execution options cannot be restarted: getFlowToRestart
 * returns null instead of throwing.
 */
@Test
public void testGetFlowToRestartNoExecutionOptions() {
ExecutableFlow testFlow = new ExecutableFlow();
testFlow.setExecutionOptions(null);
ExecutableFlow flowToRestart = ExecutionControllerUtils.getFlowToRestart(testFlow,
Status.PREPARING);
assertNull(flowToRestart);
}
|
Verify for a given job that is routed to a cluster previously, no job link URL is returned when
the connection to RM to validate the RM job link fails.
|
testGetFlowToRestartNoExecutionOptions
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
Apache-2.0
|
/**
 * A flow that has already consumed its system-defined retry (count = 1) is
 * not restarted again for a system-retryable status.
 */
@Test
public void testGetFlowToRestartNoSystemRetriedExceed() {
ExecutableFlow testFlow = new ExecutableFlow();
testFlow.setExecutionOptions(new ExecutionOptions());
testFlow.setSystemDefinedRetryCount(1);
ExecutableFlow flowToRestart = ExecutionControllerUtils.getFlowToRestart(testFlow,
Status.PREPARING);
assertNull(flowToRestart);
}
|
Verify for a given job that is routed to a cluster previously, no job link URL is returned when
the connection to RM to validate the RM job link fails.
|
testGetFlowToRestartNoSystemRetriedExceed
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
Apache-2.0
|
/**
 * A terminal status not present in the user's allowed-retry list (KILLED)
 * yields no restart even when retries remain.
 */
@Test
public void testGetFlowToRestart_NoOperation_NotIncludedStatus_KILLED() {
ExecutableFlow testFlow = new ExecutableFlow();
ExecutionOptions options = new ExecutionOptions();
options.addAllFlowParameters(ImmutableMap.of(
FlowParameters.FLOW_PARAM_ALLOWED_RETRY_STATUS, "EXECUTION_STOPPED,FAILED",
FlowParameters.FLOW_PARAM_MAX_RETRIES, "2"
));
testFlow.setExecutionOptions(options);
ExecutableFlow flowToRestart = ExecutionControllerUtils.getFlowToRestart(testFlow,
Status.KILLED);
assertNull(flowToRestart);
}
|
Verify for a given job that is routed to a cluster previously, no job link URL is returned when
the connection to RM to validate the RM job link fails.
|
testGetFlowToRestart_NoOperation_NotIncludedStatus_KILLED
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
Apache-2.0
|
/**
 * For a PREPARING (system-triggered) failure a restart flow should be
 * produced: the system-defined retry count is bumped to 1, the user-defined
 * count stays 0, and the user's max-retries flow parameter is preserved.
 */
@Test
public void testGetFlowToRestartSuccess_PREPARING() {
  // Removed an unused ExecutableNode local that was created but never
  // attached to the flow under test.
  ExecutableFlow testFlow = new ExecutableFlow();
  ExecutionOptions options = new ExecutionOptions();
  options.addAllFlowParameters(ImmutableMap.of(
      FlowParameters.FLOW_PARAM_ALLOWED_RETRY_STATUS, "EXECUTION_STOPPED,FAILED",
      FlowParameters.FLOW_PARAM_MAX_RETRIES, "2"
  ));
  testFlow.setExecutionOptions(options);
  ExecutableFlow flowToRestart = ExecutionControllerUtils.getFlowToRestart(testFlow,
      Status.PREPARING);
  assertNotNull(flowToRestart);
  assertEquals(1, flowToRestart.getSystemDefinedRetryCount());
  assertEquals(0, flowToRestart.getUserDefinedRetryCount());
  // User-supplied max-retries parameter must carry over unchanged.
  assertEquals("2",
      flowToRestart
          .getExecutionOptions()
          .getFlowParameters()
          .get(FlowParameters.FLOW_PARAM_MAX_RETRIES));
}
|
Verify for a given job that is routed to a cluster previously, no job link URL is returned when
the connection to RM to validate the RM job link fails.
|
testGetFlowToRestartSuccess_PREPARING
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
Apache-2.0
|
/**
 * With max-retries set to 0, even an allowed status (EXECUTION_STOPPED)
 * must not yield a restart flow.
 */
@Test
public void testGetFlowToRestartFAIL_ALREADY_MAX_RETRY() throws RuntimeException {
  final ExecutableNode sparkNode = createExecutableNode("testJob", "spark", null);
  final ExecutableFlow stoppedFlow = createSingleNodeFlow(sparkNode);

  // Exhausted retry budget: allowed statuses present, but max retries is 0.
  final ExecutionOptions exhaustedRetries = new ExecutionOptions();
  exhaustedRetries.addAllFlowParameters(ImmutableMap.of(
      FlowParameters.FLOW_PARAM_ALLOWED_RETRY_STATUS, "EXECUTION_STOPPED,FAILED",
      FlowParameters.FLOW_PARAM_MAX_RETRIES, "0"));
  when(stoppedFlow.getExecutionOptions()).thenReturn(exhaustedRetries);

  assertNull(
      ExecutionControllerUtils.getFlowToRestart(stoppedFlow, Status.EXECUTION_STOPPED));
}
|
Verify for a given job that is routed to a cluster previously, no job link URL is returned when
the connection to RM to validate the RM job link fails.
|
testGetFlowToRestartFAIL_ALREADY_MAX_RETRY
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
|
Apache-2.0
|
/**
 * Verifies selectAndUpdateExecutionWithLocking with batch selection disabled
 * (first argument {@code false}) for READY, CONTAINERIZED executions while
 * the named DB lock is stubbed to always succeed.
 */
@Test
public void testLockSuccessSelectAndUpdateExecutionWithLockingWithoutBatch() throws Exception {
  // Stub the named DB lock so acquisition and release always succeed.
  when(mysqlNamedLock
      .getLock(any(DatabaseTransOperator.class), any(String.class), any(Integer.class)))
      .thenReturn(true);
  when(mysqlNamedLock.releaseLock(any(DatabaseTransOperator.class), any(String.class)))
      .thenReturn(true);
  final long currentTime = System.currentTimeMillis();
  // Queue a single READY containerized execution.
  final ExecutableFlow flow1 = submitNewFlow("exectest1", "exec1", currentTime,
      ExecutionOptions.DEFAULT_FLOW_PRIORITY, Status.READY, DispatchMethod.CONTAINERIZED);
  // Non-batch selection should pick up exactly that one execution.
  assertThat(
      this.executionFlowDao.selectAndUpdateExecutionWithLocking(false, 2, Status.READY,
          DispatchMethod.CONTAINERIZED).size())
      .isEqualTo(1);
  Set<Integer> expectedSet = new HashSet<>();
  expectedSet.add(flow1.getExecutionId());
  // NOTE(review): this second call is asserted to return the same execution,
  // but the first call has already selected-and-updated it — confirm the DAO
  // still reports it at this point.
  assertThat(this.executionFlowDao.selectAndUpdateExecutionWithLocking(false, 2, Status.READY,
      DispatchMethod.CONTAINERIZED))
      .isEqualTo(expectedSet);
}
|
This test method is written to verify that selectAndUpdateExecutionWithLocking is working as
expected when batch select is disabled and execution status to select READY.
@throws Exception
|
testLockSuccessSelectAndUpdateExecutionWithLockingWithoutBatch
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Verifies selectAndUpdateExecutionWithLocking with batch selection enabled
 * (first argument {@code true}): both READY containerized executions should
 * be selected, within the batch limit of 2.
 */
@Test
public void testLockSuccessSelectAndUpdateExecutionWithLockingWithBatch() throws Exception {
  // Stub the named DB lock so acquisition and release always succeed.
  when(mysqlNamedLock
      .getLock(any(DatabaseTransOperator.class), any(String.class), any(Integer.class)))
      .thenReturn(true);
  when(mysqlNamedLock.releaseLock(any(DatabaseTransOperator.class), any(String.class)))
      .thenReturn(true);
  final long currentTime = System.currentTimeMillis();
  // Two READY containerized executions queued at the same submit time.
  final ExecutableFlow flow1 = submitNewFlow("exectest1", "exec1", currentTime,
      ExecutionOptions.DEFAULT_FLOW_PRIORITY, Status.READY, DispatchMethod.CONTAINERIZED);
  final ExecutableFlow flow2 = submitNewFlow("exectest1", "exec1", currentTime,
      ExecutionOptions.DEFAULT_FLOW_PRIORITY, Status.READY, DispatchMethod.CONTAINERIZED);
  assertThat(
      this.executionFlowDao.selectAndUpdateExecutionWithLocking(true, 2, Status.READY, DispatchMethod.CONTAINERIZED).size())
      .isEqualTo(2);
  Set<Integer> expectedSet = new HashSet<>();
  expectedSet.add(flow1.getExecutionId());
  expectedSet.add(flow2.getExecutionId());
  // NOTE(review): second call asserted to return the same set even though the
  // first call already selected-and-updated these executions — confirm.
  assertThat(this.executionFlowDao.selectAndUpdateExecutionWithLocking(true, 2, Status.READY, DispatchMethod.CONTAINERIZED))
      .isEqualTo(expectedSet);
}
|
This method is used to verify that selectAndUpdateExecutionWithLocking is working as expected
when batch is enabled and execution status to select is READY.
@throws Exception
|
testLockSuccessSelectAndUpdateExecutionWithLockingWithBatch
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Verifies selectAndUpdateExecutionWithLocking for a single READY
 * containerized execution. Despite the test name, batch selection is
 * disabled here (first argument {@code false}) with a limit of 2.
 */
@Test
public void testLockSuccessSelectAndUpdateExecutionWithLockingWithoutLimit() throws Exception {
  // Stub the named DB lock so acquisition and release always succeed.
  when(mysqlNamedLock.getLock(any(DatabaseTransOperator.class), any(String.class), any(Integer.class)))
      .thenReturn(true);
  when(mysqlNamedLock.releaseLock(any(DatabaseTransOperator.class), any(String.class))).thenReturn(true);
  final long currentTime = System.currentTimeMillis();
  final ExecutableFlow flow1 = submitNewFlow("exectest1", "exec1", currentTime,
      ExecutionOptions.DEFAULT_FLOW_PRIORITY, Status.READY, DispatchMethod.CONTAINERIZED);
  assertThat(this.executionFlowDao.selectAndUpdateExecutionWithLocking(false,2, Status.READY,
      DispatchMethod.CONTAINERIZED).size())
      .isEqualTo(1);
  Set<Integer> expectedSet = new HashSet<>();
  expectedSet.add(flow1.getExecutionId());
  // NOTE(review): second call asserted to return the same execution after the
  // first call already selected-and-updated it — confirm.
  assertThat(this.executionFlowDao.selectAndUpdateExecutionWithLocking(false,2, Status.READY,
      DispatchMethod.CONTAINERIZED))
      .isEqualTo(expectedSet);
}
|
This method is used to verify that selectAndUpdateExecutionWithLocking is working as expected
when batch is enabled and execution status to select is READY.
@throws Exception
|
testLockSuccessSelectAndUpdateExecutionWithLockingWithoutLimit
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Verifies selectAndUpdateExecutionWithLocking with batch selection enabled
 * and a limit of 2: both READY containerized executions are selected.
 */
@Test
public void testLockSuccessSelectAndUpdateExecutionWithLockingWithLimit() throws Exception {
  // Stub the named DB lock so acquisition and release always succeed.
  when(mysqlNamedLock.getLock(any(DatabaseTransOperator.class), any(String.class), any(Integer.class)))
      .thenReturn(true);
  when(mysqlNamedLock.releaseLock(any(DatabaseTransOperator.class), any(String.class))).thenReturn(true);
  final long currentTime = System.currentTimeMillis();
  final ExecutableFlow flow1 = submitNewFlow("exectest1", "exec1", currentTime,
      ExecutionOptions.DEFAULT_FLOW_PRIORITY, Status.READY, DispatchMethod.CONTAINERIZED);
  final ExecutableFlow flow2 = submitNewFlow("exectest1", "exec1", currentTime,
      ExecutionOptions.DEFAULT_FLOW_PRIORITY, Status.READY, DispatchMethod.CONTAINERIZED);
  assertThat(this.executionFlowDao.selectAndUpdateExecutionWithLocking(true,2, Status.READY,
      DispatchMethod.CONTAINERIZED).size())
      .isEqualTo(2);
  Set<Integer> expectedSet = new HashSet<>();
  expectedSet.add(flow1.getExecutionId());
  expectedSet.add(flow2.getExecutionId());
  // NOTE(review): second call asserted to return the same set after the first
  // call already selected-and-updated these executions — confirm.
  assertThat(this.executionFlowDao.selectAndUpdateExecutionWithLocking(true,2, Status.READY,
      DispatchMethod.CONTAINERIZED))
      .isEqualTo(expectedSet);
}
|
This method is used to verify that selectAndUpdateExecutionWithLocking is working as expected
when batch is enabled and execution status to select is READY.
@throws Exception
|
testLockSuccessSelectAndUpdateExecutionWithLockingWithLimit
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Verifies that selectAndUpdateExecution hands out queued POLL executions in
 * descending flow-priority order (ties broken elsewhere), returning -1 when
 * nothing is queued. Each successful call consumes one queued execution, so
 * statement order matters.
 */
@Test
public void testSelectAndUpdateExecutionWithPriority() throws Exception {
  // Selecting executions when DB is empty
  assertThat(this.executionFlowDao.selectAndUpdateExecution(-1, true, DispatchMethod.POLL))
      .as("Expected no execution selected")
      .isEqualTo(-1);
  final long currentTime = System.currentTimeMillis();
  // Three flows with priorities default, default+5 and default+3.
  final ExecutableFlow lowPriorityFlow1 = submitNewFlow("exectest1", "exec1", currentTime,
      ExecutionOptions.DEFAULT_FLOW_PRIORITY, DispatchMethod.POLL);
  final ExecutableFlow highPriorityFlow = submitNewFlow("exectest1", "exec1", currentTime + 5,
      ExecutionOptions.DEFAULT_FLOW_PRIORITY + 5, DispatchMethod.POLL);
  final ExecutableFlow lowPriorityFlow2 = submitNewFlow("exectest1", "exec1", currentTime + 10,
      ExecutionOptions.DEFAULT_FLOW_PRIORITY + 3, DispatchMethod.POLL);
  // Highest priority first, then next-highest, then lowest.
  assertThat(this.executionFlowDao.selectAndUpdateExecution(-1, true, DispatchMethod.POLL))
      .as("Expected flow with highest priority")
      .isEqualTo(highPriorityFlow.getExecutionId());
  assertThat(this.executionFlowDao.selectAndUpdateExecution(-1, true, DispatchMethod.POLL))
      .as("Expected second flow with highest priority")
      .isEqualTo(lowPriorityFlow2.getExecutionId());
  assertThat(this.executionFlowDao.selectAndUpdateExecution(-1, true, DispatchMethod.POLL))
      .as("Expected flow with lowest priority")
      .isEqualTo(lowPriorityFlow1.getExecutionId());
  // Selecting executions when there are no more submitted flows left
  assertThat(this.executionFlowDao.selectAndUpdateExecution(-1, true, DispatchMethod.POLL))
      .as("Expected no execution selected")
      .isEqualTo(-1);
}
|
Verify that selectAndUpdateExecution returns queued POLL executions in
descending flow-priority order, and -1 when none remain.
@throws Exception
|
testSelectAndUpdateExecutionWithPriority
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Verifies that when all queued executions share the same priority,
 * selectAndUpdateExecution hands them out in submission (FIFO) order,
 * returning -1 when nothing is queued. Each call consumes one execution,
 * so statement order matters.
 */
@Test
public void testSelectAndUpdateExecutionWithSamePriority() throws Exception {
  // Selecting executions when DB is empty
  assertThat(this.executionFlowDao.selectAndUpdateExecution(-1, true, DispatchMethod.POLL))
      .as("Expected no execution selected")
      .isEqualTo(-1);
  final long currentTime = System.currentTimeMillis();
  // Three flows with identical priority, submitted 5 ms apart.
  final ExecutableFlow submittedFlow1 = submitNewFlow("exectest1", "exec1", currentTime,
      ExecutionOptions.DEFAULT_FLOW_PRIORITY + 3, DispatchMethod.POLL);
  final ExecutableFlow submittedFlow2 = submitNewFlow("exectest1", "exec1", currentTime + 5,
      ExecutionOptions.DEFAULT_FLOW_PRIORITY + 3, DispatchMethod.POLL);
  final ExecutableFlow submittedFlow3 = submitNewFlow("exectest1", "exec1", currentTime + 10,
      ExecutionOptions.DEFAULT_FLOW_PRIORITY + 3, DispatchMethod.POLL);
  assertThat(this.executionFlowDao.selectAndUpdateExecution(-1, true, DispatchMethod.POLL))
      .as("Expected first flow submitted")
      .isEqualTo(submittedFlow1.getExecutionId());
  assertThat(this.executionFlowDao.selectAndUpdateExecution(-1, true, DispatchMethod.POLL))
      .as("Expected second flow submitted")
      .isEqualTo(submittedFlow2.getExecutionId());
  assertThat(this.executionFlowDao.selectAndUpdateExecution(-1, true, DispatchMethod.POLL))
      .as("Expected last flow submitted")
      .isEqualTo(submittedFlow3.getExecutionId());
  // Selecting executions when there are no more submitted flows left
  assertThat(this.executionFlowDao.selectAndUpdateExecution(-1, true, DispatchMethod.POLL))
      .as("Expected no execution selected")
      .isEqualTo(-1);
}
|
This method is used to verify that selectAndUpdateExecutionWithLocking is working as expected
when batch is enabled and execution status to select is READY.
@throws Exception
|
testSelectAndUpdateExecutionWithSamePriority
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Forces the status column out of sync with the serialized flow and verifies
 * fetchExecutableFlow reports the directly-updated column value.
 */
@Test
public void testFlowStatusWithFetchExecutableFlows() throws Exception {
  final ExecutableFlow submitted = submitNewFlow("exectest1", "exec1",
      System.currentTimeMillis(), ExecutionOptions.DEFAULT_FLOW_PRIORITY, DispatchMethod.POLL);
  final int executionId = submitted.getExecutionId();

  // Overwrite the status column directly in the DB.
  makeFlowStatusInconsistent(executionId, Status.FAILED_FINISHING);

  assertThat(this.executionFlowDao.fetchExecutableFlow(executionId).getStatus())
      .isEqualTo(Status.FAILED_FINISHING);
}
|
This method is used to verify that selectAndUpdateExecutionWithLocking is working as expected
when batch is enabled and execution status to select is READY.
@throws Exception
|
testFlowStatusWithFetchExecutableFlows
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Forces the status column out of sync with the serialized flow and verifies
 * all four fetchFlowHistory variants (global, per-flow, by-status and
 * multi-filter) report the directly-updated column value, and that each
 * result matches the flow returned by fetchExecutableFlow.
 */
@Test
public void testFlowStatusWithFetchFlowHistory() throws Exception {
  final ExecutableFlow flow = submitNewFlow("exectest1", "exec1",
      System.currentTimeMillis(), ExecutionOptions.DEFAULT_FLOW_PRIORITY, DispatchMethod.POLL);
  int execId = flow.getExecutionId();
  // Overwrite the status column directly in the DB.
  makeFlowStatusInconsistent(execId, Status.FAILED_FINISHING);
  // fetch all executions
  final List<ExecutableFlow> flowList1 = this.executionFlowDao.fetchFlowHistory(0, 2);
  assertThat(flowList1).isNotEmpty();
  assertThat(flowList1.get(0).getStatus()).isEqualTo(Status.FAILED_FINISHING);
  assertThat(flowList1.get(0).getExecutionId()).isEqualTo(execId);
  // fetch executions of a flow
  final List<ExecutableFlow> flowList2 = this.executionFlowDao
      .fetchFlowHistory(flow.getProjectId(), flow.getId(), 0, 3);
  assertThat(flowList2).isNotEmpty();
  assertThat(flowList2.get(0).getStatus()).isEqualTo(Status.FAILED_FINISHING);
  assertThat(flowList2.get(0).getExecutionId()).isEqualTo(execId);
  // fetch flow executions by status
  final List<ExecutableFlow> flowList3 = this.executionFlowDao.fetchFlowHistory(
      flow.getProjectId(), flow.getId(), 0, 3, Status.FAILED_FINISHING);
  assertThat(flowList3).isNotEmpty();
  assertThat(flowList3.get(0).getExecutionId()).isEqualTo(execId);
  // fetch flow executions by multiple filters
  final List<ExecutableFlow> flowList4 = this.executionFlowDao
      .fetchFlowHistory("", "data", "", 0, -1, -1, 0, 16);
  assertThat(flowList4).isNotEmpty();
  assertThat(flowList4.get(0).getStatus()).isEqualTo(Status.FAILED_FINISHING);
  assertThat(flowList4.get(0).getExecutionId()).isEqualTo(execId);
  // All history variants must agree with the directly-fetched flow.
  final ExecutableFlow fetchedFlow = this.executionFlowDao.fetchExecutableFlow(execId);
  assertTwoFlowSame(fetchedFlow, flowList1.get(0));
  assertTwoFlowSame(fetchedFlow, flowList2.get(0));
  assertTwoFlowSame(fetchedFlow, flowList3.get(0));
  assertTwoFlowSame(fetchedFlow, flowList4.get(0));
}
|
This method is used to verify that selectAndUpdateExecutionWithLocking is working as expected
when batch is enabled and execution status to select is READY.
@throws Exception
|
testFlowStatusWithFetchFlowHistory
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Verifies fetchFlowHistory by start time returns all four created
 * executions (descending by submit/start time) and reflects a direct
 * status-column update on the newest one.
 */
@Test
public void fetchFlowStatusWithFlowHistoryByStartTime() throws Exception {
  final ExecutableFlow flow1 =
      createExecution(TimeUtils.convertDateTimeToUTCMillis("2018-09-01 10:00:00"),
          DispatchMethod.POLL, Status.PREPARING);
  // flow2 and flow3 only populate the history window; they are not asserted
  // on individually.
  final ExecutableFlow flow2 =
      createExecution(TimeUtils.convertDateTimeToUTCMillis("2018-09-01 09:00:00"),
          DispatchMethod.POLL, Status.PREPARING);
  final ExecutableFlow flow3 =
      createExecution(TimeUtils.convertDateTimeToUTCMillis("2018-09-01 09:00:00"),
          DispatchMethod.POLL, Status.PREPARING);
  final ExecutableFlow flow4 =
      createExecution(TimeUtils.convertDateTimeToUTCMillis("2018-09-01 08:00:00"),
          DispatchMethod.POLL, Status.QUEUED);
  // Overwrite flow1's status column directly in the DB.
  makeFlowStatusInconsistent(flow1.getExecutionId(), Status.FAILED_FINISHING);
  final List<ExecutableFlow> flowList = this.executionFlowDao.fetchFlowHistory(
      flow1.getProjectId(), flow1.getFlowId(),
      TimeUtils.convertDateTimeToUTCMillis("2018-09-01 08:00:00"));
  assertThat(flowList).hasSize(4);
  // Newest execution first, carrying the directly-updated status.
  assertThat(flowList.get(0).getExecutionId()).isEqualTo(flow1.getExecutionId());
  assertThat(flowList.get(0).getStatus()).isEqualTo(Status.FAILED_FINISHING);
  assertThat(flowList.get(3).getExecutionId()).isEqualTo(flow4.getExecutionId());
  assertThat(flowList.get(3).getStatus()).isEqualTo(Status.QUEUED);
}
|
This method is used to verify that selectAndUpdateExecutionWithLocking is working as expected
when batch is enabled and execution status to select is READY.
@throws Exception
|
fetchFlowStatusWithFlowHistoryByStartTime
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Verifies fetchUnfinishedFlows reflects a direct status-column update:
 * QUEUED before, FAILED_FINISHING after.
 */
@Test
public void testFlowStatusWithFetchUnfinishedFlows() throws Exception {
  final ExecutableFlow queuedFlow = createExecutionAndAssign(Status.QUEUED, DispatchMethod.POLL,
      this.executorDao.addExecutor("test", 1));

  Map<Integer, Pair<ExecutionReference, ExecutableFlow>> unfinished =
      this.fetchActiveFlowDao.fetchUnfinishedFlows();
  assertThat(unfinished.containsKey(queuedFlow.getExecutionId())).isTrue();
  assertThat(unfinished.get(queuedFlow.getExecutionId()).getSecond().getStatus())
      .isEqualTo(Status.QUEUED);

  // After the raw column update, the same fetch must report the new status.
  makeFlowStatusInconsistent(queuedFlow.getExecutionId(), Status.FAILED_FINISHING);
  unfinished = this.fetchActiveFlowDao.fetchUnfinishedFlows();
  assertThat(unfinished.containsKey(queuedFlow.getExecutionId())).isTrue();
  assertThat(unfinished.get(queuedFlow.getExecutionId()).getSecond().getStatus())
      .isEqualTo(Status.FAILED_FINISHING);
}
|
This method is used to verify that selectAndUpdateExecutionWithLocking is working as expected
when batch is enabled and execution status to select is READY.
@throws Exception
|
testFlowStatusWithFetchUnfinishedFlows
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Verifies fetchActiveFlows reflects a direct status-column update:
 * RUNNING before, KILLING after.
 */
@Test
public void testFlowStatusWithFetchActiveFlows() throws Exception {
  final ExecutableFlow runningFlow = createExecutionAndAssign(Status.RUNNING, DispatchMethod.POLL,
      this.executorDao.addExecutor("test", 1));

  Map<Integer, Pair<ExecutionReference, ExecutableFlow>> active =
      this.fetchActiveFlowDao.fetchActiveFlows(DispatchMethod.POLL);
  assertThat(active.containsKey(runningFlow.getExecutionId())).isTrue();
  assertThat(active.get(runningFlow.getExecutionId()).getSecond().getStatus())
      .isEqualTo(Status.RUNNING);

  // After the raw column update, the same fetch must report the new status.
  makeFlowStatusInconsistent(runningFlow.getExecutionId(), Status.KILLING);
  active = this.fetchActiveFlowDao.fetchActiveFlows(DispatchMethod.POLL);
  assertThat(active.containsKey(runningFlow.getExecutionId())).isTrue();
  assertThat(active.get(runningFlow.getExecutionId()).getSecond().getStatus())
      .isEqualTo(Status.KILLING);
}
|
Verify that fetchActiveFlows reflects a direct status-column update on a
RUNNING flow (RUNNING before, KILLING after).
@throws Exception
|
testFlowStatusWithFetchActiveFlows
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Verifies fetchActiveFlowByExecId reflects a direct status-column update:
 * the fetched flow matches the original before, and reports KILLING after.
 */
@Test
public void testFlowStatusWithFetchActiveFlowByExecId() throws Exception {
  final ExecutableFlow runningFlow = createExecutionAndAssign(Status.RUNNING, DispatchMethod.POLL,
      this.executorDao.addExecutor("test", 1));

  Pair<ExecutionReference, ExecutableFlow> fetched =
      this.fetchActiveFlowDao.fetchActiveFlowByExecId(runningFlow.getExecutionId());
  assertThat(fetched).isNotNull();
  assertTwoFlowSame(fetched.getSecond(), runningFlow);

  // After the raw column update, the fetched flow carries the new status.
  makeFlowStatusInconsistent(runningFlow.getExecutionId(), Status.KILLING);
  fetched = this.fetchActiveFlowDao.fetchActiveFlowByExecId(runningFlow.getExecutionId());
  assertThat(fetched).isNotNull();
  assertThat(fetched.getSecond().getStatus()).isEqualTo(Status.KILLING);
}
|
This method is used to verify that selectAndUpdateExecutionWithLocking is working as expected
when batch is enabled and execution status to select is READY.
@throws Exception
|
testFlowStatusWithFetchActiveFlowByExecId
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Verifies fetchQueuedFlows honors the status column: a flow moved to
 * RUNNING disappears from the PREPARING queue, and reappears after the
 * column is forced back to PREPARING.
 */
@Test
public void testFlowStatusWithFetchQueuedFlows() throws Exception {
  final ExecutableFlow submitted = submitNewFlow("exectest1", "exec1", System.currentTimeMillis(),
      ExecutionOptions.DEFAULT_FLOW_PRIORITY, DispatchMethod.POLL);
  submitted.setStatus(Status.RUNNING);
  this.executionFlowDao.updateExecutableFlow(submitted);

  List<Pair<ExecutionReference, ExecutableFlow>> queued =
      this.executionFlowDao.fetchQueuedFlows(Status.PREPARING);
  assertThat(queued).isEmpty();

  // Force the status column back to PREPARING: the flow reappears.
  makeFlowStatusInconsistent(submitted.getExecutionId(), Status.PREPARING);
  queued = this.executionFlowDao.fetchQueuedFlows(Status.PREPARING);
  assertThat(queued).isNotEmpty();
  assertThat(queued.get(0).getSecond().getStatus()).isEqualTo(Status.PREPARING);
}
|
This method is used to verify that selectAndUpdateExecutionWithLocking is working as expected
when batch is enabled and execution status to select is READY.
@throws Exception
|
testFlowStatusWithFetchQueuedFlows
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Verifies fetchRecentlyFinishedFlows reflects a direct status-column
 * update: SUCCEEDED before, FAILED after.
 */
@Test
public void testFlowStatusWithFetchRecentlyFinishedFlows() throws Exception {
  final ExecutableFlow finished =
      createExecution(System.currentTimeMillis(), DispatchMethod.POLL, Status.SUCCEEDED);
  finished.setEndTime(System.currentTimeMillis());
  this.executionFlowDao.updateExecutableFlow(finished);

  List<ExecutableFlow> recentlyFinished =
      this.executionFlowDao.fetchRecentlyFinishedFlows(RECENTLY_FINISHED_LIFETIME);
  assertThat(recentlyFinished).isNotEmpty();
  assertTwoFlowSame(finished, recentlyFinished.get(0));

  // After the raw column update, the fetch must report the new status.
  makeFlowStatusInconsistent(finished.getExecutionId(), Status.FAILED);
  recentlyFinished =
      this.executionFlowDao.fetchRecentlyFinishedFlows(RECENTLY_FINISHED_LIFETIME);
  assertThat(recentlyFinished).isNotEmpty();
  assertThat(recentlyFinished.get(0).getStatus()).isEqualTo(Status.FAILED);
}
|
This method is used to verify that selectAndUpdateExecutionWithLocking is working as expected
when batch is enabled and execution status to select is READY.
@throws Exception
|
testFlowStatusWithFetchRecentlyFinishedFlows
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Regression test: updating a flow whose SLA options are null must not leave
 * the execution stuck — the NPE is handled during serialization and the
 * stored status stays READY (not PREPARING).
 */
@Test
public void testUpdateExecutableFlowNullSLAOptions() throws Exception {
  final ExecutableFlow flow = createTestFlow();
  this.executionFlowDao.uploadExecutableFlow(flow);
  final ExecutableFlow fetchFlow =
      this.executionFlowDao.fetchExecutableFlow(flow.getExecutionId());
  // set null sla option to provoke the NPE path during serialization
  fetchFlow.getExecutionOptions().setSlaOptions(null);
  // Try updating flow
  try {
    this.executionFlowDao.updateExecutableFlow(fetchFlow);
  } catch (ExecutorManagerException e) {
    // Use a real JUnit assertion here: the bare `assert` keyword is a no-op
    // unless the JVM runs with -ea, so the original check could never fail.
    Assert.assertTrue(e.getMessage().contains("NPE"));
  }
  // Fetch flow again, the status must be READY not PREPARING as NPE is handled properly when
  //flow object is serialized.
  final ExecutableFlow readyFlow =
      this.executionFlowDao.fetchExecutableFlow(fetchFlow.getExecutionId());
  assertThat(readyFlow.getStatus()).isEqualTo(Status.READY);
}
|
Test the resiliency of ExecutableFlow when Sla Option is set to NULL.
Make sure that the serialization of flow object does not break and the flow proceeds to
next valid state.
|
testUpdateExecutableFlowNullSLAOptions
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Verifies that a version set attached to an executable flow round-trips
 * through updateExecutableFlow and fetchExecutableFlow.
 */
@Test
public void testUpdateExecutionFlowVersionSet() throws Exception {
  final ExecutableFlow flow = createTestFlow();
  this.executionFlowDao.uploadExecutableFlow(flow);
  flow.setVersionSet(createVersionSet());
  this.executionFlowDao.updateExecutableFlow(flow);
  final ExecutableFlow fetchFlow =
      this.executionFlowDao.fetchExecutableFlow(flow.getExecutionId());
  // isNotNull() gives a descriptive failure message, unlike
  // Assert.assertTrue(x != null).
  assertThat(fetchFlow.getVersionSet()).isNotNull();
  assertThat(flow.getVersionSet().getImageToVersionMap())
      .isEqualTo(fetchFlow.getVersionSet().getImageToVersionMap());
}
|
Test when an executable flow sets its version set field, the information can be retrieved
after updateExecutableFlow and fetchExecutableFlow
@throws Exception
|
testUpdateExecutionFlowVersionSet
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Verifies the comma-separated string of terminal flow status numeric
 * values produced by getTerminalStatusesString.
 */
@Test
public void testGetTerminalStatusesString() throws Exception {
  // assertEquals reports expected vs actual on failure, unlike
  // assertTrue(actual.equals(expected)).
  Assert.assertEquals("50, 60, 65, 70", this.fetchActiveFlowDao.getTerminalStatusesString());
}
|
Test generating a string representing terminating flow statuses
@throws Exception
|
testGetTerminalStatusesString
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Directly overwrites the status column of an execution so it no longer
 * matches the status embedded in the serialized flow blob — used to verify
 * the DAOs report the column value.
 *
 * @param executionId execution whose status column is overwritten
 * @param status      status to write
 */
private void makeFlowStatusInconsistent(int executionId, Status status) {
  final String MODIFY_FLOW_STATUS = "UPDATE execution_flows SET status=? WHERE exec_id=?";
  try {
    dbOperator.update(MODIFY_FLOW_STATUS, status.getNumVal(), executionId);
  } catch (SQLException e) {
    // Fail fast instead of swallowing with printStackTrace(): a silent
    // failure here would let the dependent assertions pass vacuously.
    throw new RuntimeException("Failed to update status of execution " + executionId, e);
  }
}
|
Test generating a string representing terminating flow statuses
@throws Exception
|
makeFlowStatusInconsistent
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Creates and uploads a new execution with the default PREPARING status and
 * no explicit start time.
 */
private ExecutableFlow submitNewFlow(final String projectName, final String flowName,
    final long submitTime, final int flowPriority, DispatchMethod dispatchMethod) throws IOException,
    ExecutorManagerException {
  return submitNewFlow(projectName, flowName, submitTime, flowPriority, Status.PREPARING,
      Optional.empty(), dispatchMethod);
}
|
Test generating a string representing terminating flow statuses
@throws Exception
|
submitNewFlow
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Creates and uploads a new execution with the given status and no explicit
 * start time.
 */
private ExecutableFlow submitNewFlow(final String projectName, final String flowName,
    final long submitTime, final int flowPriority, final Status status, DispatchMethod dispatchMethod) throws IOException,
    ExecutorManagerException {
  return submitNewFlow(projectName, flowName, submitTime, flowPriority, status,
      Optional.empty(), dispatchMethod);
}
|
Test generating a string representing terminating flow statuses
@throws Exception
|
submitNewFlow
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Creates a test executable flow, applies the given status, submit time,
 * priority and dispatch method, uploads it through the DAO, and returns it.
 *
 * @param startTime optional start time stamped on the flow before upload
 */
private ExecutableFlow submitNewFlow(final String projectName, final String flowName,
    final long submitTime, final int flowPriority, final Status status,
    Optional<Long> startTime, DispatchMethod dispatchMethod) throws IOException,
    ExecutorManagerException {
  // NOTE(review): the factory is always given DispatchMethod.POLL even though
  // a dispatchMethod parameter is supplied; the explicit setDispatchMethod
  // below overrides it — confirm the hard-coded POLL is intentional.
  final ExecutableFlow flow = TestUtils.createTestExecutableFlow(projectName, flowName, DispatchMethod.POLL);
  flow.setStatus(status);
  flow.setSubmitTime(submitTime);
  flow.setSubmitUser("testUser");
  flow.setDispatchMethod(dispatchMethod);
  // Flow priority travels as a flow parameter, not a dedicated column.
  flow.getExecutionOptions().getFlowParameters().put(ExecutionOptions.FLOW_PRIORITY,
      String.valueOf(flowPriority));
  startTime.ifPresent(st -> flow.setStartTime(st));
  this.executionFlowDao.uploadExecutableFlow(flow);
  return flow;
}
|
Test generating a string representing terminating flow statuses
@throws Exception
|
submitNewFlow
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/** Asserts the two flows describe the same execution, including flow data. */
private void assertTwoFlowSame(final ExecutableFlow flow1, final ExecutableFlow flow2) {
  assertTwoFlowSame(flow1, flow2, true);
}
|
Test generating a string representing terminating flow statuses
@throws Exception
|
assertTwoFlowSame
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Asserts the two flows describe the same execution: id, status, times,
 * project/flow identity, version and submit user. When {@code
 * compareFlowData} is true, also compares the failure action and end nodes.
 */
private void assertTwoFlowSame(final ExecutableFlow flow1, final ExecutableFlow flow2,
    final boolean compareFlowData) {
  assertThat(flow1.getExecutionId()).isEqualTo(flow2.getExecutionId());
  assertThat(flow1.getStatus()).isEqualTo(flow2.getStatus());
  assertThat(flow1.getEndTime()).isEqualTo(flow2.getEndTime());
  assertThat(flow1.getStartTime()).isEqualTo(flow2.getStartTime());
  assertThat(flow1.getSubmitTime()).isEqualTo(flow2.getSubmitTime());
  assertThat(flow1.getFlowId()).isEqualTo(flow2.getFlowId());
  assertThat(flow1.getProjectId()).isEqualTo(flow2.getProjectId());
  assertThat(flow1.getVersion()).isEqualTo(flow2.getVersion());
  assertThat(flow1.getSubmitUser()).isEqualTo(flow2.getSubmitUser());
  if (compareFlowData) {
    assertThat(flow1.getExecutionOptions().getFailureAction())
        .isEqualTo(flow2.getExecutionOptions().getFailureAction());
    // End nodes compared as sets: order is not significant.
    assertThat(new HashSet<>(flow1.getEndNodes())).isEqualTo(new HashSet<>(flow2.getEndNodes()));
  }
}
|
Test generating a string representing terminating flow statuses
@throws Exception
|
assertTwoFlowSame
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Builds a fixed version-set fixture: three ACTIVE images (azkaban-base,
 * azkaban-config, spark) plus the matching MD5 of the JSON.
 *
 * @return a new version set
 */
private VersionSet createVersionSet() {
  final String versionSetJson =
      "{\"azkaban-base\":{\"version\":\"7.0.4\",\"path\":\"path1\","
          + "\"state\":\"ACTIVE\"},\"azkaban-config\":{\"version\":\"9.1.1\",\"path\":\"path2\","
          + "\"state\":\"ACTIVE\"},\"spark\":{\"version\":\"8.0\",\"path\":\"path3\","
          + "\"state\":\"ACTIVE\"}}";
  final String versionSetMd5 = "43966138aebfdc4438520cc5cd2aefa8";
  return new VersionSet(versionSetJson, versionSetMd5, 1);
}
|
Create a version set from scratch
@return a new version set
|
createVersionSet
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
|
Apache-2.0
|
/**
 * Starts a Jetty server exposing a single TLS-only (https) endpoint at {@code /simple}
 * on {@code JETTY_TLS_PORT}. Key material comes from the pre-packaged keystore; the
 * bundled truststore is wired in as well.
 *
 * @throws Exception if the embedded server fails to start
 */
private static void setupTlsEnabledServer() throws Exception {
  final SslSocketConnector secureConnector = new SslSocketConnector();
  secureConnector.setPort(JETTY_TLS_PORT);
  // Server-side identity: private key and certificate from the bundled keystore.
  secureConnector.setKeystore(KEYSTORE_PATH);
  secureConnector.setPassword(DEFAULT_PASSWORD);
  secureConnector.setKeyPassword(DEFAULT_PASSWORD);
  secureConnector.setTruststore(TRUSTSTORE_PATH);
  secureConnector.setTrustPassword(DEFAULT_PASSWORD);
  // A tiny pool is sufficient for these single-request sanity tests.
  final QueuedThreadPool queuedThreadPool = new QueuedThreadPool(2);
  tlsEnabledServer = new Server();
  tlsEnabledServer.setThreadPool(queuedThreadPool);
  tlsEnabledServer.addConnector(secureConnector);
  final Context root = new Context(tlsEnabledServer, "/", Context.SESSIONS);
  root.addServlet(new ServletHolder(new SimpleServlet()), "/simple");
  tlsEnabledServer.start();
}
|
For testing of ExecutorApiClient. This is currently focused on sanity testing TLS client side
support introduced within the ExecutorApiClient. The core functionality of ExecutorApiClient is
already expected to be tested through other means, such as through ExecutorApiGateway tests.
Highlights:
<p> 1. Launches a TLS enabled web server (https)
<p> 2. Creates ExecutorApiClient with TLS settings configured through properties
<p> 3. Verifies GET and POST requests against the TLS enabled server
<p> 4. Includes negative tests where the truststore certs don't match the private key
<p> 5. Uses pre-packaged keys and certs in a keystore and truststore.
|
setupTlsEnabledServer
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
Apache-2.0
|
/**
 * One-time setup: builds the TLS-enabled client properties, starts the TLS-only
 * Jetty server, and prepares a valid reverse-proxy property set for the proxy tests.
 *
 * @throws Exception if the embedded server cannot be started
 */
@BeforeClass
public static void setUp() throws Exception {
  tlsEnabledProps = new Props();
  tlsEnabledProps.put(EXECUTOR_CLIENT_TLS_ENABLED, "true");
  // Trust the test server's certificate via the bundled truststore resource.
  tlsEnabledProps
      .put(EXECUTOR_CLIENT_TRUSTSTORE_PATH, ExecutorApiClient.class.getResource("test-cacerts").getPath());
  tlsEnabledProps.put(EXECUTOR_CLIENT_TRUSTSTORE_PASSWORD, "changeit");
  setupTlsEnabledServer();
  validReverseProxyProps = new Props();
  validReverseProxyProps.put(AZKABAN_EXECUTOR_REVERSE_PROXY_ENABLED, "true");
  validReverseProxyProps.put(AZKABAN_EXECUTOR_REVERSE_PROXY_HOSTNAME, REVERSE_PROXY_HOST);
  validReverseProxyProps.put(AZKABAN_EXECUTOR_REVERSE_PROXY_PORT, REVERSE_PROXY_PORT);
}
|
For testing of ExecutorApiClient. This is currently focused on sanity testing TLS client side
support introduced within the ExecutorApiClient. The core functionality of ExecutorApiClient is
already expected to be tested through other means, such as through ExecutorApiGateway tests.
Highlights:
<p> 1. Launches a TLS enabled web server (https)
<p> 2. Creates ExecutorApiClient with TLS settings configured through properties
<p> 3. Verifies GET and POST requests against the TLS enabled server
<p> 4. Includes negative tests where the truststore certs don't match the private key
<p> 5. Uses pre-packaged keys and certs in a keystore and truststore.
|
setUp
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
Apache-2.0
|
/**
 * A client built from TLS-enabled properties must come up with its TLS socket
 * factory initialized.
 */
@Test
public void testTlsEnabledApiClient() {
  final ExecutorApiClient client = new ExecutorApiClient(this.tlsEnabledProps);
  Assert.assertNotNull(client);
  Assert.assertNotNull(client.getTlsSocketFactory());
}
|
For testing of ExecutorApiClient. This is currently focused on sanity testing TLS client side
support introduced within the ExecutorApiClient. The core functionality of ExecutorApiClient is
already expected to be tested through other means, such as through ExecutorApiGateway tests.
Highlights:
<p> 1. Launches a TLS enabled web server (https)
<p> 2. Creates ExecutorApiClient with TLS settings configured through properties
<p> 3. Verifies GET and POST requests against the TLS enabled server
<p> 4. Includes negative tests where the truststore certs don't match the private key
<p> 5. Uses pre-packaged keys and certs in a keystore and truststore.
|
testTlsEnabledApiClient
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
Apache-2.0
|
/**
 * Round-trips a POST through the TLS-enabled Jetty server and verifies the canned
 * response body is returned intact.
 */
@Test
public void testPostResponse() throws Exception {
  final ExecutorApiClient client = new ExecutorApiClient(this.tlsEnabledProps);
  final URI postUri = new URI(SimpleServlet.TLS_ENABLED_URI);
  final String response =
      client.doPost(postUri, DispatchMethod.CONTAINERIZED, Optional.empty(), null);
  Assert.assertEquals(SimpleServlet.POST_RESPONSE_STRING, response);
}
|
For testing of ExecutorApiClient. This is currently focused on sanity testing TLS client side
support introduced within the ExecutorApiClient. The core functionality of ExecutorApiClient is
already expected to be tested through other means, such as through ExecutorApiGateway tests.
Highlights:
<p> 1. Launches a TLS enabled web server (https)
<p> 2. Creates ExecutorApiClient with TLS settings configured through properties
<p> 3. Verifies GET and POST requests against the TLS enabled server
<p> 4. Includes negative tests where the truststore certs don't match the private key
<p> 5. Uses pre-packaged keys and certs in a keystore and truststore.
|
testPostResponse
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
Apache-2.0
|
/**
 * Verifies that {@code doPost} routes through the TLS (https) code path when the
 * client is TLS-enabled: {@code httpsPost} must be invoked exactly once and the
 * plaintext {@code httpPost} never.
 */
@Test
public void testDoPostCall() throws Exception {
  final ExecutorApiClient tlsEnabledClient = new ExecutorApiClient(this.tlsEnabledProps);
  ExecutorApiClient spyTlsEnabledClient = Mockito.spy(tlsEnabledClient);
  final String postResponse = spyTlsEnabledClient
      .doPost(new URI(SimpleServlet.TLS_ENABLED_URI), DispatchMethod.CONTAINERIZED,
          Optional.empty(), null);
  Assert.assertEquals(SimpleServlet.POST_RESPONSE_STRING, postResponse);
  // The TLS path must be taken exactly once ...
  Mockito.verify(spyTlsEnabledClient, Mockito.times(1)).httpsPost(Mockito.any(),
      Mockito.any(), Mockito.any());
  // ... and the plaintext path never (never() is the idiomatic form of times(0)).
  Mockito.verify(spyTlsEnabledClient, Mockito.never()).httpPost(Mockito.any(),
      Mockito.any(), Mockito.any());
}
|
For testing of ExecutorApiClient. This is currently focused on sanity testing TLS client side
support introduced within the ExecutorApiClient. The core functionality of ExecutorApiClient is
already expected to be tested through other means, such as through ExecutorApiGateway tests.
Highlights:
<p> 1. Launches a TLS enabled web server (https)
<p> 2. Creates ExecutorApiClient with TLS settings configured through properties
<p> 3. Verifies GET and POST requests against the TLS enabled server
<p> 4. Includes negative tests where the truststore certs don't match the private key
<p> 5. Uses pre-packaged keys and certs in a keystore and truststore.
|
testDoPostCall
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
Apache-2.0
|
/**
 * Sanity-checks a plain GET against the TLS-enabled server using the client's
 * TLS-configured {@code HttpClient}.
 */
@Test
public void testGetResponse() throws Exception {
  // Currently ExecutorApiClient does not make any GET requests.
  // (NOTE(review): the original comment said "POST requests", which contradicts doPost /
  // testPostResponse above — this test exists to sanity-check GET.)
  // This is for sanity testing that TLS enabled http-client continues working as expected with
  // GET requests as well.
  final ExecutorApiClient tlsEnabledClient = new ExecutorApiClient(this.tlsEnabledProps);
  final HttpClient httpClient = tlsEnabledClient.createHttpsClient(Optional.empty());
  final HttpGet httpGet = new HttpGet(SimpleServlet.TLS_ENABLED_URI);
  final HttpResponse httpResponse = httpClient.execute(httpGet);
  Assert.assertNotNull(httpResponse);
  final String getResponse = EntityUtils.toString(httpResponse.getEntity());
  Assert.assertEquals(SimpleServlet.GET_RESPONSE_STRING, getResponse);
}
|
For testing of ExecutorApiClient. This is currently focused on sanity testing TLS client side
support introduced within the ExecutorApiClient. The core functionality of ExecutorApiClient is
already expected to be tested through other means, such as through ExecutorApiGateway tests.
Highlights:
<p> 1. Launches a TLS enabled web server (https)
<p> 2. Creates ExecutorApiClient with TLS settings configured through properties
<p> 3. Verifies GET and POST requests against the TLS enabled server
<p> 4. Includes negative tests where the truststore certs don't match the private key
<p> 5. Uses pre-packaged keys and certs in a keystore and truststore.
|
testGetResponse
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
Apache-2.0
|
/**
 * Without any TLS configuration the client must still construct, but with no TLS
 * socket factory.
 */
@Test
public void testCreateDefaultExecutorApiClient() {
  final ExecutorApiClient plainClient = new ExecutorApiClient(new Props());
  Assert.assertNotNull(plainClient);
  Assert.assertNull(plainClient.getTlsSocketFactory());
}
|
For testing of ExecutorApiClient. This is currently focused on sanity testing TLS client side
support introduced within the ExecutorApiClient. The core functionality of ExecutorApiClient is
already expected to be tested through other means, such as through ExecutorApiGateway tests.
Highlights:
<p> 1. Launches a TLS enabled web server (https)
<p> 2. Creates ExecutorApiClient with TLS settings configured through properties
<p> 3. Verifies GET and POST requests against the TLS enabled server
<p> 4. Includes negative tests where the truststore certs don't match the private key
<p> 5. Uses pre-packaged keys and certs in a keystore and truststore.
|
testCreateDefaultExecutorApiClient
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
Apache-2.0
|
/**
 * Negative test: a client with no TLS configuration must fail the handshake when
 * talking to the TLS-only server.
 *
 * @throws Exception expected to be {@link SSLHandshakeException}
 */
@Test(expected = SSLHandshakeException.class)
public void testFailureWithClientTlsDisabled() throws Exception {
  final ExecutorApiClient tlsDisabledClient = new ExecutorApiClient(new Props());
  // This call should throw SSLHandshakeException; its return value is irrelevant,
  // so it is deliberately not assigned to an (unused) local.
  tlsDisabledClient.httpPost(new URI(SimpleServlet.TLS_ENABLED_URI), Optional.empty(), null);
  Assert.fail("Expected SSLHandshakeException was not thrown");
}
|
For testing of ExecutorApiClient. This is currently focused on sanity testing TLS client side
support introduced within the ExecutorApiClient. The core functionality of ExecutorApiClient is
already expected to be tested through other means, such as through ExecutorApiGateway tests.
Highlights:
<p> 1. Launches a TLS enabled web server (https)
<p> 2. Creates ExecutorApiClient with TLS settings configured through properties
<p> 3. Verifies GET and POST requests against the TLS enabled server
<p> 4. Includes negative tests where the truststore certs don't match the private key
<p> 5. Uses pre-packaged keys and certs in a keystore and truststore.
|
testFailureWithClientTlsDisabled
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
Apache-2.0
|
/**
 * Negative test: a truststore whose certs do not match the server's private key must
 * cause the TLS handshake to fail.
 *
 * @throws Exception expected to be {@link SSLHandshakeException}
 */
@Test(expected = SSLHandshakeException.class)
public void testFailureWithInvalidCerts() throws Exception {
  final Props tlsPropsWithInvalidCert = new Props(tlsEnabledProps);
  // Swap in a truststore that does NOT contain the server's certificate chain.
  tlsPropsWithInvalidCert.put(EXECUTOR_CLIENT_TRUSTSTORE_PATH,
      ExecutorApiClient.class.getResource("invalid-cacerts").getPath());
  final ExecutorApiClient tlsEnabledClient = new ExecutorApiClient(tlsPropsWithInvalidCert);
  // This call should throw SSLHandshakeException; its return value is irrelevant,
  // so it is deliberately not assigned to an (unused) local.
  tlsEnabledClient.httpPost(new URI(SimpleServlet.TLS_ENABLED_URI), Optional.empty(), null);
  Assert.fail("Expected SSLHandshakeException was not thrown");
}
|
For testing of ExecutorApiClient. This is currently focused on sanity testing TLS client side
support introduced within the ExecutorApiClient. The core functionality of ExecutorApiClient is
already expected to be tested through other means, such as through ExecutorApiGateway tests.
Highlights:
<p> 1. Launches a TLS enabled web server (https)
<p> 2. Creates ExecutorApiClient with TLS settings configured through properties
<p> 3. Verifies GET and POST requests against the TLS enabled server
<p> 4. Includes negative tests where the truststore certs don't match the private key
<p> 5. Uses pre-packaged keys and certs in a keystore and truststore.
|
testFailureWithInvalidCerts
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
Apache-2.0
|
/**
 * With a complete reverse-proxy property set, the client reports the proxy as
 * enabled and exposes the configured host and port.
 */
@Test
public void testReverseProxyValidProperties() {
  final Props validProps = new Props(validReverseProxyProps);
  final ExecutorApiClient validClient = new ExecutorApiClient(validProps);
  // assertTrue is the idiomatic form of assertEquals(true, ...).
  Assert.assertTrue(validClient.isReverseProxyEnabled());
  Assert.assertEquals(REVERSE_PROXY_HOST, validClient.getReverseProxyHost().get());
  Assert.assertEquals(REVERSE_PROXY_PORT, validClient.getReverseProxyPort().get().intValue());
}
|
For testing of ExecutorApiClient. This is currently focused on sanity testing TLS client side
support introduced within the ExecutorApiClient. The core functionality of ExecutorApiClient is
already expected to be tested through other means, such as through ExecutorApiGateway tests.
Highlights:
<p> 1. Launches a TLS enabled web server (https)
<p> 2. Creates ExecutorApiClient with TLS settings configured through properties
<p> 3. Verifies GET and POST requests against the TLS enabled server
<p> 4. Includes negative tests where the truststore certs don't match the private key
<p> 5. Uses pre-packaged keys and certs in a keystore and truststore.
|
testReverseProxyValidProperties
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
Apache-2.0
|
/**
 * Verifies fail-fast property validation when reverse proxy is enabled: both hostname
 * and port are mandatory, and construction must throw UndefinedPropertyException whose
 * message names the missing key. Once all properties are present, construction succeeds.
 */
@Test
public void testReverseProxyMissingProperties() {
  final Props invalidProps = new Props();
  invalidProps.put(AZKABAN_EXECUTOR_REVERSE_PROXY_ENABLED, "true");
  // missing reverse proxy host
  try {
    final ExecutorApiClient client = new ExecutorApiClient(invalidProps);
    Assert.fail();
  } catch (UndefinedPropertyException upe) {
    // The exception message must point at the missing hostname property.
    Assert.assertTrue(upe.getMessage().contains(AZKABAN_EXECUTOR_REVERSE_PROXY_HOSTNAME));
  }
  invalidProps.put(AZKABAN_EXECUTOR_REVERSE_PROXY_HOSTNAME, REVERSE_PROXY_HOST);
  // missing reverse proxy port
  try {
    final ExecutorApiClient client = new ExecutorApiClient(invalidProps);
    Assert.fail();
  } catch (UndefinedPropertyException upe) {
    // The exception message must point at the missing port property.
    Assert.assertTrue(upe.getMessage().contains(AZKABAN_EXECUTOR_REVERSE_PROXY_PORT));
  }
  // sanity check for success
  invalidProps.put(AZKABAN_EXECUTOR_REVERSE_PROXY_PORT, REVERSE_PROXY_PORT);
  try {
    final ExecutorApiClient client = new ExecutorApiClient(invalidProps);
    Assert.assertNotNull(client);
  } catch (UndefinedPropertyException upe) {
    Assert.fail();
  }
}
|
For testing of ExecutorApiClient. This is currently focused on sanity testing TLS client side
support introduced within the ExecutorApiClient. The core functionality of ExecutorApiClient is
already expected to be tested through other means, such as through ExecutorApiGateway tests.
Highlights:
<p> 1. Launches a TLS enabled web server (https)
<p> 2. Creates ExecutorApiClient with TLS settings configured through properties
<p> 3. Verifies GET and POST requests against the TLS enabled server
<p> 4. Includes negative tests where the truststore certs don't match the private key
<p> 5. Uses pre-packaged keys and certs in a keystore and truststore.
|
testReverseProxyMissingProperties
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
Apache-2.0
|
/**
 * With no reverse proxy configured, the built URI targets the executor host
 * directly over plain http.
 */
@Test
public void testBuildUriWithoutReverseProxy() throws Exception {
  final ExecutorApiClient directClient = new ExecutorApiClient(new Props());
  final URI executorUri = directClient.buildExecutorUri("localhost", JETTY_TLS_PORT, "executor",
      true, null, (Pair<String, String>[]) null);
  final String expectedUri = "http://localhost:" + JETTY_TLS_PORT + "/executor";
  Assert.assertEquals(expectedUri, executorUri.toString());
}
|
For testing of ExecutorApiClient. This is currently focused on sanity testing TLS client side
support introduced within the ExecutorApiClient. The core functionality of ExecutorApiClient is
already expected to be tested through other means, such as through ExecutorApiGateway tests.
Highlights:
<p> 1. Launches a TLS enabled web server (https)
<p> 2. Creates ExecutorApiClient with TLS settings configured through properties
<p> 3. Verifies GET and POST requests against the TLS enabled server
<p> 4. Includes negative tests where the truststore certs don't match the private key
<p> 5. Uses pre-packaged keys and certs in a keystore and truststore.
|
testBuildUriWithoutReverseProxy
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
Apache-2.0
|
/**
 * With the reverse proxy enabled, host and port arguments are ignored and the URI
 * is rooted at the configured proxy endpoint instead.
 */
@Test
public void testBuildUriWithReverseProxy() throws Exception {
  final ExecutorApiClient proxiedClient = new ExecutorApiClient(validReverseProxyProps);
  final URI executorUri = proxiedClient.buildExecutorUri(null, 0, "execid-101/container",
      false, DispatchMethod.CONTAINERIZED, (Pair<String, String>[]) null);
  final String expectedUri =
      "http://" + REVERSE_PROXY_HOST + ":" + REVERSE_PROXY_PORT + "/execid-101/container";
  Assert.assertEquals(expectedUri, executorUri.toString());
}
|
For testing of ExecutorApiClient. This is currently focused on sanity testing TLS client side
support introduced within the ExecutorApiClient. The core functionality of ExecutorApiClient is
already expected to be tested through other means, such as through ExecutorApiGateway tests.
Highlights:
<p> 1. Launches a TLS enabled web server (https)
<p> 2. Creates ExecutorApiClient with TLS settings configured through properties
<p> 3. Verifies GET and POST requests against the TLS enabled server
<p> 4. Includes negative tests where the truststore certs don't match the private key
<p> 5. Uses pre-packaged keys and certs in a keystore and truststore.
|
testBuildUriWithReverseProxy
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
Apache-2.0
|
/**
 * Replies to every GET with the canned {@code GET_RESPONSE_STRING} so tests can
 * verify the TLS round-trip.
 */
@Override
protected void doGet(final HttpServletRequest req, final HttpServletResponse resp)
    throws ServletException, IOException {
  // GET_RESPONSE_STRING is static (tests reference SimpleServlet.GET_RESPONSE_STRING);
  // reference it directly rather than through 'this'.
  resp.getWriter().print(GET_RESPONSE_STRING);
}
|
For testing of ExecutorApiClient. This is currently focused on sanity testing TLS client side
support introduced within the ExecutorApiClient. The core functionality of ExecutorApiClient is
already expected to be tested through other means, such as through ExecutorApiGateway tests.
Highlights:
<p> 1. Launches a TLS enabled web server (https)
<p> 2. Creates ExecutorApiClient with TLS settings configured through properties
<p> 3. Verifies GET and POST requests against the TLS enabled server
<p> 4. Includes negative tests where the truststore certs don't match the private key
<p> 5. Uses pre-packaged keys and certs in a keystore and truststore.
|
doGet
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
Apache-2.0
|
/**
 * Replies to every POST with the canned {@code POST_RESPONSE_STRING} so tests can
 * verify the TLS round-trip.
 */
@Override
protected void doPost(final HttpServletRequest req, final HttpServletResponse resp)
    throws ServletException, IOException {
  // POST_RESPONSE_STRING is static (tests reference SimpleServlet.POST_RESPONSE_STRING);
  // reference it directly rather than through 'this'.
  resp.getWriter().print(POST_RESPONSE_STRING);
}
|
For testing of ExecutorApiClient. This is currently focused on sanity testing TLS client side
support introduced within the ExecutorApiClient. The core functionality of ExecutorApiClient is
already expected to be tested through other means, such as through ExecutorApiGateway tests.
Highlights:
<p> 1. Launches a TLS enabled web server (https)
<p> 2. Creates ExecutorApiClient with TLS settings configured through properties
<p> 3. Verifies GET and POST requests against the TLS enabled server
<p> 4. Includes negative tests where the truststore certs don't match the private key
<p> 5. Uses pre-packaged keys and certs in a keystore and truststore.
|
doPost
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
Apache-2.0
|
/**
 * Stops the TLS-enabled Jetty server started in {@link #setUp()}.
 *
 * @throws Exception if the server fails to stop cleanly
 */
@AfterClass
public static void stop() throws Exception {
  tlsEnabledServer.stop();
}
|
Stops the TLS-enabled Jetty server.
@throws Exception
|
stop
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
|
Apache-2.0
|
/**
 * Wires an ExecutorHealthChecker to mocked loader, alerter and API gateway, and
 * prepares two RUNNING flows on two executors for the individual tests.
 * MAX_FAILURE_COUNT is set to 2 so the alerting threshold is easy to hit.
 */
@Before
public void setUp() throws Exception {
  this.props = new Props();
  // Two consecutive ping failures trigger alerting/finalization in the tests below.
  this.props.put(ConfigurationKeys.AZKABAN_EXECUTOR_MAX_FAILURE_COUNT, 2);
  this.props.put(ConfigurationKeys.AZKABAN_ADMIN_ALERT_EMAIL, AZ_ADMIN_ALERT_EMAIL);
  this.loader = mock(ExecutorLoader.class);
  this.mailAlerter = mock(Emailer.class);
  this.alerterHolder = new AlerterHolder(this.props, (Emailer) this.mailAlerter);
  this.apiGateway = mock(ExecutorApiGateway.class);
  this.executorHealthChecker = new ExecutorHealthChecker(this.props, this.loader, this
      .apiGateway, this.alerterHolder);
  // flow1 additionally carries flow-level failure emails; both flows start out RUNNING.
  this.flow1 = TestUtils.createTestExecutableFlow("exectest1", "exec1", DispatchMethod.POLL);
  this.flow1.getExecutionOptions().setFailureEmails(Arrays.asList(FLOW_ADMIN_EMAIL.split(",")));
  this.flow1.setExecutionId(EXECUTION_ID_11);
  this.flow1.setStatus(Status.RUNNING);
  this.flow2 = TestUtils.createTestExecutableFlow("exectest1", "exec2", DispatchMethod.POLL);
  this.flow2.setExecutionId(EXECUTION_ID_12);
  this.flow2.setStatus(Status.RUNNING);
  this.executor1 = new Executor(1, "localhost", 12345, true);
  this.executor2 = new Executor(2, "localhost", 5678, true);
  // Individual tests populate activeFlows; the loader always serves the live map.
  when(this.loader.fetchActiveFlows(any())).thenReturn(this.activeFlows);
}
|
Test case for executor health checker.
|
setUp
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
|
Apache-2.0
|
/**
 * Test running flow is not finalized and alert email is not sent when executor is alive.
 */
@Test
public void checkExecutorHealthAlive() throws Exception {
  this.activeFlows.put(EXECUTION_ID_11, new Pair<>(
      new ExecutionReference(EXECUTION_ID_11, this.executor1, DispatchMethod.POLL), this.flow1));
  // Executor answers the ping with "alive": flow must stay RUNNING and no alert fires.
  when(this.apiGateway.callWithExecutionId(this.executor1.getHost(), this.executor1.getPort(),
      ConnectorParams.PING_ACTION, null, null, null, Optional.of(5000))).thenReturn(ImmutableMap.of(ConnectorParams
      .STATUS_PARAM, ConnectorParams.RESPONSE_ALIVE));
  this.executorHealthChecker.checkExecutorHealth();
  assertThat(this.flow1.getStatus()).isEqualTo(Status.RUNNING);
  verifyZeroInteractions(this.mailAlerter);
}
|
Test running flow is not finalized and alert email is not sent when executor is alive.
|
checkExecutorHealthAlive
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
|
Apache-2.0
|
/**
 * Test running flow is finalized when its executor is removed from DB.
 */
@Test
public void checkExecutorHealthExecutorIdRemoved() throws Exception {
  // A reference with a null executor simulates the executor row being deleted from the DB.
  this.activeFlows.put(EXECUTION_ID_11, new Pair<>(
      new ExecutionReference(EXECUTION_ID_11, null, DispatchMethod.POLL), this.flow1));
  when(this.loader.fetchExecutableFlow(EXECUTION_ID_11)).thenReturn(this.flow1);
  this.executorHealthChecker.checkExecutorHealth();
  // The orphaned flow must be finalized: persisted via the loader and marked FAILED.
  verify(this.loader).updateExecutableFlow(this.flow1);
  assertThat(this.flow1.getStatus()).isEqualTo(Status.FAILED);
}
|
Test running flow is finalized when its executor is removed from DB.
|
checkExecutorHealthExecutorIdRemoved
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
|
Apache-2.0
|
/**
 * Test alert emails are sent when there are consecutive failures to contact the executor.
 * Walks the failure counter through fail -> success (reset) -> fail -> fail, and only the
 * second consecutive failure (== MAX_FAILURE_COUNT of 2) triggers alerting and
 * finalization of the executor's flows.
 */
@Test
public void checkExecutorHealthConsecutiveFailures() throws Exception {
  // By default mocked methods will return an empty collection.
  // Therefore underlying call to apiGateway.callWithExecutionId returns an empty Map for all
  // invocations of executorHealthChecker.checkExecutorHealth() in this test.
  this.activeFlows.put(EXECUTION_ID_11, new Pair<>(
      new ExecutionReference(EXECUTION_ID_11, this.executor1, DispatchMethod.POLL), this.flow1));
  // Failed to ping executor. Failure count (=1) < MAX_FAILURE_COUNT (=2). Do not alert.
  this.executorHealthChecker.checkExecutorHealth();
  verify(this.apiGateway).callWithExecutionId(this.executor1.getHost(), this.executor1.getPort(),
      ConnectorParams.PING_ACTION, null, null, null, Optional.of(5000));
  verifyZeroInteractions(this.mailAlerter);
  // Pinged executor successfully. Failure count (=0) < MAX_FAILURE_COUNT (=2). Do not alert.
  when(this.apiGateway.callWithExecutionId(this.executor1.getHost(), this.executor1.getPort(),
      ConnectorParams.PING_ACTION, null, null, null, Optional.of(5000))).thenReturn(ImmutableMap.of(ConnectorParams
      .STATUS_PARAM, ConnectorParams.RESPONSE_ALIVE))
  this.executorHealthChecker.checkExecutorHealth();
  verifyZeroInteractions(this.mailAlerter);
  // Failed to ping executor. Failure count (=1) < MAX_FAILURE_COUNT (=2). Do not alert.
  when(this.apiGateway.callWithExecutionId(this.executor1.getHost(), this.executor1.getPort(),
      ConnectorParams.PING_ACTION, null, null, null, Optional.of(5000))).thenReturn(null);
  this.executorHealthChecker.checkExecutorHealth();
  verifyZeroInteractions(this.mailAlerter);
  // Failed to ping executor again. Failure count (=2) = MAX_FAILURE_COUNT (=2). Alert AZ admin.
  when(this.loader.fetchExecutableFlow(flow1.getExecutionId())).thenReturn(flow1);
  this.executorHealthChecker.checkExecutorHealth();
  verify(this.mailAlerter, times(1)).alertOnFailedExecutorHealthCheck(eq(this.executor1),
      eq(Arrays.asList(this.flow1)), any(ExecutorManagerException.class),
      eq(Arrays.asList(AZ_ADMIN_ALERT_EMAIL.split(","))));
  // Verify remediation tasks are performed for unreachable executors.
  // Flow should be finalized with alerts sent over email.
  assertThat(this.flow1.getStatus()).isEqualTo(Status.FAILED);
  String expectedReason = "Executor was unreachable, executor-id: 1, executor-host: localhost, "
      + "executor-port: 12345";
  verify(this.mailAlerter, times(1)).alertOnError(eq(flow1), eq(expectedReason));
}
|
Test alert emails are sent when there are consecutive failures to contact the executor.
|
checkExecutorHealthConsecutiveFailures
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
|
Apache-2.0
|
/**
 * Test that the wrapper routine swallows any exceptions reported by underlying health checker.
 */
@Test
public void testCheckExecutorHealthWrapperExceptionHandling() throws Exception {
  this.activeFlows.put(EXECUTION_ID_11, new Pair<>(
      new ExecutionReference(EXECUTION_ID_11, this.executor1, DispatchMethod.POLL), this.flow1));
  // Force the ping API to blow up with an unchecked exception.
  when(this.apiGateway.callWithExecutionId(this.executor1.getHost(), this.executor1.getPort(),
      ConnectorParams.PING_ACTION, null, null, null, Optional.of(5000))).thenThrow(new RuntimeException("test "
      + "exception"));
  // this will throw, causing the test to fail in case the error is not caught correctly
  this.executorHealthChecker.checkExecutorHealthQuietly();
  verifyZeroInteractions(this.mailAlerter);
}
|
Test that the wrapper routine swallows any exceptions reported by underlying health checker.
|
testCheckExecutorHealthWrapperExceptionHandling
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
|
Apache-2.0
|
/**
 * Test that runtime exceptions from the Ping API for one executor don't prevent
 * health checks on other executors.
 */
@Test
public void testFailureDuringExecutorPing() throws Exception {
  this.activeFlows.put(EXECUTION_ID_11, new Pair<>(
      new ExecutionReference(EXECUTION_ID_11, this.executor1, DispatchMethod.POLL), this.flow1));
  this.activeFlows.put(EXECUTION_ID_12, new Pair<>(
      new ExecutionReference(EXECUTION_ID_12, this.executor2, DispatchMethod.POLL), this.flow2));
  // Throw a runtime exception for both executors.
  when(this.apiGateway.callWithExecutionId(this.executor1.getHost(), this.executor1.getPort(),
      ConnectorParams.PING_ACTION, null, null, null, Optional.of(5000))).thenThrow(new RuntimeException("test "
      + "exception"));
  when(this.apiGateway.callWithExecutionId(this.executor2.getHost(), this.executor2.getPort(),
      ConnectorParams.PING_ACTION, null, null, null, Optional.of(5000))).thenThrow(new RuntimeException("test "
      + "exception"));
  this.executorHealthChecker.checkExecutorHealth();
  // Verify ping API is called for both executors. Implying that runtime exception for one of the
  // executors did not prevent the check on other executor.
  verify(this.apiGateway).callWithExecutionId(this.executor1.getHost(),
      this.executor1.getPort(), ConnectorParams.PING_ACTION, null, null, null, Optional.of(5000));
  verify(this.apiGateway).callWithExecutionId(this.executor2.getHost(),
      this.executor2.getPort(), ConnectorParams.PING_ACTION, null, null, null, Optional.of(5000));
  verifyZeroInteractions(this.mailAlerter);
}
|
Test that runtime exceptions from the Ping API for one executor don't prevent healthchecks on
other executors.
|
testFailureDuringExecutorPing
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
|
Apache-2.0
|
/**
 * Test that any failures while sending alerts for unreachable executors don't prevent the
 * finalization(cleanup) of flows running on that executor.
 */
@Test
public void testFailureDuringAlerting() throws Exception {
  this.activeFlows.clear();
  this.activeFlows.put(EXECUTION_ID_11, new Pair<>(
      new ExecutionReference(EXECUTION_ID_11, this.executor1, DispatchMethod.POLL), this.flow1));
  // Force a failure of the executor ping API
  ExecutorManagerException healthcheckException = new ExecutorManagerException("test exception");
  when(this.apiGateway.callWithExecutionId(
      this.executor1.getHost(),
      this.executor1.getPort(),
      ConnectorParams.PING_ACTION, null, null, null, Optional.of(5000)))
      .thenThrow(healthcheckException);
  // Force an unchecked exception when sending alert emails for the healthcheck failure
  // Note that we can't use this.alerterHolder.get("email") in the when() as mockito
  // doesn't like nested mocks.
  doThrow(new RuntimeException("test runtime exception"))
      .when(this.mailAlerter)
      .alertOnFailedExecutorHealthCheck(
          this.executor1,
          Arrays.asList(this.flow1),
          healthcheckException,
          Arrays.asList(AZ_ADMIN_ALERT_EMAIL.split(",")));
  when(this.loader.fetchExecutableFlow(EXECUTION_ID_11)).thenReturn(this.flow1);
  // Run the check MAX_FAILURE_COUNT times so the failure threshold is reached.
  for (int failureCount = 0;
      failureCount < props.getInt(ConfigurationKeys.AZKABAN_EXECUTOR_MAX_FAILURE_COUNT);
      failureCount++) {
    this.executorHealthChecker.checkExecutorHealth();
  }
  // Confirm that cleanup for the executor is attempted despite failure to send emails.
  // verify() can't be called on executorHealthCheck.cleanUpForMissingExecutor as it's not being
  // mocked. Directly checking the flow update through the mocked 'loader' is a suitable proxy
  // for this.
  verify(this.loader).updateExecutableFlow(this.flow1);
  assertThat(this.flow1.getStatus()).isEqualTo(Status.FAILED);
}
|
Test that any failures while sending alerts for unreachable executors don't prevent the
finalization(cleanup) of flows running on that executor.
|
testFailureDuringAlerting
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
|
Apache-2.0
|
/**
 * Test that an exception thrown while finalizing one flow does not block
 * finalization of the other flows running on the same executor: both flows
 * must still be looked up even though each lookup throws.
 */
@Test
public void testFailureDuringFinalization() throws Exception {
  // Two flows registered as running on the same executor.
  this.activeFlows.put(EXECUTION_ID_11, new Pair<>(
      new ExecutionReference(EXECUTION_ID_11, this.executor1, DispatchMethod.POLL), this.flow1));
  this.activeFlows.put(EXECUTION_ID_12, new Pair<>(
      new ExecutionReference(EXECUTION_ID_12, this.executor1, DispatchMethod.POLL), this.flow2));
  // Force an unchecked failure while fetching each flow during finalization.
  when(this.loader.fetchExecutableFlow(EXECUTION_ID_11)).thenThrow(new RuntimeException(
      "test runtime exception"));
  when(this.loader.fetchExecutableFlow(EXECUTION_ID_12)).thenThrow(new RuntimeException(
      "test runtime exception"));
  this.executorHealthChecker.finalizeFlows(ImmutableList.of(this.flow1, flow2),
      "test finalize reason");
  // Finalization of the second flow must still have been attempted after the
  // first one blew up.
  verify(this.loader).fetchExecutableFlow(flow1.getExecutionId());
  verify(this.loader).fetchExecutableFlow(flow2.getExecutionId());
}
|
Test that exceptions during flow finalization do not block finalization of subsequent flows
for the same executor.
|
testFailureDuringFinalization
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
|
Apache-2.0
|
/**
 * 1. Executor 1 throws an exception when the flow is dispatched to it.
 * 2. The ExecutorManager should fall back to the next executor.
 * 3. Executor 2 accepts the dispatched execution.
 */
@Test
public void testDispatchException() throws Exception {
  testSetUpForRunningFlows();
  this.manager.start();
  final ExecutableFlow flow1 = TestUtils.createTestExecutableFlow("exectest1", "exec1", DispatchMethod.PUSH);
  doReturn(flow1).when(this.executorLoader).fetchExecutableFlow(-1);
  // Makes the status-update poll report the execution as gone, so it gets
  // cleaned up after the test's dispatch sequence completes.
  mockFlowDoesNotExist();
  // First dispatch attempt fails, second one succeeds.
  when(this.apiGateway.callWithExecutable(any(), any(), eq(ConnectorParams.EXECUTE_ACTION)))
      .thenThrow(new ExecutorManagerException("Mocked dispatch exception"))
      .thenReturn(null);
  this.manager.submitExecutableFlow(flow1, this.user.getUserId());
  waitFlowFinished(flow1);
  // Both executors must have been tried exactly once each.
  verify(this.apiGateway)
      .callWithExecutable(flow1, this.manager.fetchExecutor(1), ConnectorParams.EXECUTE_ACTION);
  verify(this.apiGateway)
      .callWithExecutable(flow1, this.manager.fetchExecutor(2), ConnectorParams.EXECUTE_ACTION);
  // The single failed attempt must have released the executor assignment once.
  verify(this.executorLoader, Mockito.times(1)).unassignExecutor(-1);
}
|
1. Executor 1 throws an exception when trying to dispatch to it 2. ExecutorManager should try
next executor 3. Executor 2 accepts the dispatched execution
|
testDispatchException
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
Apache-2.0
|
/**
 * Both executors reject the dispatch; the ExecutorManager should remove the
 * execution from the queue, finalize it, and send a failure alert.
 */
@Ignore
@Test
public void testDispatchFailed() throws Exception {
  testSetUpForRunningFlows();
  this.manager.start();
  final ExecutableFlow flow1 = TestUtils.createTestExecutableFlow("exectest1", "exec1", DispatchMethod.PUSH);
  flow1.getExecutionOptions().setFailureEmails(Arrays.asList("test@example.com"));
  when(this.executorLoader.fetchExecutableFlow(-1)).thenReturn(flow1);
  // Every dispatch attempt fails.
  when(this.apiGateway.callWithExecutable(any(), any(), eq(ConnectorParams.EXECUTE_ACTION)))
      .thenThrow(new ExecutorManagerException("Mocked dispatch exception"));
  this.manager.submitExecutableFlow(flow1, this.user.getUserId());
  waitFlowFinished(flow1);
  // Both executors were tried before giving up.
  verify(this.apiGateway)
      .callWithExecutable(flow1, this.manager.fetchExecutor(1), ConnectorParams.EXECUTE_ACTION);
  verify(this.apiGateway)
      .callWithExecutable(flow1, this.manager.fetchExecutor(2), ConnectorParams.EXECUTE_ACTION);
  // Each of the two failed attempts releases the executor assignment.
  verify(this.executorLoader, Mockito.times(2)).unassignExecutor(-1);
  // Exhausting azkaban.maxDispatchingErrors must raise an alert.
  verify(this.mailAlerter).alertOnError(eq(flow1),
      eq("Failed to dispatch queued execution derived-member-data because reached "
          + "azkaban.maxDispatchingErrors (tried 2 executors)"),
      contains("Mocked dispatch exception"));
}
|
ExecutorManager should try to dispatch to all executors & when both fail it should remove the
execution from queue and finalize it.
|
testDispatchFailed
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
Apache-2.0
|
/**
 * Stubs the executor status-update response so the polled execution (id -1)
 * is reported with an "error" entry ("Flow does not exist"). Tests use this
 * to get a submitted execution cleaned up / finalized after dispatch.
 */
private void mockFlowDoesNotExist() throws Exception {
  mockUpdateResponse(ImmutableMap.of(ConnectorParams.RESPONSE_UPDATED_FLOWS,
      Collections.singletonList(ImmutableMap.of(
          ConnectorParams.UPDATE_MAP_EXEC_ID, -1,
          "error", "Flow does not exist"))));
}
|
ExecutorManager should try to dispatch to all executors & when both fail it should remove the
execution from queue and finalize it.
|
mockFlowDoesNotExist
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
Apache-2.0
|
/**
 * Stubs {@code apiGateway.updateExecutions} to return the given response map
 * for any arguments.
 *
 * @param map the canned status-update response keyed by response field name
 */
@SuppressWarnings("unchecked")
private void mockUpdateResponse(
    final Map<String, List<Map<String, Object>>> map) throws Exception {
  doReturn(map).when(this.apiGateway).updateExecutions(any(), any());
}
|
ExecutorManager should try to dispatch to all executors & when both fail it should remove the
execution from queue and finalize it.
|
mockUpdateResponse
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
Apache-2.0
|
/**
 * A submitted flow must be persisted and registered as an active execution
 * reference.
 */
@Test
public void testSubmitFlows() throws Exception {
  testSetUpForRunningFlows();
  final ExecutableFlow flow = TestUtils.createTestExecutableFlow("exectest1", "exec1", DispatchMethod.PUSH);
  this.manager.submitExecutableFlow(flow, this.user.getUserId());
  verify(this.executorLoader).uploadExecutableFlow(flow);
  verify(this.executorLoader).addActiveExecutableReference(any());
}
|
ExecutorManager should try to dispatch to all executors & when both fail it should remove the
execution from queue and finalize it.
|
testSubmitFlows
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
Apache-2.0
|
/**
 * Submitting more concurrent runs of one flow than the configured limit
 * (MAX_CONCURRENT_RUNS_ONEFLOW = 2 in the test setup) must fail with an
 * ExecutorManagerException.
 */
@Test(expected = ExecutorManagerException.class)
public void testTooManySubmitFlows() throws Exception {
  testSetUpForRunningFlows();
  // Four independent executions of the same flow definition.
  final ExecutableFlow[] flows = new ExecutableFlow[4];
  for (int i = 0; i < flows.length; i++) {
    flows[i] = TestUtils.createTestExecutableFlowFromYaml("basicyamlshelltest", "bashSleep");
    flows[i].setExecutionId(101 + i);
  }
  // The first two submissions are within the limit and must be persisted.
  this.manager.submitExecutableFlow(flows[0], this.user.getUserId());
  verify(this.executorLoader).uploadExecutableFlow(flows[0]);
  this.manager.submitExecutableFlow(flows[1], this.user.getUserId());
  verify(this.executorLoader).uploadExecutableFlow(flows[1]);
  // Exceeding the concurrent-run limit triggers the expected exception.
  this.manager.submitExecutableFlow(flows[2], this.user.getUserId());
  this.manager.submitExecutableFlow(flows[3], this.user.getUserId());
}
|
ExecutorManager should try to dispatch to all executors & when both fail it should remove the
execution from queue and finalize it.
|
testTooManySubmitFlows
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
Apache-2.0
|
/**
 * With a per-flow whitelist allowing 4 concurrent runs of bashSleep, all four
 * submissions are attempted and each upload is verified; the test still
 * expects an ExecutorManagerException overall.
 */
@Test(expected = ExecutorManagerException.class)
public void testConcurrentRunWhitelist() throws Exception {
  testSetUpForRunningFlows();
  // Whitelist entry: project, flow, allowed concurrent runs.
  this.props.put(ConfigurationKeys.CONCURRENT_RUNS_ONEFLOW_WHITELIST,
      "basicyamlshelltest,bashSleep,4");
  for (int i = 0; i < 4; i++) {
    final ExecutableFlow flow =
        TestUtils.createTestExecutableFlowFromYaml("basicyamlshelltest", "bashSleep");
    flow.setExecutionId(101 + i);
    this.manager.submitExecutableFlow(flow, this.user.getUserId());
    verify(this.executorLoader).uploadExecutableFlow(flow);
  }
}
|
ExecutorManager should try to dispatch to all executors & when both fail it should remove the
execution from queue and finalize it.
|
testConcurrentRunWhitelist
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
Apache-2.0
|
/**
 * The running-flow id lookup by (project, flow) must include flow1, which the
 * setup registers as active.
 */
@Ignore
@Test
public void testFetchActiveFlowByProject() throws Exception {
  testSetUpForRunningFlows();
  final int projectId = this.flow1.getProjectId();
  final String flowId = this.flow1.getFlowId();
  final List<Integer> runningIds = this.manager.getRunningFlowIds(projectId, flowId);
  Assert.assertTrue(runningIds.contains(this.flow1.getExecutionId()));
}
|
ExecutorManager should try to dispatch to all executors & when both fail it should remove the
execution from queue and finalize it.
|
testFetchActiveFlowByProject
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
Apache-2.0
|
/**
 * Both active flows registered in the setup must be returned together with
 * the executor each is assigned to.
 */
@Ignore
@Test
public void testFetchActiveFlowWithExecutor() throws Exception {
  testSetUpForRunningFlows();
  final List<Pair<ExecutableFlow, Optional<Executor>>> activeFlowsWithExecutor =
      this.manager.getActiveFlowsWithExecutor();
  Assert.assertTrue(activeFlowsWithExecutor.contains(new Pair<>(this.flow1,
      Optional.ofNullable(this.manager.fetchExecutor(this.flow1.getExecutionId())))));
  Assert.assertTrue(activeFlowsWithExecutor.contains(new Pair<>(this.flow2,
      Optional.ofNullable(this.manager.fetchExecutor(this.flow2.getExecutionId())))));
}
|
ExecutorManager should try to dispatch to all executors & when both fail it should remove the
execution from queue and finalize it.
|
testFetchActiveFlowWithExecutor
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
Apache-2.0
|
/**
 * The set of active executor server hosts must contain the "host:port" of the
 * executor assigned to each active flow.
 */
@Test
public void testFetchAllActiveExecutorServerHosts() throws Exception {
  testSetUpForRunningFlows();
  final Set<String> hosts = this.manager.getAllActiveExecutorServerHosts();
  final Executor first = this.manager.fetchExecutor(this.flow1.getExecutionId());
  final Executor second = this.manager.fetchExecutor(this.flow2.getExecutionId());
  Assert.assertTrue(hosts.contains(first.getHost() + ":" + first.getPort()));
  Assert.assertTrue(hosts.contains(second.getHost() + ":" + second.getPort()));
}
|
ExecutorManager should try to dispatch to all executors & when both fail it should remove the
execution from queue and finalize it.
|
testFetchAllActiveExecutorServerHosts
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
Apache-2.0
|
/**
 * With azkaban.maxDispatchingErrors raised to 4, the ExecutorManager should
 * keep retrying dispatch across executors until it succeeds: two failures
 * followed by one successful dispatch.
 */
@Test
public void testDispatchMultipleRetries() throws Exception {
  this.props.put(Constants.ConfigurationKeys.MAX_DISPATCHING_ERRORS_PERMITTED, 4);
  testSetUpForRunningFlows();
  this.manager.start();
  final ExecutableFlow flow1 = TestUtils.createTestExecutableFlow("exectest1", "exec1", DispatchMethod.PUSH);
  flow1.getExecutionOptions().setFailureEmails(Arrays.asList("test@example.com"));
  when(this.executorLoader.fetchExecutableFlow(-1)).thenReturn(flow1);
  // fail 2 first dispatch attempts, then succeed
  when(this.apiGateway.callWithExecutable(any(), any(), eq(ConnectorParams.EXECUTE_ACTION)))
      .thenThrow(new ExecutorManagerException("Mocked dispatch exception 1"))
      .thenThrow(new ExecutorManagerException("Mocked dispatch exception 2"))
      .thenReturn(null);
  // this is just to clean up the execution as FAILED after it has been submitted
  mockFlowDoesNotExist();
  this.manager.submitExecutableFlow(flow1, this.user.getUserId());
  waitFlowFinished(flow1);
  // it's random which executor is chosen each time, but both should have been tried at least once
  verify(this.apiGateway, Mockito.atLeast(1))
      .callWithExecutable(flow1, this.manager.fetchExecutor(1), ConnectorParams.EXECUTE_ACTION);
  verify(this.apiGateway, Mockito.atLeast(1))
      .callWithExecutable(flow1, this.manager.fetchExecutor(2), ConnectorParams.EXECUTE_ACTION);
  // verify that there was a 3rd (successful) dispatch call
  verify(this.apiGateway, Mockito.times(3))
      .callWithExecutable(eq(flow1), any(), eq(ConnectorParams.EXECUTE_ACTION));
  // Each of the two failed attempts released the executor assignment.
  verify(this.executorLoader, Mockito.times(2)).unassignExecutor(-1);
}
|
ExecutorManager should try to dispatch to all executors until it succeeds.
|
testDispatchMultipleRetries
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
Apache-2.0
|
/**
 * A locked flow must be rejected with an explanatory message; after unlocking
 * it the same flow must be accepted, persisted, and registered as active.
 */
@Test
public void testSetFlowLock() throws Exception {
  testSetUpForRunningFlows();
  final ExecutableFlow flow1 = TestUtils.createTestExecutableFlow("exectest1", "exec1", DispatchMethod.PUSH);
  flow1.setLocked(true);
  final String msg = this.manager.submitExecutableFlow(flow1, this.user.getUserId());
  assertThat(msg).isEqualTo("Flow derived-member-data for project flow is locked.");
  // unlock the flow
  flow1.setLocked(false);
  this.manager.submitExecutableFlow(flow1, this.user.getUserId());
  verify(this.executorLoader).uploadExecutableFlow(flow1);
  verify(this.executorLoader).addActiveExecutableReference(any());
}
|
ExecutorManager should try to dispatch to all executors until it succeeds.
|
testSetFlowLock
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
Apache-2.0
|
/**
 * Test fetching application ids from job log data: single id, multiple ids in
 * one chunk (attempt ids must be ignored), and the empty-log case.
 *
 * @throws Exception on mock or manager failures
 */
@Test
public void testGetApplicationIdsFromLog() throws Exception {
  testSetUpForRunningFlows();
  this.runningExecutions.get().put(1, new Pair<>(this.ref1, this.flow1));
  // Verify that application ids are obtained successfully from the log data.
  final String logData1 = "Submitted application_12345_6789.";
  when(this.apiGateway.callWithReference(any(), eq(ConnectorParams.LOG_ACTION), any()))
      .then(getLogChunksMock(logData1));
  Set<String> appIds = this.manager.getApplicationIds(this.flow1, "job1", 0);
  Assert.assertEquals(1, appIds.size());
  Assert.assertEquals("12345_6789", appIds.iterator().next());
  // Two distinct application ids; "attempt_..." must not be picked up.
  final String logData2 = " Submitted application_12345_6789.\n AttemptID: attempt_12345_6789. "
      + "Accepted application_98765_4321.";
  when(this.apiGateway.callWithReference(any(), eq(ConnectorParams.LOG_ACTION), any()))
      .then(getLogChunksMock(logData2));
  appIds = this.manager.getApplicationIds(this.flow1, "job1", 0);
  Assert.assertEquals(2, appIds.size());
  // Parameterized iterator instead of the raw type used previously.
  final Iterator<String> iterator = appIds.iterator();
  Assert.assertEquals("12345_6789", iterator.next());
  Assert.assertEquals("98765_4321", iterator.next());
  // Verify that an empty list is returned when log data length is 0 (no new data available).
  when(this.apiGateway.callWithReference(any(), eq(ConnectorParams.LOG_ACTION), any()))
      .then(getLogChunksMock(""));
  Assert.assertEquals(0, this.manager.getApplicationIds(this.flow1, "job1", 0).size());
}
|
Test fetching application ids from log data.
@throws Exception the exception
|
testGetApplicationIdsFromLog
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
Apache-2.0
|
/**
 * Builds a Mockito {@code Answer} that serves chunks of {@code logData} the
 * way the executor's log endpoint does: it reads the "offset" and "length"
 * request parameters (passed as {@code Pair(name, value)} varargs) and
 * returns a map with the requested offset, the length actually served, and
 * the matching substring of the log.
 *
 * @param logData the full log content the mocked endpoint exposes
 * @return an answer producing {"offset", "length", "data"} response maps
 */
private Answer<Object> getLogChunksMock(final String logData) {
  return invocationOnMock -> {
    String offsetStr = null, lengthStr = null;
    for (final Object arg : invocationOnMock.getArguments()) {
      if (!(arg instanceof Pair)) {
        continue;
      }
      final Pair pairArg = (Pair) arg;
      if ("offset".equals(pairArg.getFirst())) {
        offsetStr = (String) pairArg.getSecond();
      } else if ("length".equals(pairArg.getFirst())) {
        lengthStr = (String) pairArg.getSecond();
      }
    }
    Assert.assertNotNull(offsetStr);
    Assert.assertNotNull(lengthStr);
    final int offset = Integer.parseInt(offsetStr);
    final int length = Integer.parseInt(lengthStr);
    // Clamp the start as well as the length. Previously only the length was
    // clamped, so an offset past the end of the log made substring() throw
    // StringIndexOutOfBoundsException; now the caller just gets an empty chunk.
    final int start = Math.min(offset, logData.length());
    final int actualLength = Math.min(length, logData.length() - start);
    final String logChunk = logData.substring(start, start + actualLength);
    return ImmutableMap.of("offset", offset, "length", actualLength, "data", logChunk);
  };
}
|
Test fetching application ids from log data.
@throws Exception the exception
|
getLogChunksMock
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
Apache-2.0
|
/**
 * Shared fixture: mocks the loader/gateway, configures multi-executor push
 * dispatch with queue processing enabled, registers two active executors, and
 * seeds two running flows (execution ids 1 and 2) into the active-flow map.
 */
private void testSetUpForRunningFlows() throws Exception {
  this.executorLoader = mock(ExecutorLoader.class);
  this.apiGateway = mock(ExecutorApiGateway.class);
  this.user = TestUtils.getTestUser();
  this.props.put(Constants.ConfigurationKeys.USE_MULTIPLE_EXECUTORS, "true");
  //To test runningFlows, AZKABAN_QUEUEPROCESSING_ENABLED should be set to true
  //so that flows will be dispatched to executors.
  this.props.put(Constants.ConfigurationKeys.QUEUEPROCESSING_ENABLED, "true");
  // allow two concurrent runs give one Flow
  this.props.put(Constants.ConfigurationKeys.MAX_CONCURRENT_RUNS_ONEFLOW, 2)
  final List<Executor> executors = new ArrayList<>();
  final Executor executor1 = new Executor(1, "localhost", 12345, true);
  final Executor executor2 = new Executor(2, "localhost", 12346, true);
  executors.add(executor1);
  executors.add(executor2);
  when(this.executorLoader.fetchActiveExecutors()).thenReturn(executors);
  this.manager = createExecutorManager();
  this.flow1 = TestUtils.createTestExecutableFlow("exectest1", "exec1", DispatchMethod.PUSH);
  this.flow2 = TestUtils.createTestExecutableFlow("exectest1", "exec2", DispatchMethod.PUSH);
  this.flow1.setExecutionId(1);
  this.flow2.setExecutionId(2);
  // Each flow is referenced as running on a distinct executor.
  this.ref1 = new ExecutionReference(this.flow1.getExecutionId(), executor1, DispatchMethod.PUSH);
  this.ref2 = new ExecutionReference(this.flow2.getExecutionId(), executor2, DispatchMethod.PUSH);
  this.activeFlows.put(this.flow1.getExecutionId(), new Pair<>(this.ref1, this.flow1));
  this.activeFlows.put(this.flow2.getExecutionId(), new Pair<>(this.ref2, this.flow2));
  when(this.executorLoader.fetchActiveFlows(any())).thenReturn(this.activeFlows);
}
|
Test fetching application ids from log data.
@throws Exception the exception
|
testSetUpForRunningFlows
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
Apache-2.0
|
/**
 * Blocks until the flow's persisted status becomes a finished status, then
 * returns the freshly fetched flow.
 *
 * @param flow the flow whose completion to await
 * @return the flow as re-fetched from the (mocked) loader
 */
private ExecutableFlow waitFlowFinished(final ExecutableFlow flow) throws Exception {
  azkaban.test.TestUtils.await().untilAsserted(() -> assertThat(getFlowStatus(flow))
      .isNotNull().matches(Status::isStatusFinished, "isStatusFinished"));
  return fetchFlow(flow);
}
|
Test fetching application ids from log data.
@throws Exception the exception
|
waitFlowFinished
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
Apache-2.0
|
/**
 * Returns the persisted status of the flow, or null when the flow cannot be
 * fetched.
 *
 * @param flow the flow whose status to look up
 * @return the fetched flow's status, or null if the fetch returned null
 */
private Status getFlowStatus(final ExecutableFlow flow) throws Exception {
  // Fetch once instead of twice: the original called fetchFlow() for the null
  // check and again for the status, doubling the loader round-trips and
  // risking an NPE if the second fetch returned null after a non-null first.
  final ExecutableFlow fetched = fetchFlow(flow);
  return fetched != null ? fetched.getStatus() : null;
}
|
Test fetching application ids from log data.
@throws Exception the exception
|
getFlowStatus
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
Apache-2.0
|
/**
 * Re-fetches the flow by execution id through the (mocked) executor loader.
 *
 * @param flow the flow whose execution id is used for the lookup
 * @return the loader's view of the execution, as stubbed by the test
 */
private ExecutableFlow fetchFlow(final ExecutableFlow flow) throws ExecutorManagerException {
  return this.executorLoader.fetchExecutableFlow(flow.getExecutionId());
}
|
Test fetching application ids from log data.
@throws Exception the exception
|
fetchFlow
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
|
Apache-2.0
|
/**
 * Creates a mocked JDBC ResultSet for the result-handler tests.
 */
@Before
public void setUp() throws Exception {
  this.rs = Mockito.mock(ResultSet.class);
}
|
Also @see ExecutionFlowDaoTest - DB operations of FetchActiveFlowDao are tested there.
|
setUp
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/FetchActiveFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/FetchActiveFlowDaoTest.java
|
Apache-2.0
|
/**
 * A result row with flow data but no executor columns must still be handled:
 * the execution is returned and its reference carries an absent executor.
 */
@Test
public void handleResultMissingExecutor() throws Exception {
  final FetchActiveExecutableFlows resultHandler = new FetchActiveExecutableFlows();
  mockResultWithData();
  final Map<Integer, Pair<ExecutionReference, ExecutableFlow>> result = resultHandler
      .handle(this.rs);
  assertThat(result.containsKey(1)).isTrue();
  assertThat(result.get(1).getFirst().getExecutor().isPresent()).isFalse();
}
|
Also @see ExecutionFlowDaoTest - DB operations of FetchActiveFlowDao are tested there.
|
handleResultMissingExecutor
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/FetchActiveFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/FetchActiveFlowDaoTest.java
|
Apache-2.0
|
/**
 * A result row whose flow-data blob is null must be skipped, yielding an
 * empty result map rather than an error.
 */
@Test
public void handleResultNullData() throws Exception {
  final FetchActiveExecutableFlows resultHandler = new FetchActiveExecutableFlows();
  mockResultWithNullData();
  final Map<Integer, Pair<ExecutionReference, ExecutableFlow>> result = resultHandler
      .handle(this.rs);
  assertThat(result).isEmpty();
}
|
Also @see ExecutionFlowDaoTest - DB operations of FetchActiveFlowDao are tested there.
|
handleResultNullData
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/FetchActiveFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/FetchActiveFlowDaoTest.java
|
Apache-2.0
|
/**
 * Stubs the mocked ResultSet with one row containing a PLAIN-encoded JSON
 * serialization of a sample executable flow.
 */
private void mockResultWithData() throws Exception {
  final ExecutableFlow flow = TestUtils.createTestExecutableFlow("exectest1", "exec1", DispatchMethod.POLL);
  final String json = JSONUtils.toJSON(flow.toObject());
  // Charset constant instead of the "UTF-8" string literal: no checked
  // UnsupportedEncodingException path and no runtime charset-name lookup.
  final byte[] data = json.getBytes(java.nio.charset.StandardCharsets.UTF_8);
  mockExecution(EncodingType.PLAIN.getNumVal(), data);
}
|
Also @see ExecutionFlowDaoTest - DB operations of FetchActiveFlowDao are tested there.
|
mockResultWithData
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/FetchActiveFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/FetchActiveFlowDaoTest.java
|
Apache-2.0
|
/**
 * Stubs the mocked ResultSet with one row whose flow-data blob is null.
 */
private void mockResultWithNullData() throws SQLException {
  mockExecution(0, null);
}
|
Also @see ExecutionFlowDaoTest - DB operations of FetchActiveFlowDao are tested there.
|
mockResultWithNullData
|
java
|
azkaban/azkaban
|
azkaban-common/src/test/java/azkaban/executor/FetchActiveFlowDaoTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/FetchActiveFlowDaoTest.java
|
Apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.