index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/resourcecluster/ResourceClustersHostManagerActorTests.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.argThat;
import static org.mockito.Mockito.atMost;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.testkit.javadsl.TestKit;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode;
import io.mantisrx.master.resourcecluster.proto.GetResourceClusterSpecRequest;
import io.mantisrx.master.resourcecluster.proto.ListResourceClusterRequest;
import io.mantisrx.master.resourcecluster.proto.MantisResourceClusterEnvType;
import io.mantisrx.master.resourcecluster.proto.MantisResourceClusterSpec;
import io.mantisrx.master.resourcecluster.proto.ProvisionResourceClusterRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterAPIProto.GetResourceClusterResponse;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterAPIProto.ListResourceClustersResponse;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterProvisionSubmissionResponse;
import io.mantisrx.master.resourcecluster.proto.UpgradeClusterContainersRequest;
import io.mantisrx.master.resourcecluster.proto.UpgradeClusterContainersResponse;
import io.mantisrx.master.resourcecluster.resourceprovider.ResourceClusterProvider;
import io.mantisrx.master.resourcecluster.resourceprovider.ResourceClusterResponseHandler;
import io.mantisrx.master.resourcecluster.writable.ResourceClusterSpecWritable;
import io.mantisrx.server.master.persistence.IMantisPersistenceProvider;
import io.mantisrx.server.master.persistence.InMemoryPersistenceProvider;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.ContainerSkuID;
import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
 * Unit tests for {@code ResourceClustersHostManagerActor}: cluster provisioning,
 * listing/fetching cluster specs, spec validation failures, persistence failures,
 * provisioning-submission failures, and container upgrade requests.
 */
public class ResourceClustersHostManagerActorTests {
    static ActorSystem system;

    // Mock persistence provider. JUnit creates a fresh test instance (and hence a
    // fresh mock) for every test method, so stubbing here does not leak across tests.
    private final IMantisPersistenceProvider storageProvider = mock(IMantisPersistenceProvider.class);

    @BeforeClass
    public static void setup() {
        Config config = ConfigFactory.parseString("akka {\n" +
            " loggers = [\"akka.testkit.TestEventListener\"]\n" +
            " loglevel = \"INFO\"\n" +
            " stdout-loglevel = \"INFO\"\n" +
            " test.single-expect-default = 300000 millis\n" +
            "}\n");
        system = ActorSystem.create("ResourceClusterManagerUnitTest", config.withFallback(ConfigFactory.load()));
    }

    @AfterClass
    public static void tearDown() {
        TestKit.shutdownActorSystem(system);
        system = null;
    }

    /**
     * Happy path: provision two clusters, then verify list/get responses and that the
     * provider + response handler were invoked once per provision request.
     */
    @Test
    public void testProvisionAndGetResourceCluster() {
        TestKit probe = new TestKit(system);
        ResourceClusterProvider resProvider = mock(ResourceClusterProvider.class);
        ResourceClusterResponseHandler responseHandler = mock(ResourceClusterResponseHandler.class);
        ResourceClusterProvisionSubmissionResponse provisionResponse =
            ResourceClusterProvisionSubmissionResponse.builder().response("123").build();
        when(resProvider.provisionClusterIfNotPresent(any())).thenReturn(CompletableFuture.completedFuture(
            provisionResponse
        ));
        when(resProvider.getResponseHandler()).thenReturn(responseHandler);
        ActorRef resourceClusterActor = system.actorOf(ResourceClustersHostManagerActor.props(resProvider, new InMemoryPersistenceProvider()));

        // Provision the first cluster; the actor replies with the persisted spec.
        ProvisionResourceClusterRequest request = buildProvisionRequest();
        resourceClusterActor.tell(request, probe.getRef());
        GetResourceClusterResponse createResp = probe.expectMsgClass(GetResourceClusterResponse.class);
        assertEquals(request.getClusterSpec(), createResp.getClusterSpec());

        // The new cluster shows up in the listing.
        ListResourceClusterRequest listReq = ListResourceClusterRequest.builder().build();
        resourceClusterActor.tell(listReq, probe.getRef());
        ListResourceClustersResponse listResp = probe.expectMsgClass(ListResourceClustersResponse.class);
        assertEquals(1, listResp.getRegisteredResourceClusters().size());
        assertEquals(request.getClusterId(), listResp.getRegisteredResourceClusters().get(0).getId());

        // Fetching the spec by id returns what was provisioned.
        GetResourceClusterSpecRequest getReq =
            GetResourceClusterSpecRequest.builder().id(request.getClusterId()).build();
        resourceClusterActor.tell(getReq, probe.getRef());
        GetResourceClusterResponse getResp = probe.expectMsgClass(GetResourceClusterResponse.class);
        assertEquals(request.getClusterSpec(), getResp.getClusterSpec());

        // verify access API
        verify(resProvider).provisionClusterIfNotPresent(request);
        verify(responseHandler).handleProvisionResponse(provisionResponse);

        // add second cluster
        ProvisionResourceClusterRequest request2 = buildProvisionRequest("cluster2", "dev2@mantisrx.io");
        resourceClusterActor.tell(request2, probe.getRef());
        GetResourceClusterResponse createResp2 = probe.expectMsgClass(GetResourceClusterResponse.class);
        assertEquals(request2.getClusterSpec(), createResp2.getClusterSpec());

        // Both clusters are now listed; exactly one entry matches the second id.
        ListResourceClusterRequest listReq2 = ListResourceClusterRequest.builder().build();
        resourceClusterActor.tell(listReq2, probe.getRef());
        ListResourceClustersResponse listResp2 = probe.expectMsgClass(ListResourceClustersResponse.class);
        assertEquals(2, listResp2.getRegisteredResourceClusters().size());
        assertEquals(1,
            listResp2.getRegisteredResourceClusters().stream().filter(e -> e.getId().equals(request2.getClusterId())).count());

        GetResourceClusterSpecRequest getReq2 =
            GetResourceClusterSpecRequest.builder().id(request2.getClusterId()).build();
        resourceClusterActor.tell(getReq2, probe.getRef());
        GetResourceClusterResponse getResp2 = probe.expectMsgClass(GetResourceClusterResponse.class);
        assertEquals(request2.getClusterSpec(), getResp2.getClusterSpec());

        // verify access API: one provision call per request, handler invoked for both.
        verify(resProvider, times(1)).provisionClusterIfNotPresent(request2);
        verify(responseHandler, times(2)).handleProvisionResponse(provisionResponse);
        probe.getSystem().stop(resourceClusterActor);
    }

    /**
     * Each malformed provision request must be rejected with CLIENT_ERROR before the
     * provider is ever invoked. Every variant below omits or corrupts one required field.
     */
    @Test
    public void testProvisionSpecError() {
        TestKit probe = new TestKit(system);
        ResourceClusterProvider resProvider = mock(ResourceClusterProvider.class);
        ResourceClusterResponseHandler responseHandler = mock(ResourceClusterResponseHandler.class);
        ResourceClusterProvisionSubmissionResponse provisionResponse =
            ResourceClusterProvisionSubmissionResponse.builder().response("123").build();
        when(resProvider.provisionClusterIfNotPresent(any())).thenReturn(CompletableFuture.completedFuture(
            provisionResponse
        ));
        when(resProvider.getResponseHandler()).thenReturn(responseHandler);
        ActorRef resourceClusterHostActor = system.actorOf(
            ResourceClustersHostManagerActor.props(resProvider, storageProvider));

        // Variant 1: no cluster spec at all.
        ProvisionResourceClusterRequest request =
            ProvisionResourceClusterRequest.builder()
                .clusterId(ClusterID.of("id1"))
                .build();
        resourceClusterHostActor.tell(request, probe.getRef());
        GetResourceClusterResponse createResp = probe.expectMsgClass(GetResourceClusterResponse.class);
        assertEquals(ResponseCode.CLIENT_ERROR, createResp.responseCode);
        verify(resProvider, times(0)).provisionClusterIfNotPresent(any());

        // Variant 2: request cluster id ("id1") does not match the spec's id ("id2").
        request =
            ProvisionResourceClusterRequest.builder()
                .clusterId(ClusterID.of("id1"))
                .clusterSpec(MantisResourceClusterSpec.builder()
                    .id(ClusterID.of("id2"))
                    .build())
                .build();
        resourceClusterHostActor.tell(request, probe.getRef());
        createResp = probe.expectMsgClass(GetResourceClusterResponse.class);
        assertEquals(ResponseCode.CLIENT_ERROR, createResp.responseCode);
        verify(resProvider, times(0)).provisionClusterIfNotPresent(any());

        // Variant 3: sku spec is missing its skuId (intentionally omitted from the builder).
        request =
            ProvisionResourceClusterRequest.builder()
                .clusterId(ClusterID.of("id1"))
                .clusterSpec(MantisResourceClusterSpec.builder()
                    .id(ClusterID.of("id1"))
                    .name("id1name")
                    .envType(MantisResourceClusterEnvType.Prod)
                    .ownerEmail("user")
                    .ownerName("user")
                    .skuSpec(MantisResourceClusterSpec.SkuTypeSpec.builder()
                        // skuId deliberately not set to trigger validation failure
                        .capacity(MantisResourceClusterSpec.SkuCapacity.builder()
                            .skuId(ContainerSkuID.of("small"))
                            .desireSize(2)
                            .maxSize(3)
                            .minSize(1)
                            .build())
                        .cpuCoreCount(2)
                        .memorySizeInMB(16384)
                        .diskSizeInMB(81920)
                        .networkMbps(700)
                        .imageId("dev/mantistaskexecutor:main-latest")
                        .skuMetadataField(
                            "skuKey",
                            "us-east-1")
                        .skuMetadataField(
                            "sgKey",
                            "sg-11, sg-22, sg-33, sg-44")
                        .build())
                    .build())
                .build();
        resourceClusterHostActor.tell(request, probe.getRef());
        createResp = probe.expectMsgClass(GetResourceClusterResponse.class);
        assertEquals(ResponseCode.CLIENT_ERROR, createResp.responseCode);
        verify(resProvider, times(0)).provisionClusterIfNotPresent(any());

        // Variant 4: sku spec has no capacity section.
        request =
            ProvisionResourceClusterRequest.builder()
                .clusterId(ClusterID.of("id1"))
                .clusterSpec(MantisResourceClusterSpec.builder()
                    .id(ClusterID.of("id1"))
                    .name("id1name")
                    .envType(MantisResourceClusterEnvType.Prod)
                    .ownerEmail("user")
                    .ownerName("user")
                    .skuSpec(MantisResourceClusterSpec.SkuTypeSpec.builder()
                        .skuId(ContainerSkuID.of("small"))
                        .cpuCoreCount(2)
                        .memorySizeInMB(16384)
                        .diskSizeInMB(81920)
                        .networkMbps(700)
                        .imageId("dev/mantistaskexecutor:main-latest")
                        .skuMetadataField(
                            "skuKey",
                            "us-east-1")
                        .skuMetadataField(
                            "sgKey",
                            "sg-11, sg-22, sg-33, sg-44")
                        .build())
                    .build())
                .build();
        resourceClusterHostActor.tell(request, probe.getRef());
        createResp = probe.expectMsgClass(GetResourceClusterResponse.class);
        assertEquals(ResponseCode.CLIENT_ERROR, createResp.responseCode);
        verify(resProvider, times(0)).provisionClusterIfNotPresent(any());

        // Variant 5: cpuCoreCount missing.
        request =
            ProvisionResourceClusterRequest.builder()
                .clusterId(ClusterID.of("id1"))
                .clusterSpec(MantisResourceClusterSpec.builder()
                    .id(ClusterID.of("id1"))
                    .name("id1name")
                    .envType(MantisResourceClusterEnvType.Prod)
                    .ownerEmail("user")
                    .ownerName("user")
                    .skuSpec(MantisResourceClusterSpec.SkuTypeSpec.builder()
                        .skuId(ContainerSkuID.of("small"))
                        .capacity(MantisResourceClusterSpec.SkuCapacity.builder()
                            .skuId(ContainerSkuID.of("small"))
                            .desireSize(2)
                            .maxSize(3)
                            .minSize(1)
                            .build())
                        .memorySizeInMB(16384)
                        .diskSizeInMB(81920)
                        .networkMbps(700)
                        .imageId("dev/mantistaskexecutor:main-latest")
                        .skuMetadataField(
                            "skuKey",
                            "us-east-1")
                        .skuMetadataField(
                            "sgKey",
                            "sg-11, sg-22, sg-33, sg-44")
                        .build())
                    .build())
                .build();
        resourceClusterHostActor.tell(request, probe.getRef());
        createResp = probe.expectMsgClass(GetResourceClusterResponse.class);
        assertEquals(ResponseCode.CLIENT_ERROR, createResp.responseCode);
        verify(resProvider, times(0)).provisionClusterIfNotPresent(any());

        // Variant 6: memorySizeInMB missing.
        request =
            ProvisionResourceClusterRequest.builder()
                .clusterId(ClusterID.of("id1"))
                .clusterSpec(MantisResourceClusterSpec.builder()
                    .id(ClusterID.of("id1"))
                    .name("id1name")
                    .envType(MantisResourceClusterEnvType.Prod)
                    .ownerEmail("user")
                    .ownerName("user")
                    .skuSpec(MantisResourceClusterSpec.SkuTypeSpec.builder()
                        .skuId(ContainerSkuID.of("small"))
                        .capacity(MantisResourceClusterSpec.SkuCapacity.builder()
                            .skuId(ContainerSkuID.of("small"))
                            .desireSize(2)
                            .maxSize(3)
                            .minSize(1)
                            .build())
                        .cpuCoreCount(2)
                        .diskSizeInMB(81920)
                        .networkMbps(700)
                        .imageId("dev/mantistaskexecutor:main-latest")
                        .skuMetadataField(
                            "skuKey",
                            "us-east-1")
                        .skuMetadataField(
                            "sgKey",
                            "sg-11, sg-22, sg-33, sg-44")
                        .build())
                    .build())
                .build();
        resourceClusterHostActor.tell(request, probe.getRef());
        createResp = probe.expectMsgClass(GetResourceClusterResponse.class);
        assertEquals(ResponseCode.CLIENT_ERROR, createResp.responseCode);
        verify(resProvider, times(0)).provisionClusterIfNotPresent(any());

        // Variant 7: diskSizeInMB is zero (invalid).
        request =
            ProvisionResourceClusterRequest.builder()
                .clusterId(ClusterID.of("id1"))
                .clusterSpec(MantisResourceClusterSpec.builder()
                    .id(ClusterID.of("id1"))
                    .name("id1name")
                    .envType(MantisResourceClusterEnvType.Prod)
                    .ownerEmail("user")
                    .ownerName("user")
                    .skuSpec(MantisResourceClusterSpec.SkuTypeSpec.builder()
                        .skuId(ContainerSkuID.of("small"))
                        .capacity(MantisResourceClusterSpec.SkuCapacity.builder()
                            .skuId(ContainerSkuID.of("small"))
                            .desireSize(2)
                            .maxSize(3)
                            .minSize(1)
                            .build())
                        .cpuCoreCount(2)
                        .memorySizeInMB(16384)
                        .diskSizeInMB(0)
                        .networkMbps(700)
                        .imageId("dev/mantistaskexecutor:main-latest")
                        .skuMetadataField(
                            "skuKey",
                            "us-east-1")
                        .skuMetadataField(
                            "sgKey",
                            "sg-11, sg-22, sg-33, sg-44")
                        .build())
                    .build())
                .build();
        resourceClusterHostActor.tell(request, probe.getRef());
        createResp = probe.expectMsgClass(GetResourceClusterResponse.class);
        assertEquals(ResponseCode.CLIENT_ERROR, createResp.responseCode);
        verify(resProvider, times(0)).provisionClusterIfNotPresent(any());
        probe.getSystem().stop(resourceClusterHostActor);
    }

    /**
     * When persisting the cluster spec throws, the actor must answer SERVER_ERROR and
     * must not submit the provision request to the provider.
     */
    @Test
    public void testProvisionPersisError() throws IOException {
        TestKit probe = new TestKit(system);
        ResourceClusterProvider resProvider = mock(ResourceClusterProvider.class);
        ResourceClusterResponseHandler responseHandler = mock(ResourceClusterResponseHandler.class);
        ResourceClusterProvisionSubmissionResponse provisionResponse =
            ResourceClusterProvisionSubmissionResponse.builder().response("123").build();
        when(resProvider.provisionClusterIfNotPresent(any())).thenReturn(CompletableFuture.completedFuture(
            provisionResponse
        ));
        // Persistence layer fails on spec registration.
        IOException err = new IOException("persist error");
        when(storageProvider.registerAndUpdateClusterSpec(any())).thenThrow(err);
        when(resProvider.getResponseHandler()).thenReturn(responseHandler);
        ActorRef resourceClusterActor = system.actorOf(
            ResourceClustersHostManagerActor.props(resProvider, storageProvider));
        ProvisionResourceClusterRequest request = buildProvisionRequest();
        resourceClusterActor.tell(request, probe.getRef());
        GetResourceClusterResponse createResp = probe.expectMsgClass(GetResourceClusterResponse.class);
        assertEquals(ResponseCode.SERVER_ERROR, createResp.responseCode);
        verify(resProvider, times(0)).provisionClusterIfNotPresent(any());
        // atMost(1): the error path may or may not surface through the response handler.
        verify(responseHandler, atMost(1)).handleProvisionResponse(
            argThat(ar -> ar.getResponse().equals(err.toString())));
        probe.getSystem().stop(resourceClusterActor);
    }

    /**
     * A provision submission that fails asynchronously must not prevent the cluster spec
     * from being registered; the failure is routed to the response handler.
     */
    @Test
    public void testProvisionSubmitError() throws InterruptedException {
        TestKit probe = new TestKit(system);
        CountDownLatch latch = new CountDownLatch(1);
        ResourceClusterProvider resProvider = mock(ResourceClusterProvider.class);
        ResourceClusterResponseHandler responseHandler = mock(ResourceClusterResponseHandler.class);
        when(resProvider.provisionClusterIfNotPresent(any())).thenReturn(
            CompletableFuture.supplyAsync(() -> {
                latch.countDown();
                throw new RuntimeException("test err msg");
            }));
        when(resProvider.getResponseHandler()).thenReturn(responseHandler);
        ActorRef resourceClusterActor = system.actorOf(ResourceClustersHostManagerActor.props(resProvider, new InMemoryPersistenceProvider()));
        ProvisionResourceClusterRequest request = buildProvisionRequest();
        resourceClusterActor.tell(request, probe.getRef());
        GetResourceClusterResponse createResp = probe.expectMsgClass(GetResourceClusterResponse.class);
        assertEquals(request.getClusterSpec(), createResp.getClusterSpec());

        // Despite the failed submission, the cluster is registered and queryable.
        ListResourceClusterRequest listReq = ListResourceClusterRequest.builder().build();
        resourceClusterActor.tell(listReq, probe.getRef());
        ListResourceClustersResponse listResp = probe.expectMsgClass(ListResourceClustersResponse.class);
        assertEquals(1, listResp.getRegisteredResourceClusters().size());
        assertEquals(request.getClusterId(), listResp.getRegisteredResourceClusters().get(0).getId());
        GetResourceClusterSpecRequest getReq =
            GetResourceClusterSpecRequest.builder().id(request.getClusterId()).build();
        resourceClusterActor.tell(getReq, probe.getRef());
        GetResourceClusterResponse getResp = probe.expectMsgClass(GetResourceClusterResponse.class);
        assertEquals(request.getClusterSpec(), getResp.getClusterSpec());
        assertEquals(ResponseCode.SUCCESS, getResp.responseCode);

        // Fail the test if the async submission never ran (previously the await result
        // was silently discarded, so the verifications below could race).
        assertTrue("provision submission was never invoked", latch.await(3, TimeUnit.SECONDS));
        verify(resProvider).provisionClusterIfNotPresent(request);
        verify(responseHandler).handleProvisionResponse(argThat(r ->
            r.getError().getCause().getMessage()
                .equals("test err msg")));
        probe.getSystem().stop(resourceClusterActor);
    }

    /** Upgrade request without sku-spec upgrade: provider is invoked exactly once. */
    @Test
    public void testUpgradeRequest() {
        TestKit probe = new TestKit(system);
        ResourceClusterProvider resProvider = mock(ResourceClusterProvider.class);
        ResourceClusterResponseHandler responseHandler = mock(ResourceClusterResponseHandler.class);
        UpgradeClusterContainersResponse upgradeRes =
            UpgradeClusterContainersResponse.builder().responseCode(ResponseCode.SUCCESS).build();
        when(resProvider.upgradeContainerResource(any())).thenReturn(CompletableFuture.completedFuture(
            upgradeRes
        ));
        when(resProvider.getResponseHandler()).thenReturn(responseHandler);
        ActorRef resourceClusterActor = system.actorOf(
            ResourceClustersHostManagerActor.props(resProvider, new InMemoryPersistenceProvider()));
        UpgradeClusterContainersRequest request = UpgradeClusterContainersRequest.builder()
            .clusterId(ClusterID.of("mantisTestResCluster1"))
            .build();
        resourceClusterActor.tell(request, probe.getRef());
        UpgradeClusterContainersResponse createResp = probe.expectMsgClass(UpgradeClusterContainersResponse.class);
        assertEquals(ResponseCode.SUCCESS, createResp.responseCode);
        verify(resProvider, times(1)).upgradeContainerResource(any());
        probe.getSystem().stop(resourceClusterActor);
    }

    /**
     * Upgrade request with enableSkuSpecUpgrade: the actor loads the stored cluster spec
     * from persistence before delegating to the provider.
     */
    @Test
    public void testUpgradeRequestEnableSkuSpecUpgrade() throws IOException {
        TestKit probe = new TestKit(system);
        IMantisPersistenceProvider resStorageProvider = mock(IMantisPersistenceProvider.class);
        ResourceClusterProvider resProvider = mock(ResourceClusterProvider.class);
        ResourceClusterResponseHandler responseHandler = mock(ResourceClusterResponseHandler.class);
        UpgradeClusterContainersResponse upgradeRes =
            UpgradeClusterContainersResponse.builder().responseCode(ResponseCode.SUCCESS).build();
        when(resProvider.upgradeContainerResource(any())).thenReturn(CompletableFuture.completedFuture(
            upgradeRes
        ));
        ProvisionResourceClusterRequest provisionReq = buildProvisionRequest();
        when(resStorageProvider.getResourceClusterSpecWritable(any()))
            .thenReturn(
                ResourceClusterSpecWritable.builder()
                    .clusterSpec(provisionReq.getClusterSpec())
                    .id(provisionReq.getClusterId())
                    .build());
        when(resProvider.getResponseHandler()).thenReturn(responseHandler);
        ActorRef resourceClusterActor = system.actorOf(
            ResourceClustersHostManagerActor.props(resProvider, resStorageProvider));
        UpgradeClusterContainersRequest request = UpgradeClusterContainersRequest.builder()
            .clusterId(provisionReq.getClusterId())
            .enableSkuSpecUpgrade(true)
            .build();
        resourceClusterActor.tell(request, probe.getRef());
        UpgradeClusterContainersResponse createResp = probe.expectMsgClass(UpgradeClusterContainersResponse.class);
        assertEquals(ResponseCode.SUCCESS, createResp.responseCode);
        verify(resProvider, times(1)).upgradeContainerResource(any());
        probe.getSystem().stop(resourceClusterActor);
    }

    /** Builds a fully-populated provision request with default test id/owner. */
    private ProvisionResourceClusterRequest buildProvisionRequest() {
        return buildProvisionRequest("mantisTestResCluster1", "dev@mantisrx.io");
    }

    /**
     * Builds a provision request whose spec passes all validation: one "small" sku with
     * capacity, cpu/memory/disk/network settings, image id and metadata fields.
     *
     * @param id cluster id (also used as the spec name)
     * @param user owner name and email
     */
    private ProvisionResourceClusterRequest buildProvisionRequest(String id, String user) {
        return ProvisionResourceClusterRequest.builder()
            .clusterId(ClusterID.of(id))
            .clusterSpec(MantisResourceClusterSpec.builder()
                .id(ClusterID.of(id))
                .name(id)
                .envType(MantisResourceClusterEnvType.Prod)
                .ownerEmail(user)
                .ownerName(user)
                .skuSpec(MantisResourceClusterSpec.SkuTypeSpec.builder()
                    .skuId(ContainerSkuID.of("small"))
                    .capacity(MantisResourceClusterSpec.SkuCapacity.builder()
                        .skuId(ContainerSkuID.of("small"))
                        .desireSize(2)
                        .maxSize(3)
                        .minSize(1)
                        .build())
                    .cpuCoreCount(2)
                    .memorySizeInMB(16384)
                    .diskSizeInMB(81920)
                    .networkMbps(700)
                    .imageId("dev/mantistaskexecutor:main-latest")
                    .skuMetadataField(
                        "skuKey",
                        "us-east-1")
                    .skuMetadataField(
                        "sgKey",
                        "sg-11, sg-22, sg-33, sg-44")
                    .build())
                .build())
            .build();
    }
}
| 7,900 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/resourcecluster/TaskExecutorStateTest.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.common.util.DelegateClock;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.server.core.TestingRpcService;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorHeartbeat;
import io.mantisrx.server.master.resourcecluster.TaskExecutorID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorRegistration;
import io.mantisrx.server.master.resourcecluster.TaskExecutorReport;
import io.mantisrx.server.master.resourcecluster.TaskExecutorStatusChange;
import io.mantisrx.server.master.scheduler.JobMessageRouter;
import io.mantisrx.server.worker.TaskExecutorGateway;
import io.mantisrx.shaded.com.google.common.collect.ImmutableList;
import io.mantisrx.shaded.com.google.common.collect.ImmutableMap;
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.time.ZoneId;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.Before;
import org.junit.Test;
/**
 * Tests for {@code TaskExecutorState} transitions driven by registration, heartbeats,
 * worker assignment and status changes, using a manually advanced clock.
 */
public class TaskExecutorStateTest {
    // Manually controlled clock, starting at epoch-second 1; advanceClock() moves it
    // forward by one second and returns the new instant.
    private final AtomicReference<Clock> clockRef =
        new AtomicReference<>(Clock.fixed(Instant.ofEpochSecond(1), ZoneId.systemDefault()));
    private final Clock clock = new DelegateClock(clockRef);

    private final TestingRpcService rpc = new TestingRpcService();
    private final TaskExecutorGateway gateway = mock(TaskExecutorGateway.class);
    private final JobMessageRouter router = mock(JobMessageRouter.class);
    private final TaskExecutorState state = TaskExecutorState.of(clock, rpc, router);

    private static final TaskExecutorID TASK_EXECUTOR_ID = TaskExecutorID.of("taskExecutorId");
    private static final ClusterID CLUSTER_ID = ClusterID.of("clusterId");
    private static final String TASK_EXECUTOR_ADDRESS = "127.0.0.1";
    private static final String HOST_NAME = "hostName";
    private static final WorkerPorts WORKER_PORTS = new WorkerPorts(ImmutableList.of(1, 2, 3, 4, 5));
    private static final MachineDefinition MACHINE_DEFINITION =
        new MachineDefinition(1.0, 2.0, 3.0, 4.0, 5);
    private static final Map<String, String> ATTRIBUTES =
        ImmutableMap.of("attr1", "attr2");
    private static final WorkerId WORKER_ID = WorkerId.fromIdUnsafe("late-sine-function-tutorial-1-worker-0-1");

    @Before
    public void setup() {
        rpc.registerGateway(TASK_EXECUTOR_ADDRESS, gateway);
    }

    /** Walks the full register → heartbeat → assign → run → heartbeat → stop sequence. */
    @Test
    public void testRegularLifecycle() {
        // Registration
        assertTrue(state.onRegistration(newRegistration()));
        assertTrue(state.isRegistered());
        assertFalse(state.isDisconnected());

        // First heartbeat after registration: executor is available, activity recorded.
        Instant now = advanceClock();
        assertTrue(state.onHeartbeat(new TaskExecutorHeartbeat(TASK_EXECUTOR_ID, CLUSTER_ID, TaskExecutorReport.available())));
        assertTrue(state.isRegistered());
        assertFalse(state.isAssigned());
        assertFalse(state.isRunningTask());
        assertEquals(now, state.getLastActivity());

        // Assigning a worker moves the executor into the assigned state.
        now = advanceClock();
        assertTrue(state.onAssignment(WORKER_ID));
        assertTrue(state.isRegistered());
        assertTrue(state.isAssigned());
        assertFalse(state.isRunningTask());

        // Occupied status change: assignment becomes a running task.
        now = advanceClock();
        assertTrue(state.onTaskExecutorStatusChange(new TaskExecutorStatusChange(TASK_EXECUTOR_ID, CLUSTER_ID, TaskExecutorReport.occupied(WORKER_ID))));
        assertTrue(state.isRegistered());
        assertFalse(state.isAssigned());
        assertTrue(state.isRunningTask());
        assertEquals(now, state.getLastActivity());

        // Heartbeat while running: no state change (returns false) but activity updates.
        now = advanceClock();
        assertFalse(state.onHeartbeat(new TaskExecutorHeartbeat(TASK_EXECUTOR_ID, CLUSTER_ID, TaskExecutorReport.occupied(WORKER_ID))));
        assertTrue(state.isRegistered());
        assertFalse(state.isAssigned());
        assertTrue(state.isRunningTask());
        assertEquals(now, state.getLastActivity());

        // Available status change: the task has stopped.
        now = advanceClock();
        assertTrue(state.onTaskExecutorStatusChange(new TaskExecutorStatusChange(TASK_EXECUTOR_ID, CLUSTER_ID, TaskExecutorReport.available())));
        assertTrue(state.isRegistered());
        assertFalse(state.isAssigned());
        assertFalse(state.isRunningTask());
        assertEquals(now, state.getLastActivity());
    }

    /** An executor may report occupied on its very first heartbeat after registration. */
    @Test
    public void testInitializationLifecycle() {
        // Registration
        assertTrue(state.onRegistration(newRegistration()));
        assertTrue(state.isRegistered());
        assertFalse(state.isDisconnected());

        // Immediately-occupied heartbeat: running, not assigned, not available.
        Instant now = advanceClock();
        assertTrue(state.onHeartbeat(new TaskExecutorHeartbeat(TASK_EXECUTOR_ID, CLUSTER_ID, TaskExecutorReport.occupied(WORKER_ID))));
        assertTrue(state.isRegistered());
        assertFalse(state.isAssigned());
        assertTrue(state.isRunningTask());
        assertFalse(state.isAvailable());
        assertEquals(now, state.getLastActivity());
    }

    /** Builds the standard registration payload used by every test. */
    private TaskExecutorRegistration newRegistration() {
        return TaskExecutorRegistration.builder()
            .taskExecutorID(TASK_EXECUTOR_ID)
            .clusterID(CLUSTER_ID)
            .taskExecutorAddress(TASK_EXECUTOR_ADDRESS)
            .hostname(HOST_NAME)
            .workerPorts(WORKER_PORTS)
            .machineDefinition(MACHINE_DEFINITION)
            .taskExecutorAttributes(ATTRIBUTES)
            .build();
    }

    /** Advances the shared clock by one second and returns the resulting instant. */
    private Instant advanceClock() {
        return clockRef.updateAndGet(c -> Clock.offset(c, Duration.ofSeconds(1))).instant();
    }
}
| 7,901 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/resourcecluster/ResourceClusterActorTest.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.actor.Status.Failure;
import akka.testkit.javadsl.TestKit;
import io.mantisrx.common.Ack;
import io.mantisrx.common.WorkerConstants;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetActiveJobsRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetClusterUsageRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetTaskExecutorStatusRequest;
import io.mantisrx.master.resourcecluster.proto.GetClusterIdleInstancesRequest;
import io.mantisrx.master.resourcecluster.proto.GetClusterIdleInstancesResponse;
import io.mantisrx.master.resourcecluster.proto.GetClusterUsageResponse;
import io.mantisrx.master.resourcecluster.proto.GetClusterUsageResponse.UsageByGroupKey;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.server.core.TestingRpcService;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.ContainerSkuID;
import io.mantisrx.server.master.resourcecluster.PagedActiveJobOverview;
import io.mantisrx.server.master.resourcecluster.ResourceCluster;
import io.mantisrx.server.master.resourcecluster.ResourceCluster.ResourceOverview;
import io.mantisrx.server.master.resourcecluster.ResourceCluster.TaskExecutorNotFoundException;
import io.mantisrx.server.master.resourcecluster.ResourceCluster.TaskExecutorStatus;
import io.mantisrx.server.master.resourcecluster.TaskExecutorAllocationRequest;
import io.mantisrx.server.master.resourcecluster.TaskExecutorHeartbeat;
import io.mantisrx.server.master.resourcecluster.TaskExecutorID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorRegistration;
import io.mantisrx.server.master.resourcecluster.TaskExecutorReport;
import io.mantisrx.server.master.resourcecluster.TaskExecutorStatusChange;
import io.mantisrx.server.master.scheduler.JobMessageRouter;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import io.mantisrx.server.master.scheduler.WorkerOnDisabledVM;
import io.mantisrx.server.worker.TaskExecutorGateway;
import io.mantisrx.server.worker.TaskExecutorGateway.TaskNotFoundException;
import io.mantisrx.shaded.com.google.common.collect.ImmutableList;
import io.mantisrx.shaded.com.google.common.collect.ImmutableMap;
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import org.apache.flink.util.ExceptionUtils;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.ArgumentMatchers;
import org.mockito.Matchers;
/**
 * Unit tests for {@code ResourceClusterActor} / {@code ResourceClusterAkkaImpl}: task executor
 * registration and heartbeats, scheduling assignment and assignment timeout, cluster usage
 * reporting per container SKU, active-job listing/paging, and the disable-task-executors
 * workflow (including persistence-backed initialization after a restart).
 */
public class ResourceClusterActorTest {
    private static final TaskExecutorID TASK_EXECUTOR_ID = TaskExecutorID.of("taskExecutorId");
    private static final TaskExecutorID TASK_EXECUTOR_ID_2 = TaskExecutorID.of("taskExecutorId2");
    private static final TaskExecutorID TASK_EXECUTOR_ID_3 = TaskExecutorID.of("taskExecutorId3");
    private static final String TASK_EXECUTOR_ADDRESS = "address";
    private static final ClusterID CLUSTER_ID = ClusterID.of("clusterId");
    private static final Duration heartbeatTimeout = Duration.ofSeconds(10);
    private static final Duration checkForDisabledExecutorsInterval = Duration.ofSeconds(10);
    // Deliberately short so testAssignmentTimeout can observe an expired assignment quickly.
    private static final Duration assignmentTimeout = Duration.ofSeconds(1);
    private static final String HOST_NAME = "hostname";
    private static final ContainerSkuID CONTAINER_DEF_ID_1 = ContainerSkuID.of("SKU1");
    private static final ContainerSkuID CONTAINER_DEF_ID_2 = ContainerSkuID.of("SKU2");
    private static final WorkerPorts WORKER_PORTS = new WorkerPorts(1, 2, 3, 4, 5);
    private static final MachineDefinition MACHINE_DEFINITION =
        new MachineDefinition(2f, 2014, 128.0, 1024, 1);
    private static final MachineDefinition MACHINE_DEFINITION_2 =
        new MachineDefinition(4f, 4028, 128.0, 1024, 1);
    private static final Map<String, String> ATTRIBUTES =
        ImmutableMap.of("attr1", "attr1");
    // Registration 1: SKU1 / small machine; registrations 2 and 3: SKU2, with 3 on the
    // larger machine definition. Together they exercise per-SKU usage grouping.
    private static final TaskExecutorRegistration TASK_EXECUTOR_REGISTRATION =
        TaskExecutorRegistration.builder()
            .taskExecutorID(TASK_EXECUTOR_ID)
            .clusterID(CLUSTER_ID)
            .taskExecutorAddress(TASK_EXECUTOR_ADDRESS)
            .hostname(HOST_NAME)
            .workerPorts(WORKER_PORTS)
            .machineDefinition(MACHINE_DEFINITION)
            .taskExecutorAttributes(
                ImmutableMap.of(
                    WorkerConstants.WORKER_CONTAINER_DEFINITION_ID, CONTAINER_DEF_ID_1.getResourceID(),
                    "attr1", "attr1"))
            .build();
    private static final TaskExecutorRegistration TASK_EXECUTOR_REGISTRATION_2 =
        TaskExecutorRegistration.builder()
            .taskExecutorID(TASK_EXECUTOR_ID_2)
            .clusterID(CLUSTER_ID)
            .taskExecutorAddress(TASK_EXECUTOR_ADDRESS)
            .hostname(HOST_NAME)
            .workerPorts(WORKER_PORTS)
            .machineDefinition(MACHINE_DEFINITION)
            .taskExecutorAttributes(
                ImmutableMap.of(
                    WorkerConstants.WORKER_CONTAINER_DEFINITION_ID, CONTAINER_DEF_ID_2.getResourceID(),
                    "attr2", "attr2"))
            .build();
    private static final TaskExecutorRegistration TASK_EXECUTOR_REGISTRATION_3 =
        TaskExecutorRegistration.builder()
            .taskExecutorID(TASK_EXECUTOR_ID_3)
            .clusterID(CLUSTER_ID)
            .taskExecutorAddress(TASK_EXECUTOR_ADDRESS)
            .hostname(HOST_NAME)
            .workerPorts(WORKER_PORTS)
            .machineDefinition(MACHINE_DEFINITION_2)
            .taskExecutorAttributes(
                ImmutableMap.of(
                    WorkerConstants.WORKER_CONTAINER_DEFINITION_ID, CONTAINER_DEF_ID_2.getResourceID(),
                    "attr2", "attr2"))
            .build();
    private static final WorkerId WORKER_ID =
        WorkerId.fromIdUnsafe("late-sine-function-tutorial-1-worker-0-1");
    // Package-visible: shared with ResourceClusterScalerActorTests via static import.
    static ActorSystem actorSystem;
    private final TestingRpcService rpcService = new TestingRpcService();
    private final TaskExecutorGateway gateway = mock(TaskExecutorGateway.class);
    private MantisJobStore mantisJobStore;
    private ActorRef resourceClusterActor;
    private ResourceCluster resourceCluster;
    private JobMessageRouter jobMessageRouter;
    @BeforeClass
    public static void setup() {
        actorSystem = ActorSystem.create();
    }
    @AfterClass
    public static void teardown() {
        TestKit.shutdownActorSystem(actorSystem);
        actorSystem = null;
    }
    @Before
    public void setupRpcService() {
        rpcService.registerGateway(TASK_EXECUTOR_ADDRESS, gateway);
        mantisJobStore = mock(MantisJobStore.class);
        jobMessageRouter = mock(JobMessageRouter.class);
    }
    // Creates a fresh actor + facade per test; also re-invoked directly by tests that need to
    // simulate a control-plane restart after stubbing the job store.
    @Before
    public void setupActor() {
        final Props props =
            ResourceClusterActor.props(
                CLUSTER_ID,
                heartbeatTimeout,
                assignmentTimeout,
                checkForDisabledExecutorsInterval,
                Clock.systemDefaultZone(),
                rpcService,
                mantisJobStore,
                jobMessageRouter,
                0,
                "",
                false);
        resourceClusterActor = actorSystem.actorOf(props);
        resourceCluster =
            new ResourceClusterAkkaImpl(
                resourceClusterActor,
                Duration.ofSeconds(1),
                CLUSTER_ID,
                () -> 10000);
    }
    // A registered task executor shows up in the registered list.
    @Test
    public void testRegistration() throws Exception {
        assertEquals(Ack.getInstance(), resourceCluster.registerTaskExecutor(TASK_EXECUTOR_REGISTRATION).get());
        assertEquals(ImmutableList.of(TASK_EXECUTOR_ID), resourceCluster.getRegisteredTaskExecutors().get());
    }
    // After a restart, initializing from the persisted registration restores the busy state.
    @Test
    public void testInitializationAfterRestart() throws Exception {
        when(mantisJobStore.loadAllDisableTaskExecutorsRequests(ArgumentMatchers.eq(CLUSTER_ID)))
            .thenReturn(ImmutableList.of());
        when(mantisJobStore.getTaskExecutor(ArgumentMatchers.eq(TASK_EXECUTOR_ID))).thenReturn(TASK_EXECUTOR_REGISTRATION);
        assertEquals(
            Ack.getInstance(),
            resourceCluster.initializeTaskExecutor(TASK_EXECUTOR_ID, WORKER_ID).get());
        assertEquals(ImmutableList.of(TASK_EXECUTOR_ID), resourceCluster.getBusyTaskExecutors().get());
    }
    // Assigning a worker removes the executor from the available list but keeps it registered.
    @Test
    public void testGetFreeTaskExecutors() throws Exception {
        assertEquals(Ack.getInstance(), resourceCluster.registerTaskExecutor(TASK_EXECUTOR_REGISTRATION).get());
        assertEquals(Ack.getInstance(),
            resourceCluster
                .heartBeatFromTaskExecutor(
                    new TaskExecutorHeartbeat(
                        TASK_EXECUTOR_ID,
                        CLUSTER_ID,
                        TaskExecutorReport.available())).get());
        assertEquals(
            TASK_EXECUTOR_ID,
            resourceCluster.getTaskExecutorFor(TaskExecutorAllocationRequest.of(WORKER_ID, MACHINE_DEFINITION, null, 0)).get());
        assertEquals(
            TASK_EXECUTOR_ID,
            resourceCluster.getTaskExecutorAssignedFor(WORKER_ID).get());
        assertEquals(ImmutableList.of(), resourceCluster.getAvailableTaskExecutors().get());
        assertEquals(ImmutableList.of(TASK_EXECUTOR_ID), resourceCluster.getRegisteredTaskExecutors().get());
    }
    // End-to-end walk through usage aggregation by SKU, TE status lookup, active-job listing
    // and idle-instance listing as executors move from idle to assigned.
    @Test
    public void testGetTaskExecutorsUsageAndList() throws Exception {
        assertEquals(Ack.getInstance(), resourceCluster.registerTaskExecutor(TASK_EXECUTOR_REGISTRATION).get());
        assertEquals(Ack.getInstance(),
            resourceCluster
                .heartBeatFromTaskExecutor(
                    new TaskExecutorHeartbeat(
                        TASK_EXECUTOR_ID,
                        CLUSTER_ID,
                        TaskExecutorReport.available())).get());
        assertEquals(Ack.getInstance(), resourceCluster.registerTaskExecutor(TASK_EXECUTOR_REGISTRATION_2).get());
        assertEquals(Ack.getInstance(),
            resourceCluster
                .heartBeatFromTaskExecutor(
                    new TaskExecutorHeartbeat(
                        TASK_EXECUTOR_ID_2,
                        CLUSTER_ID,
                        TaskExecutorReport.available())).get());
        assertEquals(Ack.getInstance(), resourceCluster.registerTaskExecutor(TASK_EXECUTOR_REGISTRATION_3).get());
        assertEquals(Ack.getInstance(),
            resourceCluster
                .heartBeatFromTaskExecutor(
                    new TaskExecutorHeartbeat(
                        TASK_EXECUTOR_ID_3,
                        CLUSTER_ID,
                        TaskExecutorReport.available())).get());
        // Test get cluster usage
        TestKit probe = new TestKit(actorSystem);
        resourceClusterActor.tell(new GetClusterUsageRequest(
            CLUSTER_ID, ResourceClusterScalerActor.groupKeyFromTaskExecutorDefinitionIdFunc),
            probe.getRef());
        GetClusterUsageResponse usageRes = probe.expectMsgClass(GetClusterUsageResponse.class);
        assertEquals(2, usageRes.getUsages().size());
        assertEquals(1, usageRes.getUsages().stream()
            .filter(usage -> Objects.equals(usage.getUsageGroupKey(), CONTAINER_DEF_ID_1.getResourceID())).count());
        UsageByGroupKey usage1 =
            usageRes.getUsages().stream()
                .filter(usage -> Objects.equals(usage.getUsageGroupKey(), CONTAINER_DEF_ID_1.getResourceID()))
                .findFirst().get();
        assertEquals(1, usage1.getIdleCount());
        assertEquals(1, usage1.getTotalCount());
        // test get TE status
        resourceClusterActor.tell(new GetTaskExecutorStatusRequest(TASK_EXECUTOR_ID_2, CLUSTER_ID), probe.getRef());
        TaskExecutorStatus teStatusRes = probe.expectMsgClass(TaskExecutorStatus.class);
        assertEquals(TASK_EXECUTOR_REGISTRATION_2, teStatusRes.getRegistration());
        // test get invalid TE status: an unknown executor yields a Failure message
        resourceClusterActor.tell(new GetTaskExecutorStatusRequest(TaskExecutorID.of("invalid"), CLUSTER_ID),
            probe.getRef());
        Failure teNotFoundStatusRes = probe.expectMsgClass(Failure.class);
        assertTrue(teNotFoundStatusRes.cause() instanceof TaskExecutorNotFoundException);
        assertEquals(1, usageRes.getUsages().stream()
            .filter(usage -> Objects.equals(usage.getUsageGroupKey(), CONTAINER_DEF_ID_2.getResourceID())).count());
        UsageByGroupKey usage2 =
            usageRes.getUsages().stream()
                .filter(usage -> Objects.equals(usage.getUsageGroupKey(), CONTAINER_DEF_ID_2.getResourceID()))
                .findFirst().get();
        assertEquals(2, usage2.getIdleCount());
        assertEquals(2, usage2.getTotalCount());
        // test get empty job list
        resourceClusterActor.tell(new GetActiveJobsRequest(
            CLUSTER_ID),
            probe.getRef());
        PagedActiveJobOverview jobsList = probe.expectMsgClass(PagedActiveJobOverview.class);
        assertEquals(0, jobsList.getActiveJobs().size());
        assertEquals(0, jobsList.getEndPosition());
        // test get idle list
        resourceClusterActor.tell(
            GetClusterIdleInstancesRequest.builder()
                .clusterID(CLUSTER_ID)
                .maxInstanceCount(2)
                .skuId(CONTAINER_DEF_ID_2)
                .build(),
            probe.getRef());
        GetClusterIdleInstancesResponse idleInstancesResponse =
            probe.expectMsgClass(GetClusterIdleInstancesResponse.class);
        assertEquals(ImmutableList.of(TASK_EXECUTOR_ID_3, TASK_EXECUTOR_ID_2), idleInstancesResponse.getInstanceIds());
        assertEquals(CONTAINER_DEF_ID_2, idleInstancesResponse.getSkuId());
        // The larger machine definition must be matched by the larger executor (TE 3).
        assertEquals(
            TASK_EXECUTOR_ID_3,
            resourceCluster.getTaskExecutorFor(TaskExecutorAllocationRequest.of(WORKER_ID, MACHINE_DEFINITION_2, null, 0)).get());
        probe = new TestKit(actorSystem);
        resourceClusterActor.tell(new GetClusterUsageRequest(
            CLUSTER_ID, ResourceClusterScalerActor.groupKeyFromTaskExecutorDefinitionIdFunc),
            probe.getRef());
        usageRes = probe.expectMsgClass(GetClusterUsageResponse.class);
        usage1 =
            usageRes.getUsages().stream()
                .filter(usage -> usage.getUsageGroupKey().equals(CONTAINER_DEF_ID_1.getResourceID())).findFirst().get();
        assertEquals(1, usage1.getIdleCount());
        assertEquals(1, usage1.getTotalCount());
        usage2 =
            usageRes.getUsages().stream()
                .filter(usage -> usage.getUsageGroupKey().equals(CONTAINER_DEF_ID_2.getResourceID())).findFirst().get();
        assertEquals(1, usage2.getIdleCount());
        assertEquals(2, usage2.getTotalCount());
        // test get idle list
        resourceClusterActor.tell(
            GetClusterIdleInstancesRequest.builder()
                .clusterID(CLUSTER_ID)
                .maxInstanceCount(2)
                .skuId(CONTAINER_DEF_ID_1)
                .build(),
            probe.getRef());
        idleInstancesResponse =
            probe.expectMsgClass(GetClusterIdleInstancesResponse.class);
        assertEquals(ImmutableList.of(TASK_EXECUTOR_ID), idleInstancesResponse.getInstanceIds());
        assertEquals(CONTAINER_DEF_ID_1, idleInstancesResponse.getSkuId());
        assertEquals(
            TASK_EXECUTOR_ID_2,
            resourceCluster.getTaskExecutorFor(TaskExecutorAllocationRequest.of(WORKER_ID, MACHINE_DEFINITION, null, 0)).get());
        probe = new TestKit(actorSystem);
        resourceClusterActor.tell(new GetClusterUsageRequest(
            CLUSTER_ID, ResourceClusterScalerActor.groupKeyFromTaskExecutorDefinitionIdFunc),
            probe.getRef());
        usageRes = probe.expectMsgClass(GetClusterUsageResponse.class);
        usage1 =
            usageRes.getUsages().stream()
                .filter(usage -> usage.getUsageGroupKey().equals(CONTAINER_DEF_ID_1.getResourceID())).findFirst().get();
        assertEquals(1, usage1.getIdleCount());
        assertEquals(1, usage1.getTotalCount());
        // test get non-empty job list
        resourceClusterActor.tell(new GetActiveJobsRequest(
            CLUSTER_ID),
            probe.getRef());
        jobsList = probe.expectMsgClass(PagedActiveJobOverview.class);
        assertEquals(1, jobsList.getActiveJobs().size());
        assertTrue(jobsList.getActiveJobs().contains(WORKER_ID.getJobId()));
        assertEquals(1, jobsList.getEndPosition());
        // test get idle list
        resourceClusterActor.tell(
            GetClusterIdleInstancesRequest.builder()
                .clusterID(CLUSTER_ID)
                .maxInstanceCount(2)
                .skuId(CONTAINER_DEF_ID_1)
                .build(),
            probe.getRef());
        idleInstancesResponse =
            probe.expectMsgClass(GetClusterIdleInstancesResponse.class);
        assertEquals(ImmutableList.of(TASK_EXECUTOR_ID), idleInstancesResponse.getInstanceIds());
        assertEquals(CONTAINER_DEF_ID_1, idleInstancesResponse.getSkuId());
        usage2 =
            usageRes.getUsages().stream()
                .filter(usage -> usage.getUsageGroupKey().equalsIgnoreCase(CONTAINER_DEF_ID_2.getResourceID()))
                .findFirst().get();
        assertEquals(0, usage2.getIdleCount());
        assertEquals(2, usage2.getTotalCount());
    }
    // An assignment that is never picked up expires after assignmentTimeout (1s) and the
    // executor becomes schedulable again.
    @Test
    public void testAssignmentTimeout() throws Exception {
        assertEquals(Ack.getInstance(), resourceCluster.registerTaskExecutor(TASK_EXECUTOR_REGISTRATION).get());
        assertEquals(Ack.getInstance(),
            resourceCluster
                .heartBeatFromTaskExecutor(
                    new TaskExecutorHeartbeat(
                        TASK_EXECUTOR_ID,
                        CLUSTER_ID,
                        TaskExecutorReport.available())).get());
        assertEquals(
            TASK_EXECUTOR_ID,
            resourceCluster.getTaskExecutorFor(TaskExecutorAllocationRequest.of(WORKER_ID, MACHINE_DEFINITION, null, 0)).get());
        assertEquals(ImmutableList.of(), resourceCluster.getAvailableTaskExecutors().get());
        // Wait past the 1s assignment timeout so the actor reclaims the executor.
        Thread.sleep(2000);
        assertEquals(ImmutableList.of(TASK_EXECUTOR_ID), resourceCluster.getAvailableTaskExecutors().get());
        assertEquals(
            TASK_EXECUTOR_ID,
            resourceCluster.getTaskExecutorFor(TaskExecutorAllocationRequest.of(WORKER_ID, MACHINE_DEFINITION, null, 0)).get());
    }
    // Registers 2n executors across n jobs (two workers per job) and verifies the active-job
    // overview, both unpaged and via paging with pageSize=5.
    @Test
    public void testGetMultipleActiveJobs() throws ExecutionException, InterruptedException {
        final int n = 10;
        List<String> expectedJobIdList = new ArrayList<>(n);
        for (int i = 0; i < n * 2; i++) {
            int idx = (i % n);
            TaskExecutorID taskExecutorID = TaskExecutorID.of("taskExecutorId" + i);
            assertEquals(Ack.getInstance(), resourceCluster.registerTaskExecutor(
                TaskExecutorRegistration.builder()
                    .taskExecutorID(taskExecutorID)
                    .clusterID(CLUSTER_ID)
                    .taskExecutorAddress(TASK_EXECUTOR_ADDRESS)
                    .hostname(HOST_NAME + i)
                    .workerPorts(WORKER_PORTS)
                    .machineDefinition(MACHINE_DEFINITION)
                    .taskExecutorAttributes(
                        ImmutableMap.of(
                            WorkerConstants.WORKER_CONTAINER_DEFINITION_ID, CONTAINER_DEF_ID_1.getResourceID(),
                            "attr1", "attr1"))
                    .build()
            ).get());
            assertEquals(Ack.getInstance(),
                resourceCluster
                    .heartBeatFromTaskExecutor(
                        new TaskExecutorHeartbeat(
                            taskExecutorID,
                            CLUSTER_ID,
                            TaskExecutorReport.available())).get());
            WorkerId workerId =
                WorkerId.fromIdUnsafe(String.format("late-sine-function-tutorial-%d-worker-%d-1", idx, i));
            if (i < n) {
                // Each distinct job id is expected only once.
                expectedJobIdList.add(String.format("late-sine-function-tutorial-%d", idx));
            }
            assertEquals(
                taskExecutorID,
                resourceCluster.getTaskExecutorFor(TaskExecutorAllocationRequest.of(workerId, MACHINE_DEFINITION, null, 0))
                    .get());
        }
        TestKit probe = new TestKit(actorSystem);
        resourceClusterActor.tell(new GetActiveJobsRequest(
            CLUSTER_ID),
            probe.getRef());
        PagedActiveJobOverview jobsList = probe.expectMsgClass(PagedActiveJobOverview.class);
        assertEquals(n, jobsList.getActiveJobs().size());
        assertEquals(expectedJobIdList, jobsList.getActiveJobs());
        assertEquals(n, jobsList.getEndPosition());
        // Page through the same list 5 at a time and verify we reassemble the full list.
        List<String> resJobsList = new ArrayList<>();
        int start = 0;
        do {
            resourceClusterActor.tell(
                GetActiveJobsRequest.builder()
                    .clusterID(CLUSTER_ID)
                    .startingIndex(Optional.of(start))
                    .pageSize(Optional.of(5))
                    .build(),
                probe.getRef());
            jobsList = probe.expectMsgClass(PagedActiveJobOverview.class);
            resJobsList.addAll(jobsList.getActiveJobs());
            assertTrue(jobsList.getActiveJobs().size() <= 5);
            start = jobsList.getEndPosition();
        } while (jobsList.getActiveJobs().size() > 0);
        assertEquals(expectedJobIdList, resJobsList);
    }
    // Disabling by attribute match removes an already-registered executor from scheduling.
    @Test
    public void testIfDisableTaskExecutorRequestsMarkTaskExecutorsAsDisabled() throws Exception {
        assertEquals(Ack.getInstance(), resourceCluster.registerTaskExecutor(TASK_EXECUTOR_REGISTRATION).get());
        // mark task executor as disabled with an expiry set far in the future
        resourceCluster.disableTaskExecutorsFor(ATTRIBUTES, Instant.now().plus(Duration.ofDays(1)), Optional.empty()).get();
        assertEquals(
            ImmutableList.of(),
            resourceCluster.getAvailableTaskExecutors().get());
        assertEquals(
            new ResourceOverview(1, 0, 0, 0, 1),
            resourceCluster.resourceOverview().get());
    }
    // A disable request issued before registration still applies to the late-arriving executor.
    @Test
    public void testIfDisableTaskExecutorRequestsMarkLateTaskExecutorsAsDisabled() throws Exception {
        resourceCluster.disableTaskExecutorsFor(ATTRIBUTES, Instant.now().plus(Duration.ofDays(1)), Optional.empty()).get();
        assertEquals(Ack.getInstance(), resourceCluster.registerTaskExecutor(TASK_EXECUTOR_REGISTRATION).get());
        assertEquals(
            ImmutableList.of(),
            resourceCluster.getAvailableTaskExecutors().get());
        assertEquals(
            new ResourceOverview(1, 0, 0, 0, 1),
            resourceCluster.resourceOverview().get());
    }
    // A disable request with a 1s expiry must no longer affect executors registered after it
    // has expired.
    @Test
    public void testIfDisableTaskExecutorRequestsAreExpiredCorrectly() throws Exception {
        assertEquals(Ack.getInstance(), resourceCluster.registerTaskExecutor(TASK_EXECUTOR_REGISTRATION).get());
        resourceCluster.disableTaskExecutorsFor(ATTRIBUTES, Instant.now().plus(Duration.ofSeconds(1)), Optional.empty()).get();
        assertEquals(
            new ResourceOverview(1, 0, 0, 0, 1),
            resourceCluster.resourceOverview().get());
        // Wait for the disable request to expire.
        Thread.sleep(5000);
        assertEquals(Ack.getInstance(), resourceCluster.registerTaskExecutor(TASK_EXECUTOR_REGISTRATION_2).get());
        assertEquals(Ack.getInstance(),
            resourceCluster
                .heartBeatFromTaskExecutor(
                    new TaskExecutorHeartbeat(
                        TASK_EXECUTOR_ID_2,
                        CLUSTER_ID,
                        TaskExecutorReport.available())).get());
        assertEquals(
            new ResourceOverview(2, 1, 0, 0, 1),
            resourceCluster.resourceOverview().get());
    }
    // Persisted disable requests are replayed when the actor starts.
    @Test
    public void testIfDisabledTaskExecutorRequestsAreInitializedCorrectlyWhenTheControlPlaneStarts() throws Exception {
        when(mantisJobStore.loadAllDisableTaskExecutorsRequests(ArgumentMatchers.eq(CLUSTER_ID)))
            .thenReturn(ImmutableList.of(
                new DisableTaskExecutorsRequest(
                    ATTRIBUTES,
                    CLUSTER_ID,
                    Instant.now().plus(Duration.ofDays(1)),
                    Optional.empty())));
        // Simulate a control-plane restart by recreating the actor against the stubbed store.
        actorSystem.stop(resourceClusterActor);
        setupActor();
        assertEquals(Ack.getInstance(), resourceCluster.registerTaskExecutor(TASK_EXECUTOR_REGISTRATION).get());
        assertEquals(
            new ResourceOverview(1, 0, 0, 0, 1),
            resourceCluster.resourceOverview().get());
    }
    // With TE 1 disabled (attribute match), scheduling must pick TE 2.
    @Test
    public void testIfDisabledTaskExecutorsAreNotAvailableForScheduling() throws Exception {
        assertEquals(Ack.getInstance(), resourceCluster.registerTaskExecutor(TASK_EXECUTOR_REGISTRATION).get());
        assertEquals(Ack.getInstance(), resourceCluster.registerTaskExecutor(TASK_EXECUTOR_REGISTRATION_2).get());
        assertEquals(
            Ack.getInstance(),
            resourceCluster.heartBeatFromTaskExecutor(
                new TaskExecutorHeartbeat(TASK_EXECUTOR_ID, CLUSTER_ID, TaskExecutorReport.available())).get());
        assertEquals(
            Ack.getInstance(),
            resourceCluster.heartBeatFromTaskExecutor(
                new TaskExecutorHeartbeat(TASK_EXECUTOR_ID_2, CLUSTER_ID, TaskExecutorReport.available())).get());
        resourceCluster.disableTaskExecutorsFor(ATTRIBUTES, Instant.now().plus(Duration.ofDays(1)), Optional.empty()).get();
        assertEquals(
            TASK_EXECUTOR_ID_2,
            resourceCluster.getTaskExecutorFor(TaskExecutorAllocationRequest.of(WORKER_ID, MACHINE_DEFINITION, null, 0)).get());
    }
    // Disabling an occupied executor keeps its task running but notifies the job via a
    // WorkerOnDisabledVM event routed through the JobMessageRouter.
    @Test
    public void testIfTaskExecutorsThatWereRunningTasksPreviouslyAndRunningCorrectly() throws Exception {
        assertEquals(Ack.getInstance(), resourceCluster.registerTaskExecutor(TASK_EXECUTOR_REGISTRATION).get());
        assertEquals(
            Ack.getInstance(),
            resourceCluster.heartBeatFromTaskExecutor(
                new TaskExecutorHeartbeat(TASK_EXECUTOR_ID, CLUSTER_ID, TaskExecutorReport.occupied(WORKER_ID))).get());
        resourceCluster.disableTaskExecutorsFor(ATTRIBUTES, Instant.now().plus(Duration.ofSeconds(1)), Optional.empty()).get();
        assertEquals(
            new ResourceOverview(1, 0, 1, 0, 1),
            resourceCluster.resourceOverview().get());
        ArgumentCaptor<WorkerEvent> workerEventCaptor = ArgumentCaptor.forClass(WorkerEvent.class);
        verify(jobMessageRouter).routeWorkerEvent(workerEventCaptor.capture());
        WorkerEvent actualWorkerEvent = workerEventCaptor.getValue();
        assertTrue(actualWorkerEvent instanceof WorkerOnDisabledVM);
        assertEquals(WORKER_ID, actualWorkerEvent.getWorkerId());
    }
    // Once the executor reports available again, the worker assignment is forgotten and the
    // lookup must fail with TaskNotFoundException.
    @Test(expected = TaskNotFoundException.class)
    public void testGetAssignedTaskExecutorAfterTaskCompletes() throws Throwable {
        assertEquals(Ack.getInstance(), resourceCluster.registerTaskExecutor(TASK_EXECUTOR_REGISTRATION).join());
        assertEquals(
            Ack.getInstance(),
            resourceCluster.heartBeatFromTaskExecutor(
                new TaskExecutorHeartbeat(TASK_EXECUTOR_ID, CLUSTER_ID, TaskExecutorReport.available())).join());
        assertEquals(
            TASK_EXECUTOR_ID,
            resourceCluster.getTaskExecutorFor(TaskExecutorAllocationRequest.of(WORKER_ID, MACHINE_DEFINITION, null, 0)).join());
        assertEquals(TASK_EXECUTOR_ID, resourceCluster.getTaskExecutorAssignedFor(WORKER_ID).join());
        assertEquals(Ack.getInstance(), resourceCluster.notifyTaskExecutorStatusChange(
            new TaskExecutorStatusChange(TASK_EXECUTOR_ID, CLUSTER_ID, TaskExecutorReport.occupied(WORKER_ID))).join());
        assertEquals(Ack.getInstance(), resourceCluster.notifyTaskExecutorStatusChange(
            new TaskExecutorStatusChange(TASK_EXECUTOR_ID, CLUSTER_ID, TaskExecutorReport.available())).join());
        try {
            resourceCluster.getTaskExecutorAssignedFor(WORKER_ID).join();
        } catch (Exception e) {
            // join() wraps failures in CompletionException; unwrap so @Test(expected=...) matches.
            throw ExceptionUtils.stripCompletionException(e);
        }
    }
}
/*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster;
import static io.mantisrx.master.resourcecluster.ResourceClusterActorTest.actorSystem;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.testkit.javadsl.TestKit;
import io.mantisrx.common.Ack;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetClusterUsageRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterScalerActor.ClusterAvailabilityRule;
import io.mantisrx.master.resourcecluster.ResourceClusterScalerActor.GetRuleSetRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterScalerActor.GetRuleSetResponse;
import io.mantisrx.master.resourcecluster.ResourceClusterScalerActor.ScaleDecision;
import io.mantisrx.master.resourcecluster.ResourceClusterScalerActor.ScaleType;
import io.mantisrx.master.resourcecluster.proto.GetClusterIdleInstancesRequest;
import io.mantisrx.master.resourcecluster.proto.GetClusterIdleInstancesResponse;
import io.mantisrx.master.resourcecluster.proto.GetClusterUsageResponse;
import io.mantisrx.master.resourcecluster.proto.GetClusterUsageResponse.UsageByGroupKey;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleSpec;
import io.mantisrx.master.resourcecluster.proto.ScaleResourceRequest;
import io.mantisrx.master.resourcecluster.writable.ResourceClusterScaleRulesWritable;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.server.master.persistence.IMantisPersistenceProvider;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.ContainerSkuID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorID;
import io.mantisrx.shaded.com.google.common.collect.ImmutableList;
import io.mantisrx.shaded.com.google.common.collect.ImmutableSet;
import java.io.IOException;
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.time.ZoneId;
import java.util.HashSet;
import java.util.Optional;
import java.util.Set;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
/**
 * Tests for {@code ResourceClusterScalerActor} and its {@code ClusterAvailabilityRule}:
 * scale-up/scale-down decisions from per-SKU usage, idle-instance draining, periodic rule-set
 * refresh from the persistence provider, and cool-down handling.
 */
public class ResourceClusterScalerActorTests {
    private static final ClusterID CLUSTER_ID = ClusterID.of("clusterId");
    private static final ContainerSkuID skuSmall = ContainerSkuID.of("small");
    private static final ContainerSkuID skuMedium = ContainerSkuID.of("medium");
    private static final ContainerSkuID skuLarge = ContainerSkuID.of("large");
    private ActorRef scalerActor;
    private IMantisPersistenceProvider storageProvider;
    // Probe standing in for the resource cluster actor (usage / idle-instance requests).
    private TestKit clusterActorProbe;
    // Probe standing in for the cluster host actor (receives ScaleResourceRequest).
    private TestKit hostActorProbe;
    // Representative machine sizes for the three SKUs; currently unused by the assertions.
    private static final MachineDefinition MACHINE_DEFINITION_S =
        new MachineDefinition(2, 2048, 700, 10240, 5);
    private static final MachineDefinition MACHINE_DEFINITION_L =
        new MachineDefinition(4, 16384, 1400, 81920, 5);
    private static final MachineDefinition MACHINE_DEFINITION_M =
        new MachineDefinition(3, 4096, 700, 10240, 5);
    @BeforeClass
    public static void setup() {
        actorSystem = ActorSystem.create();
    }
    @AfterClass
    public static void teardown() {
        TestKit.shutdownActorSystem(actorSystem);
        actorSystem = null;
    }
    // Stubs the persistence provider with scale rules for the small and large SKUs only;
    // the medium SKU deliberately has no rule.
    @Before
    public void setupMocks() throws IOException {
        clusterActorProbe = new TestKit(actorSystem);
        hostActorProbe = new TestKit(actorSystem);
        this.storageProvider = mock(IMantisPersistenceProvider.class);
        when(this.storageProvider.getResourceClusterScaleRules(CLUSTER_ID))
            .thenReturn(
                ResourceClusterScaleRulesWritable.builder()
                    .scaleRule(skuSmall.getResourceID(), ResourceClusterScaleSpec.builder()
                        .clusterId(CLUSTER_ID)
                        .skuId(skuSmall)
                        .coolDownSecs(10)
                        .maxIdleToKeep(10)
                        .minIdleToKeep(5)
                        .minSize(11)
                        .maxSize(15)
                        .build())
                    .scaleRule(skuLarge.getResourceID(), ResourceClusterScaleSpec.builder()
                        .clusterId(CLUSTER_ID)
                        .skuId(skuLarge)
                        .coolDownSecs(10)
                        .maxIdleToKeep(15)
                        .minIdleToKeep(5)
                        .minSize(11)
                        .maxSize(15)
                        .build())
                    .build());
    }
    // Drives one full scaler cycle: usage report -> scale-up for small, idle-list fetch and
    // scale-down for large, disable requests for the drained instances, then the next poll.
    @Test
    public void testScaler() {
        final Props props =
            ResourceClusterScalerActor.props(
                CLUSTER_ID,
                Clock.systemDefaultZone(),
                Duration.ofSeconds(1),
                Duration.ofSeconds(2),
                this.storageProvider,
                hostActorProbe.getRef(),
                clusterActorProbe.getRef());
        scalerActor = actorSystem.actorOf(props);
        GetClusterUsageRequest req = clusterActorProbe.expectMsgClass(GetClusterUsageRequest.class);
        assertEquals(CLUSTER_ID, req.getClusterID());
        // small: too few idle -> scale up; large: too many idle -> scale down (needs idle list);
        // medium: no rule configured, so it must be ignored.
        scalerActor.tell(
            GetClusterUsageResponse.builder()
                .clusterID(CLUSTER_ID)
                .usage(
                    UsageByGroupKey.builder().usageGroupKey(skuSmall.getResourceID()).idleCount(4).totalCount(10).build())
                .usage(
                    UsageByGroupKey.builder().usageGroupKey(skuLarge.getResourceID()).idleCount(16).totalCount(16).build())
                .usage(
                    UsageByGroupKey.builder().usageGroupKey(skuMedium.getResourceID()).idleCount(8).totalCount(15).build())
                .build(),
            clusterActorProbe.getRef());
        assertEquals(
            GetClusterIdleInstancesRequest.builder()
                .skuId(skuLarge)
                .clusterID(CLUSTER_ID)
                .desireSize(15)
                .maxInstanceCount(1)
                .build(),
            clusterActorProbe.expectMsgClass(GetClusterIdleInstancesRequest.class));
        assertNotNull(clusterActorProbe.expectMsgClass(Ack.class));
        Set<ScaleResourceRequest> decisions = new HashSet<>();
        // Only the small-SKU scale-up reaches the host actor here; the large-SKU scale-down is
        // deferred until the idle-instance list response arrives below.
        decisions.add(hostActorProbe.expectMsgClass(ScaleResourceRequest.class));
        int newSize = 11;
        assertTrue(decisions.contains(
            ScaleResourceRequest.builder()
                .clusterId(CLUSTER_ID)
                .skuId(skuSmall)
                .desireSize(newSize)
                .build()));
        // Test callback from fetch idle list.
        ImmutableList<TaskExecutorID> idleInstances = ImmutableList.of(
            TaskExecutorID.of("agent1"),
            TaskExecutorID.of("agent2"));
        scalerActor.tell(
            GetClusterIdleInstancesResponse.builder()
                .clusterId(CLUSTER_ID)
                .instanceIds(idleInstances)
                .skuId(skuLarge)
                .desireSize(15)
                .build(),
            clusterActorProbe.getRef());
        newSize = 15;
        assertEquals(
            ScaleResourceRequest.builder()
                .clusterId(CLUSTER_ID)
                .skuId(skuLarge)
                .desireSize(newSize)
                .idleInstances(idleInstances)
                .build(),
            hostActorProbe.expectMsgClass(ScaleResourceRequest.class));
        // validate the idle instances are disabled
        io.mantisrx.master.resourcecluster.DisableTaskExecutorsRequest disableTEReq =
            clusterActorProbe.expectMsgClass(io.mantisrx.master.resourcecluster.DisableTaskExecutorsRequest.class);
        io.mantisrx.master.resourcecluster.DisableTaskExecutorsRequest disableTEReq2 =
            clusterActorProbe.expectMsgClass(io.mantisrx.master.resourcecluster.DisableTaskExecutorsRequest.class);
        assertTrue(disableTEReq.getTaskExecutorID().isPresent());
        assertTrue(disableTEReq2.getTaskExecutorID().isPresent());
        assertEquals(
            ImmutableSet.of(disableTEReq2.getTaskExecutorID().get(), disableTEReq.getTaskExecutorID().get()),
            ImmutableSet.copyOf(idleInstances));
        // Test trigger again
        GetClusterUsageRequest req2 = clusterActorProbe.expectMsgClass(GetClusterUsageRequest.class);
        assertEquals(CLUSTER_ID, req2.getClusterID());
    }
    // The scaler periodically reloads its rule set; after the store is re-stubbed, the new
    // (medium-only) rules must replace the initial two.
    @Test
    public void testScalerRuleSetRefresh() throws InterruptedException, IOException {
        final Props props =
            ResourceClusterScalerActor.props(
                CLUSTER_ID,
                Clock.systemDefaultZone(),
                Duration.ofSeconds(100),
                Duration.ofSeconds(1),
                this.storageProvider,
                hostActorProbe.getRef(),
                clusterActorProbe.getRef());
        scalerActor = actorSystem.actorOf(props);
        scalerActor.tell(GetRuleSetRequest.builder().build(), clusterActorProbe.getRef());
        GetRuleSetResponse rules = clusterActorProbe.expectMsgClass(GetRuleSetResponse.class);
        assertEquals(2, rules.getRules().size());
        when(this.storageProvider.getResourceClusterScaleRules(CLUSTER_ID))
            .thenReturn(
                ResourceClusterScaleRulesWritable.builder()
                    .scaleRule(skuMedium.getResourceID(), ResourceClusterScaleSpec.builder()
                        .clusterId(CLUSTER_ID)
                        .skuId(skuMedium)
                        .coolDownSecs(10)
                        .maxIdleToKeep(20)
                        .minIdleToKeep(5)
                        .minSize(11)
                        .maxSize(15)
                        .build())
                    .build());
        // Wait past the 1s refresh interval so the actor reloads the rules.
        Thread.sleep(1500);
        scalerActor.tell(GetRuleSetRequest.builder().build(), clusterActorProbe.getRef());
        rules = clusterActorProbe.expectMsgClass(GetRuleSetResponse.class);
        assertEquals(1, rules.getRules().size());
        assertTrue(rules.getRules().containsKey(skuMedium));
    }
    // With a fixed clock, the second evaluation falls inside the cool-down window and yields
    // no decision.
    @Test
    public void testRuleCoolDown() {
        String skuId = "small";
        ClusterAvailabilityRule rule = new ClusterAvailabilityRule(
            ResourceClusterScaleSpec.builder()
                .clusterId(CLUSTER_ID)
                .skuId(ContainerSkuID.of(skuId))
                .coolDownSecs(10)
                .maxIdleToKeep(10)
                .minIdleToKeep(5)
                .minSize(11)
                .maxSize(15)
                .build(),
            Clock.fixed(Clock.systemUTC().instant(), ZoneId.systemDefault()),
            Instant.MIN,
            true);
        // Test scale up
        UsageByGroupKey usage = UsageByGroupKey.builder()
            .usageGroupKey(skuId).idleCount(4).totalCount(10).build();
        Optional<ScaleDecision> decision = rule.apply(usage);
        int newSize = 11;
        assertEquals(
            Optional.of(
                ScaleDecision.builder()
                    .clusterId(CLUSTER_ID)
                    .skuId(ContainerSkuID.of(skuId))
                    .desireSize(newSize)
                    .minSize(newSize)
                    .maxSize(newSize)
                    .type(ScaleType.ScaleUp)
                    .build()),
            decision);
        // test cool down
        usage = UsageByGroupKey.builder().usageGroupKey(skuId).idleCount(4).totalCount(10).build();
        assertEquals(Optional.empty(), rule.apply(usage));
    }
    // The request id is derived from cluster id, sku and a counter.
    @Test
    public void testScaleResourceRequestToRequestName() {
        ScaleResourceRequest r1 =
            ScaleResourceRequest.builder()
                .idleInstance(TaskExecutorID.of("t1"))
                .clusterId(CLUSTER_ID)
                .skuId(skuLarge)
                .build();
        assertEquals("clusterId---large-0", r1.getScaleRequestId());
    }
    // With a real clock and a 2s cool-down, the rule fires again once the window elapses.
    @Test
    public void testRuleFinishCoolDown() throws InterruptedException {
        String skuId = "small";
        ClusterAvailabilityRule rule = new ClusterAvailabilityRule(
            ResourceClusterScaleSpec.builder()
                .clusterId(CLUSTER_ID)
                .skuId(ContainerSkuID.of(skuId))
                .coolDownSecs(2)
                .maxIdleToKeep(10)
                .minIdleToKeep(5)
                .minSize(11)
                .maxSize(15)
                .build(),
            Clock.systemUTC(),
            Instant.MIN,
            true);
        // Test scale up
        UsageByGroupKey usage =
            UsageByGroupKey.builder().usageGroupKey(skuId).idleCount(4).totalCount(10).build();
        Optional<ScaleDecision> decision = rule.apply(usage);
        int newSize = 11;
        assertEquals(
            Optional.of(
                ScaleDecision.builder()
                    .clusterId(CLUSTER_ID)
                    .skuId(ContainerSkuID.of(skuId))
                    .desireSize(newSize)
                    .minSize(newSize)
                    .maxSize(newSize)
                    .type(ScaleType.ScaleUp)
                    .build()),
            decision);
        // test cool down
        usage = UsageByGroupKey.builder().usageGroupKey(skuId).idleCount(4).totalCount(10).build();
        assertEquals(Optional.empty(), rule.apply(usage));
        // Wait out the cool-down; the same usage must trigger a decision again.
        Thread.sleep(Duration.ofSeconds(3).toMillis());
        assertEquals(
            Optional.of(
                ScaleDecision.builder()
                    .clusterId(CLUSTER_ID)
                    .skuId(ContainerSkuID.of(skuId))
                    .desireSize(newSize)
                    .minSize(newSize)
                    .maxSize(newSize)
                    .type(ScaleType.ScaleUp)
                    .build()),
            rule.apply(usage));
    }
    // Core decision logic with cool-down disabled: scale-up, no-op, clamping at maxSize, and
    // scale-down clamping at minSize.
    @Test
    public void testRule() {
        String skuId = "small";
        ClusterAvailabilityRule rule = new ClusterAvailabilityRule(
            ResourceClusterScaleSpec.builder()
                .clusterId(CLUSTER_ID)
                .skuId(ContainerSkuID.of(skuId))
                .coolDownSecs(0)
                .maxIdleToKeep(10)
                .minIdleToKeep(5)
                .minSize(11)
                .maxSize(15)
                .build(),
            Clock.fixed(Instant.MIN, ZoneId.systemDefault()),
            Instant.MIN,
            true);
        // Test scale up
        UsageByGroupKey usage = UsageByGroupKey.builder()
            .usageGroupKey(skuId).idleCount(4).totalCount(10).build();
        Optional<ScaleDecision> decision = rule.apply(usage);
        int newSize = 11;
        assertEquals(
            Optional.of(
                ScaleDecision.builder()
                    .clusterId(CLUSTER_ID)
                    .skuId(ContainerSkuID.of(skuId))
                    .desireSize(newSize)
                    .minSize(newSize)
                    .maxSize(newSize)
                    .type(ScaleType.ScaleUp)
                    .build()),
            decision);
        // Test empty
        usage = UsageByGroupKey.builder().usageGroupKey(skuId).idleCount(9).totalCount(11).build();
        decision = rule.apply(usage);
        assertEquals(
            Optional.empty(),
            decision);
        // Test scale up hits max
        usage = UsageByGroupKey.builder().usageGroupKey(skuId).idleCount(0).totalCount(11).build();
        decision = rule.apply(usage);
        newSize = 15;
        assertEquals(
            Optional.of(
                ScaleDecision.builder()
                    .clusterId(CLUSTER_ID)
                    .skuId(ContainerSkuID.of(skuId))
                    .desireSize(newSize)
                    .minSize(newSize)
                    .maxSize(newSize)
                    .type(ScaleType.ScaleUp)
                    .build()),
            decision);
        // Test scale down
        usage = UsageByGroupKey.builder().usageGroupKey(skuId).idleCount(15).totalCount(20).build();
        decision = rule.apply(usage);
        newSize = 15;
        assertEquals(
            Optional.of(
                ScaleDecision.builder()
                    .clusterId(CLUSTER_ID)
                    .skuId(ContainerSkuID.of(skuId))
                    .desireSize(newSize)
                    .minSize(newSize)
                    .maxSize(newSize)
                    .type(ScaleType.ScaleDown)
                    .build()),
            decision);
        // Test scale down hits min.
        usage = UsageByGroupKey.builder().usageGroupKey(skuId).idleCount(15).totalCount(15).build();
        decision = rule.apply(usage);
        newSize = 11;
        assertEquals(
            Optional.of(
                ScaleDecision.builder()
                    .clusterId(CLUSTER_ID)
                    .skuId(ContainerSkuID.of(skuId))
                    .desireSize(newSize)
                    .minSize(newSize)
                    .maxSize(newSize)
                    .type(ScaleType.ScaleDown)
                    .build()),
            decision);
    }
}
| 7,903 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route/LeaderRedirectionRouteTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import akka.NotUsed;
import akka.actor.ActorSystem;
import akka.http.javadsl.ConnectHttp;
import akka.http.javadsl.Http;
import akka.http.javadsl.ServerBinding;
import akka.http.javadsl.model.HttpEntity;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.stream.ActorMaterializer;
import akka.stream.javadsl.Flow;
import akka.util.ByteString;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.api.akka.route.v0.MasterDescriptionRoute;
import io.mantisrx.master.jobcluster.job.JobTestHelper;
import io.mantisrx.server.core.master.LocalMasterMonitor;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.core.master.MasterMonitor;
import io.mantisrx.server.master.ILeadershipManager;
import io.mantisrx.server.master.LeaderRedirectionFilter;
import io.mantisrx.server.master.LeadershipManagerLocalImpl;
import java.util.Optional;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Verifies leader-redirection behavior of API routes: requests are answered with
 * 503 while this instance is leader but not yet ready, served directly (200) once
 * the leader is ready, and answered with a 302 redirect to the current leader's
 * host/port after this instance stops being leader.
 */
public class LeaderRedirectionRouteTest {
    private final static Logger logger = LoggerFactory.getLogger(LeaderRedirectionRouteTest.class);
    private final ActorMaterializer materializer = ActorMaterializer.create(system);
    private final Http http = Http.get(system);
    // Daemon thread hosting the route under test; started in setup(), interrupted in teardown().
    private static Thread t;
    private static final int serverPort = 8205;
    // The fake master advertises the same port the test server binds, so the
    // redirect Location can be asserted deterministically.
    private static final int targetEndpointPort = serverPort;
    private static final MasterDescription fakeMasterDesc = new MasterDescription(
        "example.com",
        "127.0.0.1", targetEndpointPort,
        targetEndpointPort + 2,
        targetEndpointPort + 4,
        "api/postjobstatus",
        targetEndpointPort + 6,
        System.currentTimeMillis());

    // Optionally asserts the HTTP status code, verifies the CORS header, then
    // folds the response entity bytes into a single UTF-8 string.
    private CompletionStage<String> processRespFut(final HttpResponse r, final Optional<Integer> expectedStatusCode) {
        logger.info("headers {} {}", r.getHeaders(), r.status());
        expectedStatusCode.ifPresent(sc -> assertEquals(sc.intValue(), r.status().intValue()));
        assert(r.getHeader("Access-Control-Allow-Origin").isPresent());
        assertEquals("*", r.getHeader("Access-Control-Allow-Origin").get().value());
        CompletionStage<HttpEntity.Strict> strictEntity = r.entity().toStrict(1000, materializer);
        return strictEntity.thenCompose(s ->
            s.getDataBytes()
                .runFold(ByteString.emptyByteString(), (acc, b) -> acc.concat(b), materializer)
                .thenApply(s2 -> s2.utf8String())
        );
    }

    // Fails the test if the async stage completed exceptionally; otherwise
    // returns the response body.
    private String getResponseMessage(final String msg, final Throwable t) {
        if (t != null) {
            logger.error("got err ", t);
            fail(t.getMessage());
        } else {
            return msg;
        }
        return "";
    }

    private static CompletionStage<ServerBinding> binding;
    private static ActorSystem system = ActorSystem.create("MasterDescriptionRouteTest");
    private static final MasterMonitor masterMonitor = new LocalMasterMonitor(fakeMasterDesc);
    private static final ILeadershipManager leadershipMgr = new LeadershipManagerLocalImpl(fakeMasterDesc);

    // Boots an HTTP server on a daemon thread hosting the master-description
    // route wrapped in the leader-redirection filter; blocks until the bind has
    // been attempted (success or failure) via the latch.
    @BeforeClass
    public static void setup() throws Exception {
        JobTestHelper.deleteAllFiles();
        JobTestHelper.createDirsIfRequired();
        final CountDownLatch latch = new CountDownLatch(1);
        t = new Thread(() -> {
            try {
                // boot up server using the route as defined below
                final Http http = Http.get(system);
                final ActorMaterializer materializer = ActorMaterializer.create(system);
                TestHelpers.setupMasterConfig();
                final MasterDescriptionRoute app = new MasterDescriptionRoute(fakeMasterDesc);
                final LeaderRedirectionFilter leaderRedirectionFilter = new LeaderRedirectionFilter(masterMonitor, leadershipMgr);
                final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow = app.createRoute(leaderRedirectionFilter::redirectIfNotLeader).flow(system, materializer);
                logger.info("starting test server on port {}", serverPort);
                binding = http.bindAndHandle(routeFlow,
                    ConnectHttp.toHost("localhost", serverPort), materializer);
                latch.countDown();
            } catch (Exception e) {
                logger.info("caught exception", e);
                latch.countDown();
                e.printStackTrace();
            }
        });
        t.setDaemon(true);
        t.start();
        latch.await();
    }

    // Unbinds the server port, then terminates the actor system.
    @AfterClass
    public static void teardown() {
        logger.info("MasterDescriptionRouteTest teardown");
        binding
            .thenCompose(ServerBinding::unbind) // trigger unbinding from the port
            .thenAccept(unbound -> system.terminate()); // and shutdown when done
        t.interrupt();
    }

    // Builds the absolute URL for an API endpoint on the local test server.
    private String masterEndpoint(final String ep) {
        return String.format("http://127.0.0.1:%d/api/%s", targetEndpointPort, ep);
    }

    // Exercises the three leadership states in sequence:
    //   1) leader but not ready  -> 503 with an "awaiting" message,
    //   2) leader and ready      -> 200 with the master description JSON,
    //   3) no longer the leader  -> 302 redirect to the leader's advertised host/port.
    @Test
    public void testMasterInfoAPIWhenLeader() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        // leader is not ready by default
        CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.GET(masterEndpoint("masterinfo")));
        responseFuture
            .thenCompose(r -> processRespFut(r, Optional.of(503)))
            .whenComplete((msg, t) -> {
                try {
                    String responseMessage = getResponseMessage(msg, t);
                    logger.info("got response {}", responseMessage);
                    assertEquals("Mantis master awaiting to be ready", responseMessage);
                } catch (Exception e) {
                    fail("unexpected error "+ e.getMessage());
                }
                latch.countDown();
            });
        assertTrue(latch.await(2, TimeUnit.SECONDS));
        // mark the leader as bootstrapped and ready
        leadershipMgr.setLeaderReady();
        final CountDownLatch latch2 = new CountDownLatch(1);
        final CompletionStage<HttpResponse> respF = http.singleRequest(
            HttpRequest.GET(masterEndpoint("masterinfo")));
        respF
            .thenCompose(r -> processRespFut(r, Optional.of(200)))
            .whenComplete((msg, t) -> {
                try {
                    String responseMessage = getResponseMessage(msg, t);
                    logger.info("got response {}", responseMessage);
                    MasterDescription masterDescription = Jackson.fromJSON(responseMessage, MasterDescription.class);
                    logger.info("master desc ---> {}", masterDescription);
                    assertEquals(fakeMasterDesc, masterDescription);
                } catch (Exception e) {
                    fail("unexpected error "+ e.getMessage());
                }
                latch2.countDown();
            });
        assertTrue(latch2.await(2, TimeUnit.SECONDS));
        leadershipMgr.stopBeingLeader();
        responseFuture = http.singleRequest(
            HttpRequest.GET(masterEndpoint("masterinfo")));
        try {
            responseFuture
                .thenCompose(r -> {
                    logger.info("headers {} {}", r.getHeaders(), r.status());
                    assertEquals(302, r.status().intValue());
                    assert(r.getHeader("Access-Control-Allow-Origin").isPresent());
                    assertEquals("*", r.getHeader("Access-Control-Allow-Origin").get().value());
                    assert(r.getHeader("Location").isPresent());
                    assertEquals("http://example.com:"+targetEndpointPort+"/api/masterinfo", r.getHeader("Location").get().value());
                    CompletionStage<HttpEntity.Strict> strictEntity = r.entity().toStrict(1000, materializer);
                    return strictEntity.thenCompose(s ->
                        s.getDataBytes()
                            .runFold(ByteString.emptyByteString(), (acc, b) -> acc.concat(b), materializer)
                            .thenApply(s2 -> s2.utf8String())
                    );
                })
                .whenComplete((msg, t) -> {
                    try {
                        String responseMessage = getResponseMessage(msg, t);
                        logger.info("got response {}", responseMessage);
                    } catch (Exception e) {
                        fail("unexpected error "+ e.getMessage());
                    }
                }).toCompletableFuture()
                .get(2, TimeUnit.SECONDS);
        } catch (ExecutionException e) {
            throw new RuntimeException(e);
        } catch (TimeoutException e) {
            throw new RuntimeException(e);
        }
        // restore leadership so later tests see a consistent state
        leadershipMgr.becomeLeader();
    }
}
| 7,904 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route/JacksonTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route;
import static org.junit.Assert.*;
import io.mantisrx.common.Ack;
import io.mantisrx.master.jobcluster.job.MantisJobMetadataView;
import io.mantisrx.server.master.store.MantisWorkerMetadataWritable;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.util.List;
import java.util.Optional;
import org.junit.Test;
/**
 * Serialization/deserialization tests for the shared Jackson configuration:
 * java.util.Optional handling, the MantisWorkerMetadataWritable "cluster"
 * field filter, and the stable Java-serialized byte form of Ack.
 */
public class JacksonTest {
    // Lenient mapper: unknown JSON properties must not break deserialization of
    // captured payloads.
    private static final ObjectMapper objectMapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

    // Deserializes a captured job-metadata payload and verifies that a worker's
    // "cluster" Optional is serialized only under the default filter provider,
    // and collapses back to empty after a plain serialize/deserialize round trip.
    @Test
    public void testDeser4() throws IOException {
        final String jsonStr = "[{\"jobMetadata\":{\"jobId\":\"sine-function-1\",\"name\":\"sine-function\"," +
            "\"user\":\"nmahilani\",\"submittedAt\":1527703650220,\"jarUrl\":\"https://mantis.staging.us-east-1.prod.netflix.net/mantis-artifacts/mantis-examples-sine-function-0.2.9.zip\"," +
            "\"numStages\":2,\"sla\":{\"runtimeLimitSecs\":0,\"minRuntimeSecs\":0,\"slaType\":\"Lossy\",\"durationType\":\"Perpetual\",\"userProvidedType\":\"\"}," +
            "\"state\":\"Accepted\",\"subscriptionTimeoutSecs\":0,\"parameters\":[{\"name\":\"useRandom\",\"value\":\"True\"}],\"nextWorkerNumberToUse\":11," +
            "\"migrationConfig\":{\"strategy\":\"PERCENTAGE\",\"configString\":\"{\\\"percentToMove\\\":25,\\\"intervalMs\\\":60000}\"}," +
            "\"labels\":[{\"name\":\"_mantis.user\",\"value\":\"nmahilani\"},{\"name\":\"_mantis.ownerEmail\",\"value\":\"nmahilani@netflix.com\"}," +
            "{\"name\":\"_mantis.jobType\",\"value\":\"other\"},{\"name\":\"_mantis.criticality\",\"value\":\"low\"},{\"name\":\"_mantis.artifact.version\",\"value\":\"0.2.9\"}]}," +
            "\"stageMetadataList\":[{\"jobId\":\"sine-function-1\",\"stageNum\":0,\"numStages\":2,\"machineDefinition\":{\"cpuCores\":1.0,\"memoryMB\":200.0,\"networkMbps\":128.0,\"diskMB\":1024.0,\"numPorts\":1}," +
            "\"numWorkers\":1,\"hardConstraints\":null,\"softConstraints\":null,\"scalingPolicy\":null,\"scalable\":false}," +
            "{\"jobId\":\"sine-function-1\",\"stageNum\":1,\"numStages\":2,\"machineDefinition\":{\"cpuCores\":1.0,\"memoryMB\":200.0,\"networkMbps\":128.0,\"diskMB\":1024.0,\"numPorts\":1},\"numWorkers\":1,\"hardConstraints\":[],\"softConstraints\":[\"M4Cluster\"]," +
            "\"scalingPolicy\":{\"stage\":1,\"min\":1,\"max\":10,\"increment\":2,\"decrement\":1,\"coolDownSecs\":600," +
            "\"strategies\":{\"CPU\":{\"reason\":\"CPU\",\"scaleDownBelowPct\":15.0,\"scaleUpAbovePct\":75.0,\"rollingCount\":{\"count\":12,\"of\":20}}},\"enabled\":true},\"scalable\":true}]," +
            "\"workerMetadataList\":[{\"workerIndex\":0,\"workerNumber\":2,\"jobId\":\"sine-function-1\",\"stageNum\":0,\"numberOfPorts\":4,\"metricsPort\":0,\"consolePort\":0," +
            "\"debugPort\":-1,\"ports\":[],\"state\":\"Accepted\",\"slave\":null,\"slaveID\":null,\"cluster\":{\"present\":false},\"acceptedAt\":1527703650231,\"launchedAt\":0,\"startingAt\":0,\"startedAt\":0," +
            "\"completedAt\":0,\"reason\":null,\"resubmitOf\":-1,\"totalResubmitCount\":0},{\"workerIndex\":0,\"workerNumber\":3,\"jobId\":\"sine-function-1\",\"stageNum\":1,\"numberOfPorts\":4,\"metricsPort\":0,\"consolePort\":0,\"debugPort\":-1,\"ports\":[],\"state\":\"Accepted\"," +
            "\"slave\":null,\"slaveID\":null,\"cluster\":{\"present\":false},\"acceptedAt\":1527703650232,\"launchedAt\":0,\"startingAt\":0,\"startedAt\":0,\"completedAt\":0," +
            "\"reason\":null,\"resubmitOf\":-1,\"totalResubmitCount\":0}]}]";
        final List<MantisJobMetadataView> jobIdInfos = Jackson.fromJSON(objectMapper, jsonStr, new TypeReference<List<MantisJobMetadataView>>() { });
        assertEquals(1, jobIdInfos.size());
        final MantisJobMetadataView jobInfo = jobIdInfos.get(0);
        assertEquals("sine-function-1", jobInfo.getJobMetadata().getJobId());
        assertEquals(2, jobInfo.getWorkerMetadataList().size());
        assertEquals(2, jobInfo.getStageMetadataList().size());
        MantisWorkerMetadataWritable mwm = jobInfo.getWorkerMetadataList().get(0);
        mwm.setCluster(Optional.of("test"));
        // With the default filter provider the populated Optional is serialized...
        final String out = objectMapper.writer(Jackson.DEFAULT_FILTER_PROVIDER).writeValueAsString(mwm);
        assertTrue(out.contains("\"cluster\":{\"present\":true},"));
        // ...but a plain round trip does not carry the cluster value through.
        final String serializeAgain = objectMapper.writeValueAsString(objectMapper.readValue(out, MantisWorkerMetadataWritable.class));
        assertFalse(serializeAgain.contains("\"cluster\":{\"present\":true},"));
        assertTrue(serializeAgain.contains("\"cluster\":{\"present\":false},"));
    }

    // Optional is serialized as a {"present": boolean} wrapper, not as its value.
    @Test
    public void testOptionalSerialization() throws JsonProcessingException {
        assertEquals("{\"present\":false}", objectMapper.writeValueAsString(Optional.empty()));
        assertEquals("{\"present\":true}", objectMapper.writeValueAsString(Optional.of("test")));
    }

    // Ack serializes to an empty JSON object, and its Java-serialized byte form
    // is pinned so accidental serialVersionUID / field changes are caught.
    @Test
    public void testAckSerialization() throws Exception {
        Ack ack = Ack.getInstance();
        String s = Jackson.toJson(ack);
        assertEquals("{}", s);
        try (ByteArrayOutputStream bos = new ByteArrayOutputStream(); ObjectOutputStream out = new ObjectOutputStream(bos)) {
            out.writeObject(ack);
            out.flush();
            byte[] actual = bos.toByteArray();
            byte[] expected = {-84, -19, 0, 5, 115, 114, 0, 22, 105, 111, 46, 109, 97, 110, 116, 105, 115, 114, 120, 46, 99, 111, 109, 109, 111, 110, 46, 65, 99, 107, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0, 120, 112};
            assertArrayEquals(expected, actual);
        }
    }
}
| 7,905 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route/LeaderRedirectionFilterTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import akka.http.javadsl.server.AllDirectives;
import akka.http.javadsl.server.Route;
import io.mantisrx.server.core.master.LocalMasterMonitor;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.core.master.MasterMonitor;
import io.mantisrx.server.master.ILeadershipManager;
import io.mantisrx.server.master.LeaderRedirectionFilter;
import io.mantisrx.server.master.LeadershipManagerLocalImpl;
import org.junit.Test;
/**
 * Verifies that LeaderRedirectionFilter passes a route through unchanged only
 * when this instance is the ready leader, and substitutes a different
 * (redirecting / not-ready) route otherwise.
 */
public class LeaderRedirectionFilterTest extends AllDirectives {

    // Builds the master description both tests share, pointing at localhost.
    private static MasterDescription localMaster() {
        return new MasterDescription(
            "localhost",
            "127.0.0.1", 8100,
            8100 + 2,
            8100 + 4,
            "api/postjobstatus",
            8100 + 6,
            System.currentTimeMillis());
    }

    @Test
    public void testRouteUnchangedIfLeader() {
        final MasterDescription fakeMasterDesc = localMaster();
        final MasterMonitor masterMonitor = new LocalMasterMonitor(fakeMasterDesc);
        final ILeadershipManager leadershipManager = new LeadershipManagerLocalImpl(fakeMasterDesc);
        leadershipManager.becomeLeader();
        final LeaderRedirectionFilter filter = new LeaderRedirectionFilter(masterMonitor, leadershipManager);
        final Route testRoute = route(path("test", () -> complete("done")));

        // Leader but not yet marked ready: the filter must still swap the route out.
        assertNotEquals(testRoute, filter.redirectIfNotLeader(testRoute));

        // Once the leader is marked ready, the original route is served unchanged.
        leadershipManager.setLeaderReady();
        assertEquals(testRoute, filter.redirectIfNotLeader(testRoute));
    }

    @Test
    public void testRouteChangesIfNotLeader() {
        final MasterDescription fakeMasterDesc = localMaster();
        final MasterMonitor masterMonitor = new LocalMasterMonitor(fakeMasterDesc);
        final ILeadershipManager leadershipManager = new LeadershipManagerLocalImpl(fakeMasterDesc);

        // Not the leader: the filter must return a redirecting route, never the input route.
        leadershipManager.stopBeingLeader();
        final LeaderRedirectionFilter filter = new LeaderRedirectionFilter(masterMonitor, leadershipManager);
        final Route testRoute = route(path("test", () -> complete("done")));
        assertNotEquals(testRoute, filter.redirectIfNotLeader(testRoute));
    }
}
| 7,906 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route/v1/JobClustersRouteTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import akka.NotUsed;
import akka.actor.ActorRef;
import akka.http.javadsl.ConnectHttp;
import akka.http.javadsl.Http;
import akka.http.javadsl.ServerBinding;
import akka.http.javadsl.model.ContentTypes;
import akka.http.javadsl.model.HttpEntities;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.http.javadsl.model.StatusCodes;
import akka.stream.ActorMaterializer;
import akka.stream.javadsl.Flow;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.JobClustersManagerActor;
import io.mantisrx.master.api.akka.payloads.JobClusterPayloads;
import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandlerAkkaImpl;
import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventPublisherImpl;
import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl;
import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl;
import io.mantisrx.master.jobcluster.job.CostsCalculator;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.scheduler.FakeMantisScheduler;
import io.mantisrx.server.master.persistence.FileBasedPersistenceProvider;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.MantisSchedulerFactory;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.JsonNode;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import org.apache.commons.io.FileUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class JobClustersRouteTest extends RouteTestBase {
private final static Logger logger = LoggerFactory.getLogger(JobClustersRouteTest.class);
private static Thread t;
private static final int SERVER_PORT = 8200;
private static CompletionStage<ServerBinding> binding;
private static File stateDirectory;
private static String TEST_CLUSTER_NAME = "sine-function";
// Binds the shared RouteTestBase helpers to this suite's name and server port.
public JobClustersRouteTest() {
    super("JobClustersRouteTest", SERVER_PORT);
}
// Boots the v1 job-clusters route on a local HTTP server backed by a
// JobClustersManagerActor with file-based persistence in a temp directory and a
// fake scheduler. Runs on a daemon thread; blocks until the bind is attempted.
@BeforeClass
public static void setup() throws Exception {
    TestHelpers.setupMasterConfig();
    final CountDownLatch latch = new CountDownLatch(1);
    stateDirectory = Files.createTempDirectory("test").toFile();
    t = new Thread(() -> {
        try {
            // boot up server using the route as defined below
            final Http http = Http.get(system);
            final ActorMaterializer materializer = ActorMaterializer.create(system);
            final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl(
                new AuditEventSubscriberLoggingImpl(),
                new StatusEventSubscriberLoggingImpl(),
                new WorkerEventSubscriberLoggingImpl());
            ActorRef jobClustersManagerActor = system.actorOf(
                JobClustersManagerActor.props(
                    new MantisJobStore(new FileBasedPersistenceProvider(stateDirectory, true)),
                    lifecycleEventPublisher,
                    CostsCalculator.noop()),
                "jobClustersManager");
            // Scheduler factory always hands out the fake in-memory scheduler.
            MantisSchedulerFactory mantisSchedulerFactory = mock(MantisSchedulerFactory.class);
            MantisScheduler fakeScheduler = new FakeMantisScheduler(jobClustersManagerActor);
            when(mantisSchedulerFactory.forJob(any())).thenReturn(fakeScheduler);
            jobClustersManagerActor.tell(
                new JobClusterManagerProto.JobClustersManagerInitialize(
                    mantisSchedulerFactory, false), ActorRef.noSender());
            final JobClusterRouteHandler jobClusterRouteHandler = new JobClusterRouteHandlerAkkaImpl(
                jobClustersManagerActor);
            final JobClustersRoute app = new JobClustersRoute(jobClusterRouteHandler, system);
            final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow =
                app.createRoute(Function.identity())
                    .flow(system, materializer);
            logger.info("starting test server on port {}", SERVER_PORT);
            binding = http.bindAndHandle(
                routeFlow,
                ConnectHttp.toHost("localhost", SERVER_PORT),
                materializer);
            latch.countDown();
        } catch (Exception e) {
            logger.info("caught exception", e);
            latch.countDown();
            e.printStackTrace();
        }
    });
    t.setDaemon(true);
    t.start();
    latch.await();
}
// Unbinds the HTTP server, terminates the actor system, and removes the
// temporary persistence directory created in setup().
@AfterClass
public static void tearDown() throws Exception {
    logger.info("V1JobClusterRouteTest teardown");
    binding.thenCompose(ServerBinding::unbind) // trigger unbinding from the port
        .thenAccept(unbound -> system.terminate()); // and shutdown when done
    t.interrupt();
    FileUtils.deleteDirectory(stateDirectory);
}
// Single ordered mega-test: the helper methods below share server state (the
// sine-function cluster created first), so they must run in exactly this
// sequence and cannot be independent @Test methods.
@Test
public void testIt() throws Exception {
    cleanupExistingJobs();
    testJobClusterCreate();
    testDuplicateJobClusterCreate();
    testNonExistentJobClusterLatestJobDiscoveryInfo();
    testJobClusterLatestJobDiscoveryInfoNoRunningJobs();
    testJobClustersList();
    testJobClustersDelete();
    testJobClustersPut();
    testJobClusterInstanceGET();
    testNonExistentJobClusterInstanceGET();
    testJobClusterInstancePOSTNotAllowed();
    testJobClusterInstanceValidUpdate();
    testJobClusterInstanceInvalidUpdate();
    testJobClusterInstanceNonExistentUpdate();
    testJobClusterNonExistentDelete();
    testJobClusterActionUpdateArtifactPost();
    testJobClusterActionUpdateArtifactPostNonExistent();
    testJobClusterActionUpdateArtifactPostNonMatchedResource();
    testJobClusterActionUpdateArtifactGetNotAllowed();
    testJobClusterActionUpdateArtifactPUTNotAllowed();
    testJobClusterActionUpdateArtifactDELETENotAllowed();
    testJobClusterActionUpdateSlaPost();
    testJobClusterActionUpdateSlaPostNonExistent();
    testJobClusterActionUpdateSlaPostNonMatchedResource();
    testJobClusterActionUpdateSlaGetNotAllowed();
    testJobClusterActionUpdateSlaPUTNotAllowed();
    testJobClusterActionUpdateSlaDELETENotAllowed();
    testJobClusterActionUpdateMigrationPost();
    testJobClusterActionUpdateMigrationPostNonExistent();
    testJobClusterActionUpdateMigrationPostNonMatchedResource();
    testJobClusterActionUpdateMigrationGetNotAllowed();
    testJobClusterActionUpdateMigrationPUTNotAllowed();
    testJobClusterActionUpdateMigrationDELETENotAllowed();
    testJobClusterActionUpdateLabelPost();
    testJobClusterActionUpdateLabelPostNonExistent();
    testJobClusterActionUpdateLabelPostNonMatchedResource();
    testJobClusterActionUpdateLabelGetNotAllowed();
    testJobClusterActionUpdateLabelPUTNotAllowed();
    testJobClusterActionUpdateLabelDELETENotAllowed();
    testJobClusterActionEnablePost();
    testJobClusterActionEnablePostNonExistent();
    testJobClusterActionEnablePostNonMatchedResource();
    testJobClusterActionEnableGetNotAllowed();
    testJobClusterActionEnablePUTNotAllowed();
    testJobClusterActionEnableDELETENotAllowed();
    testJobClusterActionDisablePost();
    testJobClusterActionDisablePostNonExistent();
    testJobClusterActionDisablePostNonMatchedResource();
    testJobClusterActionDisableGetNotAllowed();
    testJobClusterActionDisablePUTNotAllowed();
    testJobClusterActionDisableDELETENotAllowed();
    testJobClusterDeleteWithoutRequiredParam();
    testJobClusterValidDelete();
}
// Best-effort delete of the test cluster left over from a previous run. Only
// waits for the request to complete; the response status is intentionally
// ignored (the cluster may not exist).
private void cleanupExistingJobs() throws InterruptedException {
    final CountDownLatch requestDone = new CountDownLatch(1);
    http.singleRequest(HttpRequest.DELETE(getJobClusterInstanceEndpoint(TEST_CLUSTER_NAME)))
        .whenComplete((response, failure) -> requestDone.countDown());
    assertTrue(requestDone.await(1, TimeUnit.SECONDS));
}
/** Cluster create/list/instance CRUD checks. **/

// Creating the sine-function cluster returns 201 and the cluster becomes visible.
private void testJobClusterCreate() throws InterruptedException {
    testPost(
        getJobClustersEndpoint(),
        HttpEntities.create(
            ContentTypes.APPLICATION_JSON,
            JobClusterPayloads.JOB_CLUSTER_CREATE),
        StatusCodes.CREATED,
        this::compareClusterInstancePayload);
    assert this.isClusterExist(TEST_CLUSTER_NAME);
}

// Creating the same cluster again is rejected with 409.
private void testDuplicateJobClusterCreate() throws InterruptedException {
    testPost(
        getJobClustersEndpoint(),
        HttpEntities.create(
            ContentTypes.APPLICATION_JSON,
            JobClusterPayloads.JOB_CLUSTER_CREATE),
        StatusCodes.CONFLICT,
        null);
}

// Discovery info for an unknown cluster is 404.
private void testNonExistentJobClusterLatestJobDiscoveryInfo() throws InterruptedException {
    testGet(
        getJobClusterLatestJobDiscoveryInfoEp("NonExistentCluster"),
        StatusCodes.NOT_FOUND,
        null);
}

// Discovery info for a known cluster with no running jobs is also 404.
private void testJobClusterLatestJobDiscoveryInfoNoRunningJobs() throws InterruptedException {
    testGet(
        getJobClusterLatestJobDiscoveryInfoEp(TEST_CLUSTER_NAME),
        StatusCodes.NOT_FOUND,
        null);
}

// Listing clusters succeeds and matches the expected payload.
private void testJobClustersList() throws InterruptedException {
    testGet(
        getJobClustersEndpoint(),
        StatusCodes.OK,
        this::compareClustersPayload
    );
}

// DELETE/PUT on the collection endpoint are not allowed.
private void testJobClustersDelete() throws InterruptedException {
    testDelete(
        getJobClustersEndpoint(),
        StatusCodes.METHOD_NOT_ALLOWED,
        null);
}

private void testJobClustersPut() throws InterruptedException {
    testPut(
        getJobClustersEndpoint(),
        StatusCodes.METHOD_NOT_ALLOWED,
        null);
}

// Fetching a single cluster instance returns it.
private void testJobClusterInstanceGET() throws InterruptedException {
    testGet(
        getJobClusterInstanceEndpoint(TEST_CLUSTER_NAME),
        StatusCodes.OK,
        this::compareClusterInstancePayload);
}

private void testNonExistentJobClusterInstanceGET() throws InterruptedException {
    testGet(
        getJobClusterInstanceEndpoint("doesNotExist"),
        StatusCodes.NOT_FOUND,
        null
    );
}

// POST on an instance endpoint is not allowed (instances are updated via PUT).
private void testJobClusterInstancePOSTNotAllowed() throws InterruptedException {
    testPost(
        getJobClusterInstanceEndpoint(TEST_CLUSTER_NAME),
        StatusCodes.METHOD_NOT_ALLOWED,
        null);
}

// Full-instance update: valid payload -> 200, invalid -> 400, unknown cluster -> 404.
private void testJobClusterInstanceValidUpdate() throws InterruptedException {
    testPut(
        getJobClusterInstanceEndpoint(TEST_CLUSTER_NAME),
        HttpEntities.create(
            ContentTypes.APPLICATION_JSON,
            JobClusterPayloads.JOB_CLUSTER_VALID_UPDATE),
        StatusCodes.OK,
        this::compareClusterInstancePayload);
}

private void testJobClusterInstanceInvalidUpdate() throws InterruptedException {
    testPut(
        getJobClusterInstanceEndpoint(TEST_CLUSTER_NAME),
        HttpEntities.create(
            ContentTypes.APPLICATION_JSON,
            JobClusterPayloads.JOB_CLUSTER_INVALID_UPDATE),
        StatusCodes.BAD_REQUEST,
        null);
}

private void testJobClusterInstanceNonExistentUpdate() throws InterruptedException {
    testPut(
        getJobClusterInstanceEndpoint("NonExistent"),
        HttpEntities.create(
            ContentTypes.APPLICATION_JSON,
            JobClusterPayloads.JOB_CLUSTER_INVALID_UPDATE),
        StatusCodes.NOT_FOUND,
        null);
}

// Deleting an unknown cluster (with required user/reason params) is 404.
private void testJobClusterNonExistentDelete() throws InterruptedException {
    testDelete(
        getJobClusterInstanceEndpoint("NonExistent") + "?user=test&reason=unittest",
        StatusCodes.NOT_FOUND,
        null);
}

/** updateArtifact action: only POST is allowed, and the payload's cluster name
 * must match the one in the resource path. **/
private void testJobClusterActionUpdateArtifactPost() throws InterruptedException {
    testPost(
        getJobClusterUpdateArtifactEp(TEST_CLUSTER_NAME),
        HttpEntities.create(
            ContentTypes.APPLICATION_JSON,
            JobClusterPayloads.JOB_CLUSTER_QUICK_UPDATE_AND_SKIP_SUBMIT),
        StatusCodes.NO_CONTENT,
        EMPTY_RESPONSE_VALIDATOR);
}

// Payload names "sine-function" but path says "NonExistent" -> 400 with mismatch message.
private void testJobClusterActionUpdateArtifactPostNonExistent() throws InterruptedException {
    testPost(
        getJobClusterUpdateArtifactEp("NonExistent"),
        HttpEntities.create(
            ContentTypes.APPLICATION_JSON,
            JobClusterPayloads.JOB_CLUSTER_QUICK_UPDATE_AND_SKIP_SUBMIT),
        StatusCodes.BAD_REQUEST,
        (m) -> {
            assert m.contains(
                "Cluster name specified in request payload sine-function does " +
                    "not match with what specified in resource path NonExistent");
        });
}

// Path and payload agree on a cluster that does not exist -> 404.
private void testJobClusterActionUpdateArtifactPostNonMatchedResource() throws InterruptedException {
    testPost(
        getJobClusterUpdateArtifactEp("NonExistent"),
        HttpEntities.create(
            ContentTypes.APPLICATION_JSON,
            JobClusterPayloads.JOB_CLUSTER_QUICK_UPDATE_AND_SKIP_SUBMIT_NON_EXISTENT),
        StatusCodes.NOT_FOUND,
        null);
}

private void testJobClusterActionUpdateArtifactGetNotAllowed() throws InterruptedException {
    testGet(
        getJobClusterUpdateArtifactEp(TEST_CLUSTER_NAME),
        StatusCodes.METHOD_NOT_ALLOWED,
        null);
}

private void testJobClusterActionUpdateArtifactPUTNotAllowed() throws InterruptedException {
    testPut(
        getJobClusterUpdateArtifactEp(TEST_CLUSTER_NAME),
        HttpEntities.create(
            ContentTypes.APPLICATION_JSON,
            JobClusterPayloads.JOB_CLUSTER_QUICK_UPDATE_AND_SKIP_SUBMIT),
        StatusCodes.METHOD_NOT_ALLOWED,
        null);
}

private void testJobClusterActionUpdateArtifactDELETENotAllowed() throws InterruptedException {
    testDelete(
        getJobClusterUpdateArtifactEp(TEST_CLUSTER_NAME),
        StatusCodes.METHOD_NOT_ALLOWED,
        null
    );
}

/** test Update SLA actions **/
private void testJobClusterActionUpdateSlaPost() throws InterruptedException {
    testPost(getJobClusterUpdateSlaEp(TEST_CLUSTER_NAME),
        HttpEntities.create(
            ContentTypes.APPLICATION_JSON,
            JobClusterPayloads.JOB_CLUSTER_UPDATE_SLA),
        StatusCodes.NO_CONTENT, null);
}

// Path and payload agree on an unknown cluster -> 404.
private void testJobClusterActionUpdateSlaPostNonExistent() throws InterruptedException {
    testPost(
        getJobClusterUpdateSlaEp("NonExistent"),
        HttpEntities.create(
            ContentTypes.APPLICATION_JSON,
            JobClusterPayloads.JOB_CLUSTER_UPDATE_SLA_NONEXISTENT),
        StatusCodes.NOT_FOUND,
        null);
}

// Payload cluster name does not match the resource path -> 400.
private void testJobClusterActionUpdateSlaPostNonMatchedResource() throws InterruptedException {
    testPost(
        getJobClusterUpdateSlaEp("NonExistent"),
        HttpEntities.create(
            ContentTypes.APPLICATION_JSON,
            JobClusterPayloads.JOB_CLUSTER_UPDATE_SLA),
        StatusCodes.BAD_REQUEST,
        null);
}

private void testJobClusterActionUpdateSlaGetNotAllowed() throws InterruptedException {
    testGet(
        getJobClusterUpdateSlaEp(TEST_CLUSTER_NAME),
        StatusCodes.METHOD_NOT_ALLOWED,
        null);
}
private void testJobClusterActionUpdateSlaPUTNotAllowed() throws InterruptedException {
testPut(
getJobClusterUpdateSlaEp(TEST_CLUSTER_NAME),
HttpEntities.create(
ContentTypes.APPLICATION_JSON,
JobClusterPayloads.JOB_CLUSTER_UPDATE_SLA),
StatusCodes.METHOD_NOT_ALLOWED,
null);
}
private void testJobClusterActionUpdateSlaDELETENotAllowed() throws InterruptedException {
testDelete(
getJobClusterUpdateSlaEp(TEST_CLUSTER_NAME),
StatusCodes.METHOD_NOT_ALLOWED,
null);
}
/** Update migration strategy actions tests **/
private void testJobClusterActionUpdateMigrationPost() throws InterruptedException {
testPost(
getJobClusterUpdateMigrationStrategyEp(TEST_CLUSTER_NAME),
HttpEntities.create(
ContentTypes.APPLICATION_JSON,
JobClusterPayloads.MIGRATE_STRATEGY_UPDATE),
StatusCodes.NO_CONTENT,
null);
}
private void testJobClusterActionUpdateMigrationPostNonExistent() throws InterruptedException {
testPost(
getJobClusterUpdateMigrationStrategyEp("NonExistent"),
HttpEntities.create(
ContentTypes.APPLICATION_JSON,
JobClusterPayloads.MIGRATE_STRATEGY_UPDATE_NONEXISTENT),
StatusCodes.NOT_FOUND,
null);
}
private void testJobClusterActionUpdateMigrationPostNonMatchedResource() throws InterruptedException {
testPost(
getJobClusterUpdateMigrationStrategyEp("NonExistent"),
HttpEntities.create(
ContentTypes.APPLICATION_JSON,
JobClusterPayloads.MIGRATE_STRATEGY_UPDATE),
StatusCodes.BAD_REQUEST,
null);
}
private void testJobClusterActionUpdateMigrationGetNotAllowed() throws InterruptedException {
testGet(
getJobClusterUpdateMigrationStrategyEp(TEST_CLUSTER_NAME),
StatusCodes.METHOD_NOT_ALLOWED,
null);
}
private void testJobClusterActionUpdateMigrationPUTNotAllowed() throws InterruptedException {
testPut(
getJobClusterUpdateMigrationStrategyEp(TEST_CLUSTER_NAME),
HttpEntities.create(
ContentTypes.APPLICATION_JSON,
JobClusterPayloads.MIGRATE_STRATEGY_UPDATE),
StatusCodes.METHOD_NOT_ALLOWED,
null);
}
private void testJobClusterActionUpdateMigrationDELETENotAllowed() throws InterruptedException {
testDelete(
getJobClusterUpdateMigrationStrategyEp(TEST_CLUSTER_NAME),
StatusCodes.METHOD_NOT_ALLOWED,
null);
}
/** Update label actions tests **/
private void testJobClusterActionUpdateLabelPost() throws InterruptedException {
testPost(
getJobClusterUpdateLabelEp(TEST_CLUSTER_NAME),
HttpEntities.create(
ContentTypes.APPLICATION_JSON,
JobClusterPayloads.JOB_CLUSTER_UPDATE_LABELS),
StatusCodes.NO_CONTENT,
null);
}
private void testJobClusterActionUpdateLabelPostNonExistent() throws InterruptedException {
testPost(
getJobClusterUpdateLabelEp("NonExistent"),
HttpEntities.create(
ContentTypes.APPLICATION_JSON,
JobClusterPayloads.JOB_CLUSTER_UPDATE_LABELS_NONEXISTENT),
StatusCodes.NOT_FOUND,
null);
}
private void testJobClusterActionUpdateLabelPostNonMatchedResource() throws InterruptedException {
testPost(
getJobClusterUpdateLabelEp("NonExistent"),
HttpEntities.create(
ContentTypes.APPLICATION_JSON,
JobClusterPayloads.JOB_CLUSTER_UPDATE_LABELS),
StatusCodes.BAD_REQUEST,
null);
}
private void testJobClusterActionUpdateLabelGetNotAllowed() throws InterruptedException {
testGet(
getJobClusterUpdateLabelEp(TEST_CLUSTER_NAME),
StatusCodes.METHOD_NOT_ALLOWED,
null);
}
private void testJobClusterActionUpdateLabelPUTNotAllowed() throws InterruptedException {
testPut(
getJobClusterUpdateLabelEp(TEST_CLUSTER_NAME),
HttpEntities.create(
ContentTypes.APPLICATION_JSON,
JobClusterPayloads.JOB_CLUSTER_UPDATE_LABELS),
StatusCodes.METHOD_NOT_ALLOWED,
null);
}
private void testJobClusterActionUpdateLabelDELETENotAllowed() throws InterruptedException {
testDelete(
getJobClusterUpdateLabelEp(TEST_CLUSTER_NAME),
StatusCodes.METHOD_NOT_ALLOWED,
null);
}
/** enable cluster action test **/
private void testJobClusterActionEnablePost() throws InterruptedException {
testPost(
getJobClusterEnableEp(TEST_CLUSTER_NAME),
HttpEntities.create(
ContentTypes.APPLICATION_JSON,
JobClusterPayloads.JOB_CLUSTER_ENABLE),
StatusCodes.NO_CONTENT,
null);
}
private void testJobClusterActionEnablePostNonExistent() throws InterruptedException {
testPost(
getJobClusterEnableEp("NonExistent"),
HttpEntities.create(
ContentTypes.APPLICATION_JSON,
JobClusterPayloads.JOB_CLUSTER_ENABLE_NONEXISTENT),
StatusCodes.NOT_FOUND,
null);
}
private void testJobClusterActionEnablePostNonMatchedResource() throws InterruptedException {
testPost(
getJobClusterEnableEp("NonExistent"),
HttpEntities.create(
ContentTypes.APPLICATION_JSON,
JobClusterPayloads.JOB_CLUSTER_ENABLE),
StatusCodes.BAD_REQUEST,
null);
}
private void testJobClusterActionEnableGetNotAllowed() throws InterruptedException {
testGet(getJobClusterEnableEp(TEST_CLUSTER_NAME), StatusCodes.METHOD_NOT_ALLOWED, null);
}
private void testJobClusterActionEnablePUTNotAllowed() throws InterruptedException {
testPut(getJobClusterEnableEp(TEST_CLUSTER_NAME),
HttpEntities.create(
ContentTypes.APPLICATION_JSON,
JobClusterPayloads.JOB_CLUSTER_ENABLE),
StatusCodes.METHOD_NOT_ALLOWED, null);
}
private void testJobClusterActionEnableDELETENotAllowed() throws InterruptedException {
testDelete(getJobClusterEnableEp(TEST_CLUSTER_NAME), StatusCodes.METHOD_NOT_ALLOWED, null);
}
/** disable cluster action test **/
private void testJobClusterActionDisablePost() throws InterruptedException {
testPost(
getJobClusterDisableEp(TEST_CLUSTER_NAME),
HttpEntities.create(
ContentTypes.APPLICATION_JSON,
JobClusterPayloads.JOB_CLUSTER_DISABLE),
StatusCodes.NO_CONTENT,
null
);
}
private void testJobClusterActionDisablePostNonExistent() throws InterruptedException {
testPost(
getJobClusterDisableEp("NonExistent"),
HttpEntities.create(
ContentTypes.APPLICATION_JSON,
JobClusterPayloads.JOB_CLUSTER_DISABLE_NONEXISTENT),
StatusCodes.NOT_FOUND,
null);
}
private void testJobClusterActionDisablePostNonMatchedResource() throws InterruptedException {
testPost(
getJobClusterDisableEp("NonExistent"),
HttpEntities.create(
ContentTypes.APPLICATION_JSON,
JobClusterPayloads.JOB_CLUSTER_DISABLE),
StatusCodes.BAD_REQUEST,
null);
}
private void testJobClusterActionDisableGetNotAllowed() throws InterruptedException {
testGet(getJobClusterDisableEp(TEST_CLUSTER_NAME), StatusCodes.METHOD_NOT_ALLOWED, null);
}
private void testJobClusterActionDisablePUTNotAllowed() throws InterruptedException {
testPut(
getJobClusterDisableEp(TEST_CLUSTER_NAME),
HttpEntities.create(
ContentTypes.APPLICATION_JSON,
JobClusterPayloads.JOB_CLUSTER_DISABLE),
StatusCodes.METHOD_NOT_ALLOWED,
null);
}
private void testJobClusterActionDisableDELETENotAllowed() throws InterruptedException {
testDelete(getJobClusterDisableEp(TEST_CLUSTER_NAME), StatusCodes.METHOD_NOT_ALLOWED, null);
}
private void testJobClusterDeleteWithoutRequiredParam() throws InterruptedException {
testDelete(
getJobClusterInstanceEndpoint("sine-function"),
StatusCodes.BAD_REQUEST,
null);
}
private void testJobClusterValidDelete() throws InterruptedException {
assert isClusterExist("sine-function");
testDelete(getJobClusterInstanceEndpoint("sine-function") + "?user=test&reason=unittest",
StatusCodes.ACCEPTED, null);
boolean clusterExist = isClusterExist("sine-function");
int retry = 10;
while (clusterExist && retry > 0) {
Thread.sleep(1000);
clusterExist = isClusterExist("sine-function");
retry--;
}
assert !clusterExist;
}
private void compareClusterInstancePayload(String clusterGetResponse) {
try {
ObjectMapper mapper = new ObjectMapper();
JsonNode requestObj = mapper.readTree(JobClusterPayloads.JOB_CLUSTER_CREATE);
JsonNode responseObj = mapper.readTree(clusterGetResponse);
assertEquals(
responseObj.get("name").toString(),
requestObj.get("jobDefinition").get("name").toString());
assertEquals(
responseObj.get("jars").get(0).get("url").toString(),
requestObj.get("jobDefinition").get("jobJarFileLocation").toString());
assertEquals(
responseObj.get("jars").get(0).get("version").toString(),
requestObj.get("jobDefinition").get("version").toString());
} catch (IOException ex) {
assert ex == null;
}
}
private void compareClustersPayload(String clusterListResponse) {
try {
ObjectMapper mapper = new ObjectMapper();
JsonNode responseObj = mapper.readTree(clusterListResponse);
assert (responseObj.get("list") != null);
assert (responseObj.get("prev") != null);
assert (responseObj.get("next") != null);
compareClusterInstancePayload(responseObj.get("list").get(0).toString());
} catch (IOException ex) {
assert ex == null;
}
}
}
| 7,907 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route/v1/RouteTestBase.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import akka.actor.ActorSystem;
import akka.http.javadsl.Http;
import akka.http.javadsl.model.HttpEntity;
import akka.http.javadsl.model.HttpMethod;
import akka.http.javadsl.model.HttpMethods;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.http.javadsl.model.RequestEntity;
import akka.http.javadsl.model.StatusCode;
import akka.stream.Materializer;
import akka.testkit.javadsl.TestKit;
import akka.util.ByteString;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.util.Strings;
/**
 * Shared scaffolding for Mantis master v1 REST route tests. Owns the Akka
 * actor system / HTTP client, builds endpoint URIs for a local master bound to
 * {@code serverPort}, and provides helpers that issue requests and assert on
 * status codes and (optionally) response bodies.
 */
public abstract class RouteTestBase {
    private final static Logger logger = LoggerFactory.getLogger(RouteTestBase.class);

    // NOTE(review): these statics are assigned both from the instance
    // constructor and from setupActorSystem(); the last writer wins. This works
    // because each concrete test class runs its own lifecycle — confirm before
    // running multiple suites in the same JVM concurrently.
    static ActorSystem system;
    static Materializer materializer;
    static Http http;

    private final String testName;
    final private int serverPort;

    /** Validator asserting that a response body is null or empty. */
    static ResponseValidatorFunc EMPTY_RESPONSE_VALIDATOR = (msg) -> {
        assertTrue(String.format("response [%s] is not empty", msg), Strings.isNullOrEmpty(msg));
    };

    RouteTestBase(String testName, int port) {
        this.testName = testName;
        // system/materializer/http are static members shared by the whole class;
        // assign them directly rather than through `this.`, which only obscured
        // that fact in the original code.
        system = ActorSystem.create(testName);
        materializer = Materializer.createMaterializer(system);
        http = Http.get(system);
        this.serverPort = port;
    }

    /** Creates the shared actor system, materializer and HTTP client. */
    @BeforeClass
    public static void setupActorSystem() {
        system = ActorSystem.create();
        materializer = Materializer.createMaterializer(system);
        http = Http.get(system);
    }

    /** Shuts down HTTP pools, the materializer and the actor system, best-effort. */
    @AfterClass
    public static void tearDownActorSystem() {
        try {
            http.shutdownAllConnectionPools();
        } catch (Exception e) {
            logger.error("Failed to close http", e);
        }
        try {
            materializer.shutdown();
        } catch (Exception e) {
            logger.error("Failed to shutdown materializer", e);
        }
        TestKit.shutdownActorSystem(system);
    }

    // ---------- endpoint builders (all target 127.0.0.1:serverPort) ----------

    final String getJobClustersEndpoint() {
        return String.format(
            "http://127.0.0.1:%d/api/v1/jobClusters",
            serverPort);
    }

    final String getJobClusterInstanceEndpoint(String clusterName) {
        return String.format(
            "http://127.0.0.1:%d/api/v1/jobClusters/%s",
            serverPort,
            clusterName);
    }

    final String getJobClusterLatestJobDiscoveryInfoEp(String clusterName) {
        return String.format(
            "http://127.0.0.1:%d/api/v1/jobClusters/%s/latestJobDiscoveryInfo",
            serverPort,
            clusterName);
    }

    final String getJobClusterUpdateArtifactEp(String clusterName) {
        return getJobClusterInstanceEndpoint(clusterName) + "/actions/updateArtifact";
    }

    final String getJobClusterUpdateSlaEp(String clusterName) {
        return getJobClusterInstanceEndpoint(clusterName) + "/actions/updateSla";
    }

    final String getJobClusterUpdateMigrationStrategyEp(String clusterName) {
        return getJobClusterInstanceEndpoint(clusterName) + "/actions/updateMigrationStrategy";
    }

    final String getJobClusterUpdateLabelEp(String clusterName) {
        return getJobClusterInstanceEndpoint(clusterName) + "/actions/updateLabel";
    }

    final String getJobClusterEnableEp(String clusterName) {
        return getJobClusterInstanceEndpoint(clusterName) + "/actions/enableCluster";
    }

    final String getJobClusterDisableEp(String clusterName) {
        return getJobClusterInstanceEndpoint(clusterName) + "/actions/disableCluster";
    }

    final String getJobsEndpoint() {
        return String.format(
            "http://127.0.0.1:%d/api/v1/jobs",
            serverPort);
    }

    final String getClusterJobsEndpoint(String clusterName) {
        return String.format(
            "http://127.0.0.1:%d/api/v1/jobClusters/%s/jobs",
            serverPort,
            clusterName);
    }

    final String getJobInstanceEndpoint(String clusterName, String jobId) {
        return String.format(
            "http://127.0.0.1:%d/api/v1/jobClusters/%s/jobs/%s",
            serverPort,
            clusterName,
            jobId);
    }

    final String getJobInstanceEndpoint(String jobId) {
        return String.format(
            "http://127.0.0.1:%d/api/v1/jobs/%s",
            serverPort,
            jobId);
    }

    /**
     * Asserts the response status (and CORS header if present), then reads the
     * whole entity and returns its body as a UTF-8 string.
     */
    CompletionStage<String> processRespFut(
        final HttpResponse r,
        final int expectedStatusCode) {
        logger.info("headers {} {}", r.getHeaders(), r.status());
        logger.info("response entity: {}", r.entity());
        assertEquals(expectedStatusCode, r.status().intValue());
        if (r.getHeader("Access-Control-Allow-Origin").isPresent()) {
            assertEquals("*", r.getHeader("Access-Control-Allow-Origin").get().value());
        }
        CompletionStage<HttpEntity.Strict> strictEntity = r.entity().toStrict(1000, materializer);
        return strictEntity.thenCompose(s -> s.getDataBytes()
            .runFold(
                ByteString.emptyByteString(),
                ByteString::concat,
                materializer)
            .thenApply(ByteString::utf8String)
        );
    }

    /**
     * Returns {@code msg} when the async call succeeded; fails the test when
     * {@code t} is non-null.
     */
    String getResponseMessage(final String msg, final Throwable t) {
        if (t != null) {
            logger.error("got err ", t);
            fail(t.getMessage());
        } else {
            logger.info("got response {}", msg);
            return msg;
        }
        // Unreachable in practice (fail() always throws) but required by javac.
        // Fixed: the original passed a "{}" placeholder with no argument.
        logger.info("got empty response");
        return "";
    }

    /** Returns true when a GET on the cluster endpoint does not yield 404. */
    boolean isClusterExist(String clusterName) {
        final boolean result =
            http.singleRequest(HttpRequest.GET(getJobClusterInstanceEndpoint(clusterName)))
                .thenApply(r -> r.status().intValue() != 404)
                .toCompletableFuture()
                .handle((x, y) -> x)
                .join();
        return result;
    }

    /** Deletes the cluster when present (expects 202); no-op otherwise. */
    void deleteClusterIfExist(String clusterName) throws InterruptedException {
        if (isClusterExist(clusterName)) {
            final CountDownLatch latch = new CountDownLatch(1);
            http.singleRequest(HttpRequest.DELETE(getJobClusterInstanceEndpoint(clusterName)))
                .thenCompose(r -> processRespFut(r, 202))
                .whenComplete((r, t) -> {
                    String responseMessage = getResponseMessage(r, t);
                    logger.info("got response {}", responseMessage);
                    latch.countDown();
                });
            assertTrue(latch.await(1, TimeUnit.SECONDS));
        } else {
            logger.info("Cluster {} does not exist, no need to delete", clusterName);
        }
    }

    // ---------- request helpers: issue a request and assert on the result ----------

    void testGet(
        String endpoint,
        StatusCode expectedResponseCode,
        ResponseValidatorFunc validatorFunc) throws InterruptedException {
        testHttpRequest(
            HttpMethods.GET,
            endpoint,
            expectedResponseCode,
            validatorFunc);
    }

    void testPost(
        String endpoint,
        RequestEntity requestEntity,
        StatusCode expectedResponseCode,
        ResponseValidatorFunc validatorFunc) throws InterruptedException {
        testHttpRequest(
            HttpMethods.POST,
            endpoint,
            requestEntity,
            expectedResponseCode,
            validatorFunc);
    }

    void testPost(
        String endpoint,
        StatusCode expectedResponseCode,
        ResponseValidatorFunc validatorFunc) throws InterruptedException {
        testHttpRequest(
            HttpMethods.POST,
            endpoint,
            expectedResponseCode,
            validatorFunc);
    }

    void testPut(
        String endpoint,
        StatusCode expectedResponseCode,
        ResponseValidatorFunc validatorFunc) throws InterruptedException {
        testHttpRequest(
            HttpMethods.PUT,
            endpoint,
            expectedResponseCode,
            validatorFunc);
    }

    void testPut(
        String endpoint,
        RequestEntity requestEntity,
        StatusCode expectedResponseCode,
        ResponseValidatorFunc validatorFunc) throws InterruptedException {
        testHttpRequest(
            HttpMethods.PUT,
            endpoint,
            requestEntity,
            expectedResponseCode,
            validatorFunc);
    }

    void testDelete(
        String endpoint,
        StatusCode expectedResponseCode,
        ResponseValidatorFunc validatorFunc) throws InterruptedException {
        testHttpRequest(HttpMethods.DELETE, endpoint, expectedResponseCode, validatorFunc);
    }

    void testHttpRequest(
        HttpMethod httpMethod,
        String endpoint,
        StatusCode expectedResponseCode,
        ResponseValidatorFunc validatorFunc) throws InterruptedException {
        testHttpRequest(
            HttpRequest.create().withMethod(httpMethod).withUri(endpoint),
            expectedResponseCode,
            validatorFunc);
    }

    private void testHttpRequest(
        HttpMethod httpMethod,
        String endpoint,
        RequestEntity requestEntity,
        StatusCode expectedResponseCode,
        ResponseValidatorFunc validatorFunc) throws InterruptedException {
        testHttpRequest(
            HttpRequest.create()
                .withMethod(httpMethod)
                .withUri(endpoint)
                .withEntity(requestEntity),
            expectedResponseCode,
            validatorFunc);
    }

    /**
     * Executes the request, asserts the status, runs the optional body
     * validator, and waits (2s) for completion.
     */
    private void testHttpRequest(
        HttpRequest request,
        StatusCode expectedResponseCode,
        ResponseValidatorFunc validatorFunc) throws InterruptedException {
        assert request != null;
        logger.info(request.toString());
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(request);
        try {
            responseFuture
                .thenCompose(r -> processRespFut(r, expectedResponseCode.intValue()))
                .whenComplete((msg, t) -> {
                    logger.info("got response: {}", msg);
                    assert t == null;
                    if (null != validatorFunc) {
                        validatorFunc.validate(msg);
                    }
                    latch.countDown();
                })
                .toCompletableFuture()
                .get(2, TimeUnit.SECONDS);
        } catch (ExecutionException | TimeoutException e) {
            // Multi-catch replaces the two identical catch blocks of the original.
            throw new RuntimeException(e);
        }
    }

    /** Callback used to assert on a response body. */
    @FunctionalInterface
    interface ResponseValidatorFunc {
        void validate(String response);
    }
}
| 7,908 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route/v1/TestMantisClient.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.core.NamedJobInfo;
import io.mantisrx.server.core.master.MasterDescription;
import io.netty.buffer.ByteBuf;
import io.netty.handler.codec.http.HttpResponseStatus;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import mantis.io.reactivex.netty.RxNetty;
import mantis.io.reactivex.netty.pipeline.PipelineConfigurators;
import mantis.io.reactivex.netty.protocol.http.client.HttpClient;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientRequest;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientResponse;
import mantis.io.reactivex.netty.protocol.http.sse.ServerSentEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.functions.Func1;
import rx.functions.Func2;
/**
 * Test-only Mantis "client": connects to a local master's SSE endpoints with
 * RxNetty and decodes job-scheduling / last-submitted-job events.
 */
public class TestMantisClient {
    private static final Logger logger = LoggerFactory.getLogger(TestMantisClient.class);
    // Port of the local master HTTP server; also reused for every port slot of
    // the fabricated MasterDescription below.
    private final int serverPort;
    public TestMantisClient(final int serverPort) {
        this.serverPort = serverPort;
    }
    // Retry policy: on error, resubscribe after 2 * attempt seconds, capped at
    // 20 seconds, indefinitely.
    private final Func1<Observable<? extends Throwable>, Observable<?>> retryLogic = attempts -> attempts
            .zipWith(Observable.range(1, Integer.MAX_VALUE), (Func2<Throwable, Integer, Integer>) (t1, integer) -> integer)
            .flatMap((Func1<Integer, Observable<?>>) integer -> {
                long delay = 2 * (integer > 10 ? 10 : integer);
                logger.info(": retrying conx after sleeping for " + delay + " secs");
                return Observable.timer(delay, TimeUnit.SECONDS);
            });
    // Repeat policy: when the stream completes, resubscribe with the same
    // capped 2 * attempt-second backoff.
    private final Func1<Observable<? extends Void>, Observable<?>> repeatLogic = attempts -> attempts
            .zipWith(Observable.range(1, Integer.MAX_VALUE), (Func2<Void, Integer, Integer>) (t1, integer) -> integer)
            .flatMap((Func1<Integer, Observable<?>>) integer -> {
                long delay = 2 * (integer > 10 ? 10 : integer);
                logger.warn("On Complete received! : repeating conx after sleeping for " + delay + " secs");
                return Observable.timer(delay, TimeUnit.SECONDS);
            });
    // Builds an unpooled RxNetty HTTP client configured for Server-Sent Events.
    private HttpClient<ByteBuf, ServerSentEvent> getRxnettySseClient(String hostname, int port) {
        return RxNetty.<ByteBuf, ServerSentEvent>newHttpClientBuilder(hostname, port)
                .pipelineConfigurator(PipelineConfigurators.<ByteBuf>clientSseConfigurator())
                // .enableWireLogging(LogLevel.INFO)
                .withNoConnectionPooling().build();
    }
    /**
     * Streams JobSchedulingInfo updates for {@code jobId} from the local
     * master's jobDiscoveryStream SSE endpoint. Heartbeat events are filtered
     * out; a 3-minute inactivity timeout errors the stream so retryFn kicks in.
     */
    public Observable<JobSchedulingInfo> discoveryStream(final String jobId,
                                                         final Func1<Observable<? extends Throwable>, Observable<?>> retryFn,
                                                         final Func1<Observable<? extends Void>, Observable<?>> repeatFn) {
        // Fabricate a MasterDescription pointing at the local test server.
        return Observable.just(new MasterDescription("localhost", "127.0.0.1", serverPort, serverPort,
                serverPort, "/api/postjobstatus", serverPort, System.currentTimeMillis()))
                .retryWhen(retryFn)
                .switchMap(new Func1<MasterDescription, Observable<JobSchedulingInfo>>() {
                    @Override
                    public Observable<JobSchedulingInfo> call(MasterDescription masterDescription) {
                        return getRxnettySseClient(masterDescription.getHostname(), masterDescription.getSchedInfoPort())
                                .submit(HttpClientRequest.createGet("/api/v1/jobDiscoveryStream/" + jobId + "?sendHB=true"))
                                .flatMap(new Func1<HttpClientResponse<ServerSentEvent>, Observable<JobSchedulingInfo>>() {
                                    @Override
                                    public Observable<JobSchedulingInfo> call(HttpClientResponse<ServerSentEvent> response) {
                                        // Non-200 responses become stream errors (handled by retryFn).
                                        if (!HttpResponseStatus.OK.equals(response.getStatus())) {
                                            return Observable.error(new Exception(response.getStatus().reasonPhrase()));
                                        }
                                        return response.getContent()
                                                .map(new Func1<ServerSentEvent, JobSchedulingInfo>() {
                                                    @Override
                                                    public JobSchedulingInfo call(ServerSentEvent event) {
                                                        try {
                                                            return Jackson.fromJSON(event.contentAsString(), JobSchedulingInfo.class);
                                                        } catch (IOException e) {
                                                            throw new RuntimeException("Invalid schedInfo json: " + e.getMessage(), e);
                                                        }
                                                    }
                                                })
                                                .timeout(3 * 60, TimeUnit.SECONDS)
                                                // Drop server heartbeat events; only real updates are emitted.
                                                .filter(new Func1<JobSchedulingInfo, Boolean>() {
                                                    @Override
                                                    public Boolean call(JobSchedulingInfo schedulingInfo) {
                                                        return schedulingInfo != null && !JobSchedulingInfo.HB_JobId.equals(schedulingInfo.getJobId());
                                                    }
                                                })
                                                ;
                                    }
                                })
                                ;
                    }
                })
                .repeatWhen(repeatFn)
                .retryWhen(retryFn)
                ;
    }
    /** Convenience overload using the default retry/repeat backoff policies. */
    public Observable<JobSchedulingInfo> discoveryStream(final String jobId) {
        return discoveryStream(jobId, retryLogic, repeatLogic);
    }
    /**
     * Streams NamedJobInfo updates for {@code jobName} from the local master's
     * lastSubmittedJobIdStream SSE endpoint, with the same heartbeat filtering
     * and timeout behavior as {@link #discoveryStream}.
     */
    public Observable<NamedJobInfo> namedJobInfo(final String jobName, final Func1<Observable<? extends Throwable>, Observable<?>> retryFn,
                                                 final Func1<Observable<? extends Void>, Observable<?>> repeatFn) {
        return Observable.just(new MasterDescription("localhost", "127.0.0.1", serverPort, serverPort,
                serverPort, "/api/postjobstatus", serverPort, System.currentTimeMillis()))
                .filter(new Func1<MasterDescription, Boolean>() {
                    @Override
                    public Boolean call(MasterDescription masterDescription) {
                        return masterDescription != null;
                    }
                })
                .retryWhen(retryFn)
                .switchMap(new Func1<MasterDescription, Observable<NamedJobInfo>>() {
                    @Override
                    public Observable<NamedJobInfo> call(MasterDescription masterDescription) {
                        return getRxnettySseClient(masterDescription.getHostname(), masterDescription.getSchedInfoPort())
                                .submit(HttpClientRequest.createGet("/api/v1/lastSubmittedJobIdStream/" + jobName + "?sendHB=true"))
                                .flatMap(new Func1<HttpClientResponse<ServerSentEvent>, Observable<NamedJobInfo>>() {
                                    @Override
                                    public Observable<NamedJobInfo> call(HttpClientResponse<ServerSentEvent> response) {
                                        if(!HttpResponseStatus.OK.equals(response.getStatus()))
                                            return Observable.error(new Exception(response.getStatus().reasonPhrase()));
                                        return response.getContent()
                                                .map(new Func1<ServerSentEvent, NamedJobInfo>() {
                                                    @Override
                                                    public NamedJobInfo call(ServerSentEvent event) {
                                                        try {
                                                            return Jackson.fromJSON(event.contentAsString(), NamedJobInfo.class);
                                                        } catch (IOException e) {
                                                            throw new RuntimeException("Invalid namedJobInfo json: " + e.getMessage(), e);
                                                        }
                                                    }
                                                })
                                                .timeout(3 * 60, TimeUnit.SECONDS)
                                                // Drop heartbeat placeholder entries.
                                                .filter(new Func1<NamedJobInfo, Boolean>() {
                                                    @Override
                                                    public Boolean call(NamedJobInfo namedJobInfo) {
                                                        return namedJobInfo != null && !JobSchedulingInfo.HB_JobId.equals(namedJobInfo.getName());
                                                    }
                                                })
                                                ;
                                    }})
                                ;
                    }
                })
                .repeatWhen(repeatFn)
                .retryWhen(retryFn)
                ;
    }
    /** Convenience overload using the default retry/repeat backoff policies. */
    public Observable<NamedJobInfo> namedJobInfo(final String jobName) {
        return namedJobInfo(jobName, retryLogic, repeatLogic);
    }
}
| 7,909 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route/v1/JobArtifactSerdeTest.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
import static org.junit.Assert.assertEquals;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.server.core.domain.ArtifactID;
import io.mantisrx.server.core.domain.JobArtifact;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.SerializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
import io.mantisrx.shaded.com.fasterxml.jackson.datatype.jdk8.Jdk8Module;
import io.mantisrx.shaded.com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
import io.mantisrx.shaded.com.google.common.collect.ImmutableMap;
import java.time.Instant;
import org.junit.Test;
/** Round-trip serde test for {@link JobArtifact} using the control plane's JSON mapper. */
public class JobArtifactSerdeTest {
    // Mapper mirrors the control plane's lenient settings: tolerate unknown
    // fields, allow empty beans, and support Optional + java.time types.
    private static final ObjectMapper mapper = new ObjectMapper()
        .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
        .configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false)
        .registerModule(new Jdk8Module())
        .registerModule(new JavaTimeModule());

    // Filter provider that ignores unknown filter ids instead of throwing.
    public static final SimpleFilterProvider DEFAULT_FILTER_PROVIDER;

    static {
        DEFAULT_FILTER_PROVIDER = new SimpleFilterProvider();
        DEFAULT_FILTER_PROVIDER.setFailOnUnknownId(false);
    }

    @Test
    public void testIfJobArtifactIsSerializableByJson() throws Exception {
        final JobArtifact artifact =
            JobArtifact.builder()
                .artifactID(ArtifactID.of("id"))
                .name("proj1")
                .version("v1")
                .createdAt(Instant.ofEpochMilli(1668135952L))
                .runtimeType("sbn")
                .dependencies(ImmutableMap.of("de1", "1.0.0"))
                .entrypoint("entrypoint")
                .build();
        String metaJson = Jackson.toJSON(mapper, null, artifact);
        // assertEquals takes (expected, actual) — the original had them
        // reversed, which produces misleading failure messages.
        assertEquals(
            "{\"artifactID\":{\"resourceID\":\"id\"},\"name\":\"proj1\",\"version\":\"v1\",\"createdAt\":1668135.952000000,\"runtimeType\":\"sbn\",\"dependencies\":{\"de1\":\"1.0.0\"},\"entrypoint\":\"entrypoint\"}",
            metaJson);
        // Deserializing the serialized form must reproduce the original artifact.
        final JobArtifact actual = Jackson.fromJSON(mapper, metaJson, JobArtifact.class);
        assertEquals(artifact, actual);
    }
}
| 7,910 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route/v1/JobDiscoveryStreamRouteTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import akka.NotUsed;
import akka.actor.ActorRef;
import akka.http.javadsl.ConnectHttp;
import akka.http.javadsl.Http;
import akka.http.javadsl.ServerBinding;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.stream.ActorMaterializer;
import akka.stream.javadsl.Flow;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.JobClustersManagerActor;
import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandlerAkkaImpl;
import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventPublisherImpl;
import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl;
import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl;
import io.mantisrx.master.jobcluster.job.CostsCalculator;
import io.mantisrx.master.jobcluster.job.JobTestHelper;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.scheduler.AgentsErrorMonitorActor;
import io.mantisrx.master.scheduler.FakeMantisScheduler;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.core.NamedJobInfo;
import io.mantisrx.server.master.persistence.FileBasedPersistenceProvider;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.MantisSchedulerFactory;
import java.time.Duration;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.function.Function;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
/**
 * End-to-end tests for the v1 job-discovery streaming route. A real HTTP server
 * serving only {@link JobDiscoveryStreamRoute} is bound to localhost:SERVER_PORT
 * once per class, and a {@code TestMantisClient} consumes the streams over the wire.
 */
public class JobDiscoveryStreamRouteTest extends RouteTestBase {
    private final static Logger logger = LoggerFactory.getLogger(JobDiscoveryStreamRouteTest.class);
    // Daemon thread that boots the test HTTP server; interrupted in teardown().
    private static Thread t;
    private static final int SERVER_PORT = 8201;
    // Server binding handle; volatile because it is written on the boot thread
    // and read in teardown() on the JUnit thread. May stay null if boot fails.
    private static volatile CompletionStage<ServerBinding> binding;
    private static ActorRef agentsErrorMonitorActor;
    // Client pointed at the locally bound server.
    private final TestMantisClient mantisClient = new TestMantisClient(SERVER_PORT);
    public JobDiscoveryStreamRouteTest(){
        super("JobDiscoveryRoute", SERVER_PORT);
    }
    /**
     * Boots the route stack (job-clusters manager actor, fake scheduler, discovery
     * handler) and binds the HTTP server on a daemon thread. The latch releases the
     * JUnit thread once binding has been initiated — or once boot has failed, in
     * which case {@code binding} remains null.
     */
    @BeforeClass
    public static void setup() throws Exception {
        JobTestHelper.deleteAllFiles();
        JobTestHelper.createDirsIfRequired();
        final CountDownLatch latch = new CountDownLatch(1);
        t = new Thread(() -> {
            try {
                // boot up server using the route as defined below
                final Http http = Http.get(system);
                final ActorMaterializer materializer = ActorMaterializer.create(system);
                final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl());
                TestHelpers.setupMasterConfig();
                ActorRef jobClustersManagerActor = system.actorOf(
                        JobClustersManagerActor.props(
                                new MantisJobStore(new FileBasedPersistenceProvider(true)),
                                lifecycleEventPublisher,
                                CostsCalculator.noop()),
                        "jobClustersManager");
                // Scheduler is faked: it just routes worker events back to the actor.
                MantisSchedulerFactory fakeSchedulerFactory = mock(MantisSchedulerFactory.class);
                MantisScheduler fakeScheduler = new FakeMantisScheduler(jobClustersManagerActor);
                when(fakeSchedulerFactory.forJob(any())).thenReturn(fakeScheduler);
                jobClustersManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize(fakeSchedulerFactory, false), ActorRef.noSender());
                agentsErrorMonitorActor = system.actorOf(AgentsErrorMonitorActor.props());
                agentsErrorMonitorActor.tell(new AgentsErrorMonitorActor.InitializeAgentsErrorMonitor(fakeScheduler), ActorRef.noSender());
                // The discovery handler uses the server idle-timeout for its ask timeouts.
                Duration idleTimeout = system.settings().config().getDuration("akka.http.server.idle-timeout");
                logger.info("idle timeout {} sec ", idleTimeout.getSeconds());
                final JobDiscoveryRouteHandler jobDiscoveryRouteHandler = new JobDiscoveryRouteHandlerAkkaImpl(jobClustersManagerActor, idleTimeout);
                final JobDiscoveryStreamRoute jobDiscoveryRoute = new JobDiscoveryStreamRoute(jobDiscoveryRouteHandler);
                final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow = jobDiscoveryRoute.createRoute(Function.identity()).flow(system, materializer);
                logger.info("starting test server on port {}", SERVER_PORT);
                binding = http.bindAndHandle(routeFlow,
                        ConnectHttp.toHost("localhost", SERVER_PORT), materializer);
                latch.countDown();
            } catch (Exception e) {
                logger.info("caught exception", e);
                // Release the waiting JUnit thread even on failure so the suite
                // fails in the tests rather than hanging here.
                latch.countDown();
                e.printStackTrace();
            }
        });
        t.setDaemon(true);
        t.start();
        latch.await();
    }
    /** Unbinds the server (if it ever bound) and terminates the actor system. */
    @AfterClass
    public static void teardown() {
        logger.info("JobDiscoveryRouteTest teardown");
        // binding is null if setup() failed before bindAndHandle completed.
        if (binding != null) {
            binding
                    .thenCompose(ServerBinding::unbind) // trigger unbinding from the port
                    .thenAccept(unbound -> system.terminate()); // and shutdown when done
        }
        t.interrupt();
    }
    @Test
    public void testJobDiscoveryStreamForNonExistentJob() throws InterruptedException {
        // The current behavior of Mantis client is to retry non-200 responses
        // This test overrides the default retry/repeat behavior to test a Sched info observable would complete if the job id requested is non-existent
        final CountDownLatch latch = new CountDownLatch(1);
        Observable<JobSchedulingInfo> jobSchedulingInfoObservable = mantisClient
                .discoveryStream("testJobCluster-1",
                        obs -> Observable.just(1),   // retry once, then give up
                        obs -> Observable.empty()    // do not repeat on completion
                );
        jobSchedulingInfoObservable
                .doOnNext(x -> logger.info("onNext {}", x))
                .doOnError(t -> logger.warn("onError", t))
                .doOnCompleted(() -> {
                    logger.info("onCompleted");
                    latch.countDown();
                })
                .subscribe();
        // Passes iff the stream completes for the non-existent job id.
        latch.await();
    }
    @Test
    public void testLastSubmittedJobIdStreamForNonExistentJob() throws InterruptedException {
        // The current behavior of Mantis client is to retry non-200 responses
        // This test overrides the default retry/repeat behavior to test a namedjob info observable would complete if the job cluster requested is non-existent
        final CountDownLatch latch = new CountDownLatch(1);
        Observable<NamedJobInfo> jobSchedulingInfoObservable = mantisClient
                .namedJobInfo("testJobCluster",
                        obs -> Observable.just(1),   // retry once, then give up
                        obs -> Observable.empty()    // do not repeat on completion
                );
        jobSchedulingInfoObservable
                .doOnNext(x -> logger.info("onNext {}", x))
                .doOnError(t -> logger.warn("onError", t))
                .doOnCompleted(() -> {
                    logger.info("onCompleted");
                    latch.countDown();
                })
                .subscribe();
        // Passes iff the stream completes for the non-existent cluster.
        latch.await();
    }
}
| 7,911 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route/v1/JobsRouteTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import akka.NotUsed;
import akka.actor.ActorRef;
import akka.http.javadsl.ConnectHttp;
import akka.http.javadsl.ServerBinding;
import akka.http.javadsl.model.ContentTypes;
import akka.http.javadsl.model.HttpEntities;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.http.javadsl.model.StatusCodes;
import akka.stream.javadsl.Flow;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.JobClustersManagerActor;
import io.mantisrx.master.api.akka.payloads.JobClusterPayloads;
import io.mantisrx.master.api.akka.payloads.JobPayloads;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.api.akka.route.MantisMasterRoute;
import io.mantisrx.master.api.akka.route.handlers.JobArtifactRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobArtifactRouteHandlerImpl;
import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.handlers.JobRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.handlers.JobStatusRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.ResourceClusterRouteHandler;
import io.mantisrx.master.api.akka.route.v0.AgentClusterRoute;
import io.mantisrx.master.api.akka.route.v0.JobClusterRoute;
import io.mantisrx.master.api.akka.route.v0.JobDiscoveryRoute;
import io.mantisrx.master.api.akka.route.v0.JobRoute;
import io.mantisrx.master.api.akka.route.v0.JobStatusRoute;
import io.mantisrx.master.api.akka.route.v0.MasterDescriptionRoute;
import io.mantisrx.master.events.*;
import io.mantisrx.master.jobcluster.job.CostsCalculator;
import io.mantisrx.master.jobcluster.job.JobTestHelper;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.scheduler.FakeMantisScheduler;
import io.mantisrx.master.vm.AgentClusterOperations;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.core.WorkerAssignments;
import io.mantisrx.server.core.master.LocalMasterMonitor;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.master.LeaderRedirectionFilter;
import io.mantisrx.server.master.LeadershipManagerLocalImpl;
import io.mantisrx.server.master.persistence.FileBasedPersistenceProvider;
import io.mantisrx.server.master.persistence.IMantisPersistenceProvider;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.resourcecluster.ResourceClusters;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.MantisSchedulerFactory;
import io.mantisrx.server.master.store.FileBasedStore;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.JsonNode;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.time.Duration;
import java.util.Map;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.function.Function;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.util.Strings;
/**
 * End-to-end tests for the v1 {@code /api/v1/jobs} routes. The full master route
 * stack (v0 + v1 routes behind the leader-redirection filter) is bound to a real
 * HTTP server on localhost:SERVER_PORT once per class; {@link #testIt()} then runs
 * an ordered scenario against it: create cluster, submit job, exercise GET/actions
 * endpoints, and finally kill the job.
 */
public class JobsRouteTest extends RouteTestBase {
    private final static Logger logger = LoggerFactory.getLogger(JobsRouteTest.class);
    // Daemon thread that boots the test HTTP server; interrupted in teardown().
    private static Thread t;
    private static final int SERVER_PORT = 8204;
    // Server binding handle; null if setup() failed before the server was bound.
    private static CompletionStage<ServerBinding> binding;
    // Fixtures: the cluster created in setupJobCluster() and the first job id the
    // submit in testJobSubmit() produces.
    private static final String TEST_CLUSTER = "sine-function";
    private static final String TEST_JOB_ID = "sine-function-1";

    public JobsRouteTest() {
        super("JobsRoute", SERVER_PORT);
    }

    /**
     * Boots the full master route stack (job-clusters manager actor, fake
     * scheduler, all v0/v1 routes, leadership filter) and binds the HTTP server on
     * a daemon thread. The latch releases the JUnit thread once binding has been
     * initiated — or once boot has failed, leaving {@code binding} null.
     */
    @BeforeClass
    public static void setup() throws Exception {
        JobTestHelper.deleteAllFiles();
        JobTestHelper.createDirsIfRequired();
        TestHelpers.setupMasterConfig();
        final CountDownLatch latch = new CountDownLatch(1);
        t = new Thread(() -> {
            try {
                // boot up server using the route as defined below
                final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl(
                        new AuditEventSubscriberLoggingImpl(),
                        new StatusEventSubscriberLoggingImpl(),
                        new WorkerEventSubscriberLoggingImpl());
                ActorRef jobClustersManagerActor = system.actorOf(
                        JobClustersManagerActor.props(
                                new MantisJobStore(new FileBasedPersistenceProvider(true)),
                                lifecycleEventPublisher,
                                CostsCalculator.noop()),
                        "jobClustersManager");
                IMantisPersistenceProvider simpleCachedFileStorageProvider = new FileBasedPersistenceProvider(new FileBasedStore());
                // Scheduler is faked: it just routes worker events back to the actor.
                MantisSchedulerFactory fakeSchedulerFactory = mock(MantisSchedulerFactory.class);
                MantisScheduler fakeScheduler = new FakeMantisScheduler(jobClustersManagerActor);
                when(fakeSchedulerFactory.forJob(any())).thenReturn(fakeScheduler);
                jobClustersManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize(
                        fakeSchedulerFactory,
                        false), ActorRef.noSender());
                final JobClusterRouteHandler jobClusterRouteHandler = new JobClusterRouteHandlerAkkaImpl(
                        jobClustersManagerActor);
                final JobArtifactRouteHandler jobArtifactRouteHandler = new JobArtifactRouteHandlerImpl(simpleCachedFileStorageProvider);
                final JobRouteHandler jobRouteHandler = new JobRouteHandlerAkkaImpl(
                        jobClustersManagerActor);
                MasterDescription masterDescription = new MasterDescription(
                        "127.0.0.1",
                        "127.0.0.1",
                        SERVER_PORT,
                        SERVER_PORT,
                        SERVER_PORT,
                        "api/postjobstatus",
                        SERVER_PORT,
                        System.currentTimeMillis());
                // The discovery handler uses the server idle-timeout for its ask timeouts.
                Duration idleTimeout = system.settings()
                        .config()
                        .getDuration("akka.http.server.idle-timeout");
                logger.info("idle timeout {} sec ", idleTimeout.getSeconds());
                final AgentClusterOperations mockAgentClusterOps = mock(AgentClusterOperations.class);
                final JobStatusRouteHandler jobStatusRouteHandler = mock(JobStatusRouteHandler.class);
                when(jobStatusRouteHandler.jobStatus(anyString())).thenReturn(Flow.create());
                final JobRoute v0JobRoute = new JobRoute(jobRouteHandler, system);
                JobDiscoveryRouteHandler jobDiscoveryRouteHandler = new JobDiscoveryRouteHandlerAkkaImpl(
                        jobClustersManagerActor,
                        idleTimeout);
                final JobDiscoveryRoute v0JobDiscoveryRoute = new JobDiscoveryRoute(
                        jobDiscoveryRouteHandler);
                final JobClusterRoute v0JobClusterRoute = new JobClusterRoute(
                        jobClusterRouteHandler,
                        jobRouteHandler,
                        system);
                final JobStatusRoute v0JobStatusRoute = new JobStatusRoute(jobStatusRouteHandler);
                final AgentClusterRoute v0AgentClusterRoute = new AgentClusterRoute(
                        mockAgentClusterOps,
                        system);
                final MasterDescriptionRoute v0MasterDescriptionRoute = new MasterDescriptionRoute(
                        masterDescription);
                final JobsRoute v1JobsRoute = new JobsRoute(
                        jobClusterRouteHandler,
                        jobRouteHandler,
                        system);
                final JobClustersRoute v1JobClusterRoute = new JobClustersRoute(
                        jobClusterRouteHandler, system);
                final JobArtifactsRoute v1JobArtifactsRoute = new JobArtifactsRoute(jobArtifactRouteHandler);
                final AgentClustersRoute v1AgentClustersRoute = new AgentClustersRoute(
                        mockAgentClusterOps);
                final JobStatusStreamRoute v1JobStatusStreamRoute = new JobStatusStreamRoute(
                        jobStatusRouteHandler);
                final AdminMasterRoute v1AdminMasterRoute = new AdminMasterRoute(masterDescription);
                final JobDiscoveryStreamRoute v1JobDiscoveryStreamRoute = new JobDiscoveryStreamRoute(
                        jobDiscoveryRouteHandler);
                final LastSubmittedJobIdStreamRoute v1LastSubmittedJobIdStreamRoute = new LastSubmittedJobIdStreamRoute(
                        jobDiscoveryRouteHandler);
                // This test instance is always the (local) leader.
                LocalMasterMonitor localMasterMonitor = new LocalMasterMonitor(masterDescription);
                LeadershipManagerLocalImpl leadershipMgr = new LeadershipManagerLocalImpl(
                        masterDescription);
                leadershipMgr.setLeaderReady();
                LeaderRedirectionFilter leaderRedirectionFilter = new LeaderRedirectionFilter(
                        localMasterMonitor,
                        leadershipMgr);
                final MantisMasterRoute app = new MantisMasterRoute(
                        system,
                        leaderRedirectionFilter,
                        v0MasterDescriptionRoute,
                        v0JobClusterRoute,
                        v0JobRoute,
                        v0JobDiscoveryRoute,
                        v0JobStatusRoute,
                        v0AgentClusterRoute,
                        v1JobClusterRoute,
                        v1JobsRoute,
                        v1JobArtifactsRoute,
                        v1AdminMasterRoute,
                        v1AgentClustersRoute,
                        v1JobDiscoveryStreamRoute,
                        v1LastSubmittedJobIdStreamRoute,
                        v1JobStatusStreamRoute,
                        mock(ResourceClusters.class),
                        mock(ResourceClusterRouteHandler.class));
                final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow = app.createRoute()
                        .orElse(v1JobsRoute.createRoute(
                                Function.identity()))
                        .flow(
                                system,
                                materializer);
                logger.info("starting test server on port {}", SERVER_PORT);
                binding = http.bindAndHandle(
                        routeFlow,
                        ConnectHttp.toHost("localhost", SERVER_PORT),
                        materializer);
                latch.countDown();
            } catch (Exception e) {
                logger.info("caught exception", e);
                // Release the waiting JUnit thread even on failure so the suite
                // fails in the tests rather than hanging here.
                latch.countDown();
                e.printStackTrace();
            }
        });
        t.setDaemon(true);
        t.start();
        latch.await();
    }

    /** Unbinds the server (if it ever bound) and terminates the actor system. */
    @AfterClass
    public static void teardown() {
        logger.info("V1JobsRouteTest teardown");
        // FIX: binding is null when setup() failed before bindAndHandle completed
        // (its catch block still counts down the latch). Guard like the sibling
        // JobDiscoveryStreamRouteTest does, so a boot failure is not masked by an
        // NPE in teardown.
        if (binding != null) {
            binding.thenCompose(ServerBinding::unbind) // trigger unbinding from the port
                    .thenAccept(unbound -> system.terminate()); // and shutdown when done
        }
        t.interrupt();
    }

    /**
     * Ordered scenario: the steps are stateful (cluster creation must precede
     * submit, submit must precede the GET/action checks, kill comes last), so they
     * run inside a single @Test rather than as independent test methods.
     */
    @Test
    public void testIt() throws InterruptedException {
        cleanupExistingJobs();
        setupJobCluster();
        testJobSubmit();
        testPostOnJobInstanceEp_NotAllowed();
        testPutOnJobInstanceEp_NotAllowed();
        testGetLatestJobDiscoveryInfo();
        testGetOnJobInstanceActionsEp_NotAllowed();
        testValidJobSubmitToNonExistentCluster();
        testInvalidJobSubmitToNonExistentCluster();
        testGetJobsRouteViaClusterJobsEp();
        testGetJobsRouteViaJobsEp();
        testGetJobsRouteViaJobsEpCompactResp();
        testGetJobsRouteViaClusterJobEpCompactResp();
        testGetJobInstanceWithClusterName();
        testGetJobInstanceWithoutClusterName();
        testGetNonExistentJobInstanceWithoutClusterName();
        testGetJobInstanceWithNonMatchingClusterName();
        testGetNonExistentJobInstance();
        testJobQuickSubmit();
        testNonExistentJobQuickSubmit();
        testJobResubmitWorker();
        testNonExistentJobResubmitWorker();
        testJobScaleStage();
        testNonExistentJobScaleStage();
        testInvalidJobScaleStage();
        testJobKill();
        testNonExistentJobKill();
    }

    /** Removes any leftover test cluster from a previous run. */
    private void cleanupExistingJobs() throws InterruptedException {
        super.deleteClusterIfExist(TEST_CLUSTER);
        assert !this.isClusterExist(TEST_CLUSTER);
    }

    /** Creates the TEST_CLUSTER job cluster used by the rest of the scenario. */
    private void setupJobCluster() throws InterruptedException {
        testPost(
                getJobClustersEndpoint(),
                HttpEntities.create(
                        ContentTypes.APPLICATION_JSON,
                        JobClusterPayloads.JOB_CLUSTER_CREATE),
                StatusCodes.CREATED,
                null);
        assert this.isClusterExist(TEST_CLUSTER);
    }

    /** Submits the first job (becomes TEST_JOB_ID) and validates the response. */
    private void testJobSubmit() throws InterruptedException {
        testPost(
                getClusterJobsEndpoint(TEST_CLUSTER),
                HttpEntities.create(
                        ContentTypes.APPLICATION_JSON,
                        JobClusterPayloads.JOB_CLUSTER_SUBMIT),
                StatusCodes.CREATED,
                this::validateJobResponse);
    }

    @Test
    public void testPutOnJobsEp_NotAllowed() throws InterruptedException {
        testPut(
                getJobsEndpoint(),
                StatusCodes.METHOD_NOT_ALLOWED,
                null);
        testPut(
                getClusterJobsEndpoint(TEST_CLUSTER),
                StatusCodes.METHOD_NOT_ALLOWED,
                null);
    }

    @Test
    public void testDeleteOnJobsEp_NotAllowed() throws InterruptedException {
        testDelete(
                getJobsEndpoint(),
                StatusCodes.METHOD_NOT_ALLOWED,
                null);
        testDelete(
                getClusterJobsEndpoint(TEST_CLUSTER),
                StatusCodes.METHOD_NOT_ALLOWED,
                null);
    }

    private void testPostOnJobInstanceEp_NotAllowed() throws InterruptedException {
        testPost(
                getJobInstanceEndpoint(TEST_JOB_ID),
                StatusCodes.METHOD_NOT_ALLOWED,
                null);
        testPost(
                getJobInstanceEndpoint(TEST_CLUSTER, TEST_JOB_ID),
                StatusCodes.METHOD_NOT_ALLOWED,
                null);
    }

    private void testPutOnJobInstanceEp_NotAllowed() throws InterruptedException {
        testPut(
                getJobInstanceEndpoint(TEST_JOB_ID),
                StatusCodes.METHOD_NOT_ALLOWED,
                null);
        testPut(
                getJobInstanceEndpoint(TEST_CLUSTER, TEST_JOB_ID),
                StatusCodes.METHOD_NOT_ALLOWED,
                null);
    }

    private void testGetLatestJobDiscoveryInfo() throws InterruptedException {
        testGet(
                getJobClusterLatestJobDiscoveryInfoEp(TEST_CLUSTER),
                StatusCodes.OK,
                this::validateSchedulingInfo);
    }

    private void testGetOnJobInstanceActionsEp_NotAllowed() throws InterruptedException {
        // Action endpoints accept POST only; PUT must be rejected for all of them.
        for (String action : new String[]{"resubmitWorker", "quickSubmit", "scaleStage"}) {
            testPut(
                    getJobInstanceEndpoint(TEST_JOB_ID) + "/actions/" + action,
                    StatusCodes.METHOD_NOT_ALLOWED,
                    null);
            testPut(
                    getJobInstanceEndpoint(TEST_CLUSTER, TEST_JOB_ID) + "/actions/" + action,
                    StatusCodes.METHOD_NOT_ALLOWED,
                    null);
        }
    }

    private void testValidJobSubmitToNonExistentCluster() throws InterruptedException {
        testPost(
                getClusterJobsEndpoint("NonExistent"),
                HttpEntities.create(
                        ContentTypes.APPLICATION_JSON,
                        JobClusterPayloads.JOB_CLUSTER_SUBMIT_NonExistent),
                StatusCodes.NOT_FOUND,
                (m) -> {
                    assert m.contains("Job Cluster NonExistent doesn't exist");
                });
    }

    private void testInvalidJobSubmitToNonExistentCluster() throws InterruptedException {
        // Payload names a cluster that differs from the resource path — bad request.
        testPost(
                getClusterJobsEndpoint("NonExistent"),
                HttpEntities.create(
                        ContentTypes.APPLICATION_JSON,
                        JobClusterPayloads.JOB_CLUSTER_SUBMIT),
                StatusCodes.BAD_REQUEST,
                (m) -> {
                    assert m.contains("Cluster name specified in request payload [sine-function]" +
                            " does not match with what specified in resource endpoint [NonExistent]");
                });
    }

    private void testGetJobsRouteViaClusterJobsEp() throws InterruptedException {
        testGet(
                getClusterJobsEndpoint(TEST_CLUSTER),
                StatusCodes.OK,
                resp -> validateJobsListResponse(resp, 1, false));
    }

    private void testGetJobsRouteViaJobsEp() throws InterruptedException {
        testGet(
                getJobsEndpoint(),
                StatusCodes.OK,
                resp -> validateJobsListResponse(resp, 1, false));
    }

    private void testGetJobsRouteViaJobsEpCompactResp() throws InterruptedException {
        testGet(
                getJobsEndpoint() + "?compact=true",
                StatusCodes.OK,
                resp -> validateJobsListResponse(resp, 1, true));
    }

    private void testGetJobsRouteViaClusterJobEpCompactResp() throws InterruptedException {
        testGet(
                getClusterJobsEndpoint(TEST_CLUSTER) + "?compact=true",
                StatusCodes.OK,
                resp -> validateJobsListResponse(resp, 1, true));
    }

    private void testGetJobInstanceWithClusterName() throws InterruptedException {
        testGet(
                getJobInstanceEndpoint(TEST_CLUSTER, TEST_JOB_ID),
                StatusCodes.OK,
                this::validateJobDetails);
    }

    private void testGetJobInstanceWithoutClusterName() throws InterruptedException {
        testGet(
                getJobInstanceEndpoint(TEST_JOB_ID),
                StatusCodes.OK,
                this::validateJobDetails);
    }

    private void testGetNonExistentJobInstanceWithoutClusterName() throws InterruptedException {
        testGet(
                getJobInstanceEndpoint("NonExistent-1"),
                StatusCodes.NOT_FOUND,
                (m) -> {
                    assert m.contains("Job NonExistent-1 doesn't exist");
                });
    }

    private void testGetJobInstanceWithNonMatchingClusterName() throws InterruptedException {
        testGet(
                getJobInstanceEndpoint("NonExistent", TEST_JOB_ID),
                StatusCodes.NOT_FOUND,
                (m) -> {
                    assert m.contains("JobId [sine-function-1] exists but does not " +
                            "belong to specified cluster [NonExistent]");
                });
    }

    private void testGetNonExistentJobInstance() throws InterruptedException {
        testGet(
                getJobInstanceEndpoint(TEST_CLUSTER, "NonExistent-1"),
                StatusCodes.NOT_FOUND,
                (m) -> {
                    assert m.contains("Job NonExistent-1 doesn't exist");
                });
    }

    private void testJobQuickSubmit() throws InterruptedException {
        testPost(
                getJobsEndpoint() + "/actions/quickSubmit",
                HttpEntities.create(
                        ContentTypes.APPLICATION_JSON,
                        JobClusterPayloads.QUICK_SUBMIT),
                StatusCodes.CREATED,
                this::validateJobResponse);
    }

    private void testNonExistentJobQuickSubmit() throws InterruptedException {
        testPost(
                getJobsEndpoint() + "/actions/quickSubmit",
                HttpEntities.create(
                        ContentTypes.APPLICATION_JSON,
                        JobClusterPayloads.QUICK_SUBMIT_NONEXISTENT),
                StatusCodes.NOT_FOUND,
                null);
    }

    private void testJobResubmitWorker() throws InterruptedException {
        testPost(
                getJobInstanceEndpoint(TEST_JOB_ID) + "/actions/resubmitWorker",
                HttpEntities.create(
                        ContentTypes.APPLICATION_JSON,
                        JobPayloads.RESUBMIT_WORKER),
                StatusCodes.NO_CONTENT,
                null);
    }

    private void testNonExistentJobResubmitWorker() throws InterruptedException {
        testPost(
                getJobInstanceEndpoint("NonExistent-1") + "/actions/resubmitWorker",
                HttpEntities.create(
                        ContentTypes.APPLICATION_JSON,
                        JobPayloads.RESUBMIT_WORKER_NONEXISTENT),
                StatusCodes.NOT_FOUND,
                null);
    }

    private void testJobScaleStage() throws InterruptedException {
        testPost(
                getJobInstanceEndpoint(TEST_JOB_ID) + "/actions/scaleStage",
                HttpEntities.create(
                        ContentTypes.APPLICATION_JSON,
                        JobPayloads.SCALE_STAGE),
                StatusCodes.NO_CONTENT,
                null);
    }

    private void testNonExistentJobScaleStage() throws InterruptedException {
        testPost(
                getJobInstanceEndpoint("NonExistent-1") + "/actions/scaleStage",
                HttpEntities.create(
                        ContentTypes.APPLICATION_JSON,
                        JobPayloads.SCALE_STAGE_NonExistent),
                StatusCodes.NOT_FOUND,
                null);
    }

    private void testInvalidJobScaleStage() throws InterruptedException {
        // Payload names a job id that differs from the resource path — bad request.
        testPost(
                getJobInstanceEndpoint("NonExistent-1") + "/actions/scaleStage",
                HttpEntities.create(
                        ContentTypes.APPLICATION_JSON,
                        JobPayloads.SCALE_STAGE),
                StatusCodes.BAD_REQUEST,
                (m) -> {
                    assert m.contains("JobId specified in request payload [sine-function-1] " +
                            "does not match with resource uri [NonExistent-1]");
                });
    }

    private void testJobKill() throws InterruptedException {
        testDelete(
                getJobInstanceEndpoint(TEST_JOB_ID) + "?user=test&reason=unittest",
                StatusCodes.ACCEPTED,
                null);
    }

    private void testNonExistentJobKill() throws InterruptedException {
        testDelete(
                getJobInstanceEndpoint("NonExistent-1") + "?user=test&reason=unittest",
                StatusCodes.NOT_FOUND,
                null);
    }

    /** Validates a job submit/quickSubmit response body (full job payload). */
    private void validateJobResponse(String resp) {
        try {
            assert !Strings.isNullOrEmpty(resp);
            ObjectMapper mapper = new ObjectMapper();
            JsonNode responseObj = mapper.readTree(resp);
            assert responseObj.get("jobMetadata").get("name").asText().equals(TEST_CLUSTER);
            assert responseObj.get("jobMetadata").get("jobId").asText().startsWith("sine-function-");
            assert responseObj.get("jobMetadata").get("sla") != null;
            assert responseObj.get("jobMetadata").get("labels") != null;
            assert responseObj.get("stageMetadataList") != null;
            assert responseObj.get("workerMetadataList") != null;
        } catch (IOException ex) {
            logger.error("Failed to validate job response: " + ex.getMessage());
            assert false;
        }
    }

    /** Validates a single-job GET response (non-compact form). */
    private void validateJobDetails(String resp) {
        try {
            assert !Strings.isNullOrEmpty(resp);
            ObjectMapper mapper = new ObjectMapper();
            JsonNode responseObj = mapper.readTree(resp);
            validateJobsListItem(responseObj, false);
        } catch (IOException ex) {
            logger.error("Failed to validate job details response: " + ex.getMessage());
            assert false;
        }
    }

    /** Validates the latest-job-discovery-info response for TEST_JOB_ID. */
    private void validateSchedulingInfo(String s) {
        try {
            assert !Strings.isNullOrEmpty(s);
            JobSchedulingInfo jsi = Jackson.fromJSON(s, JobSchedulingInfo.class);
            assert jsi.getJobId().equals(TEST_JOB_ID);
            Map<Integer, WorkerAssignments> wa = jsi.getWorkerAssignments();
            // The sine-function job has 2 stages with 1 worker each.
            assert wa.size() == 2;
            assert wa.containsKey(0);
            assert wa.get(0).getNumWorkers() == 1;
            assert wa.containsKey(1);
            assert wa.get(1).getNumWorkers() == 1;
        } catch (IOException e) {
            logger.error("caught unexpected exc {}", e.getMessage(), e);
            assert false;
        }
    }

    /** Validates a jobs-list response containing expectedJobsCount entries. */
    private void validateJobsListResponse(String resp, int expectedJobsCount, boolean isCompact) {
        try {
            assert !Strings.isNullOrEmpty(resp);
            ObjectMapper mapper = new ObjectMapper();
            JsonNode responseObj = mapper.readTree(resp).get("list");
            assert responseObj.size() == expectedJobsCount;
            for (int i = 0; i < expectedJobsCount; i++) {
                validateJobsListItem(responseObj.get(i), isCompact);
            }
        } catch (IOException ex) {
            logger.error("Failed to validate job response: " + ex.getMessage());
            assert false;
        }
    }

    /** Validates a job-definition JSON node (currently unused by the scenario). */
    private void validateJobDefinition(JsonNode responseObj) {
        assert responseObj != null;
        assert responseObj.get("name").asText().equals(TEST_CLUSTER);
        assert responseObj.get("artifactName").asText().equals(
                "https://mantis.staging.us-east-1.prod.netflix.net/mantis-artifacts/" +
                        "mantis-examples-sine-function-0.2.9.zip");
        assert responseObj.get("parameters").size() == 2;
        assert responseObj.get("jobSla").get("durationType").asText().equals("Perpetual");
        assert responseObj.get("numberOfStages").asInt() == 2;
        assert responseObj.get("schedulingInfo") != null;
        assert responseObj.get("labels").size() == 7;
    }

    /**
     * Validates one jobs-list entry; compact entries carry a flattened summary,
     * non-compact entries nest full jobMetadata/stage/worker lists.
     */
    private void validateJobsListItem(JsonNode responseObj, boolean isCompact) {
        assert responseObj != null;
        if (isCompact) {
            assert responseObj.get("jobMetadata") == null;
            assert responseObj.get("stageMetadataList") == null;
            assert responseObj.get("workerMetadataList") == null;
            assert responseObj.get("submittedAt") != null;
            assert responseObj.get("user") != null;
            assert responseObj.get("type").asText().equals("Perpetual");
            assert responseObj.get("numStages").asInt() == 2;
            assert responseObj.get("numWorkers").asInt() == 2;
            assert responseObj.get("totCPUs").asInt() == 2;
            assert responseObj.get("totMemory").asInt() == 400;
            assert responseObj.get("labels").size() == 7;
            assert responseObj.get("jobId").asText().startsWith("sine-function-");
        } else {
            assert responseObj.get("jobMetadata")
                    .get("jobId")
                    .asText()
                    .startsWith("sine-function-");
            assert responseObj.get("jobMetadata").get("name").asText().equals("sine-function");
            assert responseObj.get("jobMetadata").get("jarUrl").asText().equals(
                    "https://mantis.staging.us-east-1.prod.netflix.net/mantis-artifacts/" +
                            "mantis-examples-sine-function-0.2.9.zip");
            assert responseObj.get("jobMetadata").get("numStages").asInt() == 2;
            assert responseObj.get("jobMetadata").get("parameters").size() == 2;
            assert responseObj.get("jobMetadata").get("labels").size() == 7;
            assert responseObj.get("jobMetadata") != null;
            assert responseObj.get("stageMetadataList") != null;
            assert responseObj.get("workerMetadataList") != null;
            assert responseObj.get("stageMetadataList").size() == 2;
            assert responseObj.get("workerMetadataList").size() == 2;
        }
    }
}
| 7,912 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route/v1/AdminMasterRouteTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import akka.NotUsed;
import akka.http.javadsl.ConnectHttp;
import akka.http.javadsl.Http;
import akka.http.javadsl.ServerBinding;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.stream.Materializer;
import akka.stream.javadsl.Flow;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.jobcluster.job.JobTestHelper;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import java.util.List;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class AdminMasterRouteTest extends RouteTestBase {
private final static Logger logger = LoggerFactory.getLogger(AdminMasterRouteTest.class);
private static Thread t;
private static final int ADMIN_MASTER_PORT = 8205;
private static final MasterDescription fakeMasterDesc = new MasterDescription(
"localhost",
"127.0.0.1", ADMIN_MASTER_PORT,
ADMIN_MASTER_PORT + 2,
-1,
"api/v1/jobs/actions/postJobStatus",
-1,
System.currentTimeMillis());
private static CompletionStage<ServerBinding> binding;
private static final AdminMasterRoute masterDescRoute;
static {
TestHelpers.setupMasterConfig();
masterDescRoute = new AdminMasterRoute(fakeMasterDesc);
}
public AdminMasterRouteTest(){
super("MasterDescriptionRouteTest", 8205);
}
@BeforeClass
public static void setup() throws Exception {
JobTestHelper.deleteAllFiles();
JobTestHelper.createDirsIfRequired();
final CountDownLatch latch = new CountDownLatch(1);
t = new Thread(() -> {
try {
// boot up server using the route as defined below
final Http http = Http.get(system);
final Materializer materializer = Materializer.createMaterializer(system);
final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow = masterDescRoute.createRoute(Function.identity()).flow(system, materializer);
logger.info("starting test server on port {}", ADMIN_MASTER_PORT);
binding = http.bindAndHandle(routeFlow,
ConnectHttp.toHost("localhost", ADMIN_MASTER_PORT), materializer);
latch.countDown();
} catch (Exception e) {
logger.info("caught exception", e);
latch.countDown();
e.printStackTrace();
}
});
t.setDaemon(true);
t.start();
latch.await();
}
@AfterClass
public static void teardown() {
logger.info("MasterDescriptionRouteTest teardown");
binding
.thenCompose(ServerBinding::unbind) // trigger unbinding from the port
.thenAccept(unbound -> system.terminate()); // and shutdown when done
t.interrupt();
}
private String masterEndpoint(final String ep) {
return String.format("http://127.0.0.1:%d/api/v1/%s", ADMIN_MASTER_PORT, ep);
}
@Test
public void testMasterInfoAPI() throws InterruptedException {
final CountDownLatch latch = new CountDownLatch(1);
final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
HttpRequest.GET(masterEndpoint("masterInfo")));
responseFuture
.thenCompose(r -> processRespFut(r, 200))
.whenComplete((msg, t) -> {
try {
String responseMessage = getResponseMessage(msg, t);
logger.info("got response {}", responseMessage);
MasterDescription masterDescription = Jackson.fromJSON(responseMessage, MasterDescription.class);
logger.info("master desc ---> {}", masterDescription);
assertEquals(fakeMasterDesc, masterDescription);
} catch (Exception e) {
fail("unexpected error "+ e.getMessage());
}
latch.countDown();
});
assertTrue(latch.await(2, TimeUnit.SECONDS));
}
@Test
public void testMasterConfigAPI() throws InterruptedException {
    final CountDownLatch done = new CountDownLatch(1);
    // Fire the request; assertions happen asynchronously once the response arrives.
    final CompletionStage<HttpResponse> respFut =
            http.singleRequest(HttpRequest.GET(masterEndpoint("masterConfigs")));
    respFut
            .thenCompose(resp -> processRespFut(resp, 200))
            .whenComplete((msg, err) -> {
                try {
                    final String responseMessage = getResponseMessage(msg, err);
                    logger.info("got response {}", responseMessage);
                    // Deserialize into the route's Configlet list and compare against the
                    // configs the route itself reports.
                    final List<AdminMasterRoute.Configlet> masterconfig = Jackson.fromJSON(
                            responseMessage,
                            new TypeReference<List<AdminMasterRoute.Configlet>>() {});
                    logger.info("master config ---> {}", masterconfig);
                    assertEquals(masterDescRoute.getConfigs(), masterconfig);
                } catch (Exception e) {
                    fail("unexpected error "+ e.getMessage());
                }
                done.countDown();
            });
    // Bounded wait so a missing callback fails the test instead of hanging it.
    assertTrue(done.await(2, TimeUnit.SECONDS));
}
}
| 7,913 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route/v1/AgentClustersRouteTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import akka.NotUsed;
import akka.actor.ActorRef;
import akka.http.javadsl.ConnectHttp;
import akka.http.javadsl.Http;
import akka.http.javadsl.ServerBinding;
import akka.http.javadsl.model.ContentTypes;
import akka.http.javadsl.model.HttpEntities;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.stream.ActorMaterializer;
import akka.stream.javadsl.Flow;
import com.netflix.fenzo.AutoScaleAction;
import com.netflix.fenzo.AutoScaleRule;
import com.netflix.fenzo.VirtualMachineLease;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.JobClustersManagerActor;
import io.mantisrx.master.api.akka.payloads.AgentClusterPayloads;
import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventPublisherImpl;
import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl;
import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl;
import io.mantisrx.master.jobcluster.job.CostsCalculator;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.scheduler.FakeMantisScheduler;
import io.mantisrx.master.scheduler.JobMessageRouterImpl;
import io.mantisrx.master.vm.AgentClusterOperations;
import io.mantisrx.master.vm.AgentClusterOperationsImpl;
import io.mantisrx.server.master.AgentClustersAutoScaler;
import io.mantisrx.server.master.persistence.FileBasedPersistenceProvider;
import io.mantisrx.server.master.persistence.IMantisPersistenceProvider;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.MantisSchedulerFactory;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observer;
public class AgentClustersRouteTest extends RouteTestBase {
private final static Logger logger = LoggerFactory.getLogger(AgentClustersRouteTest.class);
private static Thread t;
private static final int serverPort = 8202;
private static final ObjectMapper mapper = new ObjectMapper().configure(
DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES,
false);
private static String SERVER_ENDPOINT = String.format(
"http://127.0.0.1:%d/api/v1/agentClusters",
serverPort);
private static CompletionStage<ServerBinding> binding;
public AgentClustersRouteTest() {
super("AgentClusterRoutes", 8202);
}
@BeforeClass
public static void setup() throws InterruptedException {
TestHelpers.setupMasterConfig();
final CountDownLatch latch = new CountDownLatch(1);
t = new Thread(() -> {
try {
// boot up server using the route as defined below
final Http http = Http.get(system);
final ActorMaterializer materializer = ActorMaterializer.create(system);
IMantisPersistenceProvider storageProvider = new FileBasedPersistenceProvider(true);
final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl(
new AuditEventSubscriberLoggingImpl(),
new StatusEventSubscriberLoggingImpl(),
new WorkerEventSubscriberLoggingImpl());
ActorRef jobClustersManagerActor = system.actorOf(
JobClustersManagerActor.props(
new MantisJobStore(storageProvider),
lifecycleEventPublisher,
CostsCalculator.noop()),
"jobClustersManager");
MantisSchedulerFactory fakeSchedulerFactory = mock(MantisSchedulerFactory.class);
MantisScheduler fakeScheduler = new FakeMantisScheduler(jobClustersManagerActor);
when(fakeSchedulerFactory.forJob(any())).thenReturn(fakeScheduler);
jobClustersManagerActor.tell(
new JobClusterManagerProto.JobClustersManagerInitialize(
fakeSchedulerFactory,
false),
ActorRef.noSender());
setupDummyAgentClusterAutoScaler();
final AgentClustersRoute agentClusterV2Route = new AgentClustersRoute(
new AgentClusterOperationsImpl(
storageProvider,
new JobMessageRouterImpl(jobClustersManagerActor),
fakeScheduler,
lifecycleEventPublisher,
"cluster"));
final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow = agentClusterV2Route.createRoute(
Function.identity()).flow(system, materializer);
logger.info("test server starting on port {}", serverPort);
binding = http.bindAndHandle(routeFlow,
ConnectHttp.toHost("localhost", serverPort),
materializer);
latch.countDown();
} catch (Exception e) {
logger.info("caught exception", e);
latch.countDown();
e.printStackTrace();
}
});
t.setDaemon(true);
t.start();
latch.await();
}
@AfterClass
public static void teardown() {
logger.info("V1AgentClusterRouteTest teardown");
binding
.thenCompose(ServerBinding::unbind) // trigger unbinding from the port
.thenAccept(unbound -> system.terminate()); // and shutdown when done
t.interrupt();
}
private static void setupDummyAgentClusterAutoScaler() {
final AutoScaleRule dummyAutoScaleRule = new AutoScaleRule() {
@Override
public String getRuleName() {
return "test";
}
@Override
public int getMinIdleHostsToKeep() {
return 1;
}
@Override
public int getMaxIdleHostsToKeep() {
return 10;
}
@Override
public long getCoolDownSecs() {
return 300;
}
@Override
public boolean idleMachineTooSmall(VirtualMachineLease lease) {
return false;
}
@Override
public int getMinSize() {
return 1;
}
@Override
public int getMaxSize() {
return 100;
}
};
try {
AgentClustersAutoScaler.initialize(() -> new HashSet<>(Collections.singletonList(
dummyAutoScaleRule)), new Observer<AutoScaleAction>() {
@Override
public void onCompleted() {
}
@Override
public void onError(Throwable e) {
}
@Override
public void onNext(AutoScaleAction autoScaleAction) {
}
});
} catch (Exception e) {
logger.info("AgentClustersAutoScaler is already initialized by another test", e);
}
}
@Test
public void testIt() throws InterruptedException {
testSetActiveAgentClusters();
testGetJobsOnAgentClusters();
testGetActiveAgentClusters();
}
private void testSetActiveAgentClusters() throws InterruptedException {
final CountDownLatch latch = new CountDownLatch(1);
final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
HttpRequest.POST(SERVER_ENDPOINT)
.withEntity(HttpEntities.create(
ContentTypes.APPLICATION_JSON,
AgentClusterPayloads.SET_ACTIVE)));
responseFuture
.thenCompose(r -> processRespFut(r, 200))
.whenComplete((msg, t) -> {
String responseMessage = getResponseMessage(msg, t);
logger.info("got response {}", responseMessage);
latch.countDown();
});
assertTrue(latch.await(10, TimeUnit.SECONDS));
}
private void testGetJobsOnAgentClusters() throws InterruptedException {
final CountDownLatch latch = new CountDownLatch(1);
final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
HttpRequest.GET(SERVER_ENDPOINT + "/jobs"));
responseFuture
.thenCompose(r -> processRespFut(r, 200))
.whenComplete((msg, t) -> {
String responseMessage = getResponseMessage(msg, t);
logger.info("got response {}", responseMessage);
// TODO validate jobs on VM response
assertEquals("{}", responseMessage);
latch.countDown();
});
assertTrue(latch.await(1, TimeUnit.SECONDS));
}
private void testGetAutoScalePolicy() throws InterruptedException {
final CountDownLatch latch = new CountDownLatch(1);
final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
HttpRequest.GET(SERVER_ENDPOINT + "/autoScalePolicy"));
responseFuture
.thenCompose(r -> processRespFut(r, 200))
.whenComplete((msg, t) -> {
String responseMessage = getResponseMessage(msg, t);
logger.info("got response {}", responseMessage);
try {
Map<String, AgentClusterOperations.AgentClusterAutoScaleRule> agentClusterAutoScaleRule = mapper
.readValue(
responseMessage,
new TypeReference<Map<String, AgentClusterOperations.AgentClusterAutoScaleRule>>() {
});
agentClusterAutoScaleRule.values().forEach(autoScaleRule -> {
assertEquals("test", autoScaleRule.getName());
assertEquals(300, autoScaleRule.getCooldownSecs());
assertEquals(1, autoScaleRule.getMinIdle());
assertEquals(10, autoScaleRule.getMaxIdle());
assertEquals(1, autoScaleRule.getMinSize());
assertEquals(100, autoScaleRule.getMaxSize());
});
} catch (IOException e) {
logger.error("caught error", e);
fail("failed to deserialize response");
}
latch.countDown();
});
assertTrue(latch.await(1, TimeUnit.SECONDS));
}
private void testGetActiveAgentClusters() throws InterruptedException {
final CountDownLatch latch = new CountDownLatch(1);
final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
HttpRequest.GET(SERVER_ENDPOINT));
responseFuture
.thenCompose(r -> processRespFut(r, 200))
.whenComplete((msg, t) -> {
String responseMessage = getResponseMessage(msg, t);
logger.info("got response {}", responseMessage);
assertEquals(AgentClusterPayloads.SET_ACTIVE, responseMessage);
latch.countDown();
});
assertTrue(latch.await(1, TimeUnit.SECONDS));
}
}
| 7,914 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route/v1/ResourceClusterNonLeaderRedirectRouteTest.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v1;
import static io.mantisrx.master.api.akka.payloads.ResourceClustersPayloads.CLUSTER_ID;
import static io.mantisrx.master.api.akka.payloads.ResourceClustersPayloads.RESOURCE_CLUSTER_DISABLE_TASK_EXECUTORS_ATTRS;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.http.javadsl.model.ContentTypes;
import akka.http.javadsl.model.HttpEntities;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.testkit.JUnitRouteTest;
import akka.http.javadsl.testkit.TestRoute;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.common.Ack;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.master.api.akka.payloads.ResourceClustersPayloads;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.api.akka.route.handlers.ResourceClusterRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.ResourceClusterRouteHandlerAkkaImpl;
import io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode;
import io.mantisrx.master.resourcecluster.ResourceClustersHostManagerActor;
import io.mantisrx.master.resourcecluster.proto.MantisResourceClusterEnvType;
import io.mantisrx.master.resourcecluster.proto.MantisResourceClusterSpec;
import io.mantisrx.master.resourcecluster.proto.ProvisionResourceClusterRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterAPIProto.DeleteResourceClusterResponse;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterAPIProto.GetResourceClusterResponse;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterAPIProto.ListResourceClustersResponse;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterAPIProto.ListResourceClustersResponse.RegisteredResourceCluster;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterProvisionSubmissionResponse;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleRuleProto.CreateAllResourceClusterScaleRulesRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleRuleProto.GetResourceClusterScaleRulesResponse;
import io.mantisrx.master.resourcecluster.proto.ScaleResourceRequest;
import io.mantisrx.master.resourcecluster.proto.ScaleResourceResponse;
import io.mantisrx.master.resourcecluster.proto.UpgradeClusterContainersRequest;
import io.mantisrx.master.resourcecluster.proto.UpgradeClusterContainersResponse;
import io.mantisrx.master.resourcecluster.resourceprovider.NoopResourceClusterResponseHandler;
import io.mantisrx.master.resourcecluster.resourceprovider.ResourceClusterProvider;
import io.mantisrx.master.resourcecluster.resourceprovider.ResourceClusterProviderAdapter;
import io.mantisrx.master.resourcecluster.resourceprovider.ResourceClusterProviderUpgradeRequest;
import io.mantisrx.master.resourcecluster.resourceprovider.ResourceClusterResponseHandler;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.persistence.IMantisPersistenceProvider;
import io.mantisrx.server.master.persistence.InMemoryPersistenceProvider;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.PagedActiveJobOverview;
import io.mantisrx.server.master.resourcecluster.ResourceCluster;
import io.mantisrx.server.master.resourcecluster.ResourceCluster.TaskExecutorStatus;
import io.mantisrx.server.master.resourcecluster.ResourceClusters;
import io.mantisrx.server.master.resourcecluster.TaskExecutorID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorRegistration;
import io.mantisrx.shaded.com.google.common.collect.ImmutableList;
import io.mantisrx.shaded.com.google.common.collect.ImmutableMap;
import java.io.IOException;
import java.time.Duration;
import java.time.Instant;
import java.util.Arrays;
import java.util.Collections;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.ArgumentMatchers;
/**
 * Route tests for the resourceClusters v1 API using Akka HTTP's JUnitRouteTest (no real
 * server). Two route instances are exercised: one whose host-manager actor uses the
 * provider adapter from ConfigurationProvider ("noop"), and one backed by the in-test
 * UnitTestResourceProviderAdapter. Task-executor queries are served by a mocked
 * ResourceClusters/ResourceCluster pair.
 */
public class ResourceClusterNonLeaderRedirectRouteTest extends JUnitRouteTest {
// Provider adapter with canned responses; individual tests may inject an override.
private static final UnitTestResourceProviderAdapter resourceProviderAdapter =
new UnitTestResourceProviderAdapter();
// Mocked cluster registry used by the non-host (task-executor) endpoints.
private final ResourceClusters resourceClusters = mock(ResourceClusters.class);
private final ActorSystem system =
ActorSystem.create(ResourceClusterNonLeaderRedirectRouteTest.class.getSimpleName());
// In-memory storage shared by both host-manager actors below.
private final IMantisPersistenceProvider storageProvider = new InMemoryPersistenceProvider();
private final ActorRef resourceClustersHostManagerActorWithNoopAdapter = system.actorOf(
ResourceClustersHostManagerActor.props(
new ResourceClusterProviderAdapter(ConfigurationProvider.getConfig().getResourceClusterProvider(), system),
storageProvider),
"jobClustersManagerNoop");
private final ActorRef resourceClustersHostManagerActorWithTestAdapter = system.actorOf(
ResourceClustersHostManagerActor.props(resourceProviderAdapter, storageProvider),
"jobClustersManagerTest");
private final ResourceClusterRouteHandler resourceClusterRouteHandlerWithNoopAdapter =
new ResourceClusterRouteHandlerAkkaImpl(resourceClustersHostManagerActorWithNoopAdapter);
private final ResourceClusterRouteHandler resourceClusterRouteHandlerWithTestAdapter =
new ResourceClusterRouteHandlerAkkaImpl(resourceClustersHostManagerActorWithTestAdapter);
// Route wired to the noop adapter: host-management calls are expected to fail/500.
private final TestRoute testRouteWithNoopAdapter =
testRoute(new ResourceClustersNonLeaderRedirectRoute(resourceClusters, resourceClusterRouteHandlerWithNoopAdapter, system)
.createRoute(route -> route));
// Route wired to the test adapter: host-management calls succeed with canned responses.
private final TestRoute testRoute =
testRoute(new ResourceClustersNonLeaderRedirectRoute(resourceClusters, resourceClusterRouteHandlerWithTestAdapter, system)
.createRoute(route -> route));
@BeforeClass
public static void init() {
TestHelpers.setupMasterConfig();
}
// GET .../taskExecutors/{id}/getTaskExecutorState returns the mocked status as JSON.
@Test
public void testGetTaskExecutorState() {
TaskExecutorRegistration registration =
TaskExecutorRegistration.builder()
.taskExecutorID(TaskExecutorID.of("myExecutor"))
.clusterID(ClusterID.of("myCluster"))
.taskExecutorAddress("taskExecutorAddress")
.hostname("hostName")
.workerPorts(new WorkerPorts(1, 2, 3, 4, 5))
.machineDefinition(new MachineDefinition(1, 1, 1, 1, 1))
.taskExecutorAttributes(ImmutableMap.of())
.build();
TaskExecutorStatus status =
new TaskExecutorStatus(registration, true, true, true, false, null, Instant.now().toEpochMilli());
ResourceCluster resourceCluster = mock(ResourceCluster.class);
when(resourceCluster.getTaskExecutorState(TaskExecutorID.of("myExecutor")))
.thenReturn(CompletableFuture.completedFuture(status));
when(resourceClusters.getClusterFor(ClusterID.of("myCluster"))).thenReturn(resourceCluster);
testRouteWithNoopAdapter.run(HttpRequest.GET("/api/v1/resourceClusters/myCluster/taskExecutors/myExecutor/getTaskExecutorState"))
.assertStatusCode(200)
.assertEntityAs(Jackson.unmarshaller(TaskExecutorStatus.class), status);
}
// Verifies the activeJobOverview endpoint maps its optional query params
// (startingIndex, pageSize) onto ResourceCluster.getActiveJobOverview.
@Test
public void testGetActiveJobOverview() {
PagedActiveJobOverview overview1 = new PagedActiveJobOverview(ImmutableList.of(), 0);
PagedActiveJobOverview overview2 = new PagedActiveJobOverview(ImmutableList.of("test"), 1);
PagedActiveJobOverview overview3 = new PagedActiveJobOverview(ImmutableList.of("test"), 99);
ResourceCluster resourceCluster = mock(ResourceCluster.class);
when(resourceCluster.getActiveJobOverview(Optional.empty(), Optional.empty()))
.thenReturn(CompletableFuture.completedFuture(overview1));
when(resourceCluster.getActiveJobOverview(Optional.of(0), Optional.of(9)))
.thenReturn(CompletableFuture.completedFuture(overview2));
when(resourceCluster.getActiveJobOverview(Optional.empty(), Optional.of(99)))
.thenReturn(CompletableFuture.completedFuture(overview3));
when(resourceClusters.getClusterFor(ClusterID.of("myCluster"))).thenReturn(resourceCluster);
// No params -> both Optionals empty.
testRouteWithNoopAdapter.run(HttpRequest.GET(
"/api/v1/resourceClusters/myCluster/activeJobOverview"))
.assertStatusCode(200)
.assertEntityAs(Jackson.unmarshaller(PagedActiveJobOverview.class), overview1);
// Both params present.
testRouteWithNoopAdapter.run(HttpRequest.GET(
"/api/v1/resourceClusters/myCluster/activeJobOverview?startingIndex=0&pageSize=9"))
.assertStatusCode(200)
.assertEntityAs(Jackson.unmarshaller(PagedActiveJobOverview.class), overview2);
// Only pageSize present.
testRouteWithNoopAdapter.run(HttpRequest.GET(
"/api/v1/resourceClusters/myCluster/activeJobOverview?pageSize=99"))
.assertStatusCode(200)
.assertEntityAs(Jackson.unmarshaller(PagedActiveJobOverview.class), overview3);
}
// Verifies the disableTaskExecutors payload is translated into a disableTaskExecutorsFor
// call whose expiry lands in the payload-defined window (between 17h and 20h from now).
@Test
public void testDisableTaskExecutorsRoute() {
// set up the mocks
ResourceCluster resourceCluster = mock(ResourceCluster.class);
when(resourceCluster.disableTaskExecutorsFor(
ArgumentMatchers.eq(RESOURCE_CLUSTER_DISABLE_TASK_EXECUTORS_ATTRS),
ArgumentMatchers.argThat(expiry ->
expiry.isAfter(Instant.now().plus(Duration.ofHours(17))) &&
expiry.isBefore(Instant.now().plus(Duration.ofHours(20)))),
ArgumentMatchers.eq(Optional.empty())))
.thenReturn(CompletableFuture.completedFuture(Ack.getInstance()));
when(resourceClusters.getClusterFor(ClusterID.of("myCluster"))).thenReturn(resourceCluster);
// set up the HTTP request that needs to be issued
HttpRequest request =
HttpRequest
.POST("/api/v1/resourceClusters/myCluster/disableTaskExecutors")
.withEntity(
HttpEntities.create(
ContentTypes.APPLICATION_JSON,
ResourceClustersPayloads.RESOURCE_CLUSTER_DISABLE_TASK_EXECUTORS_PAYLOAD));
// make the request and verify the response
testRouteWithNoopAdapter
.run(request)
.assertStatusCode(200)
.assertEntityAs(Jackson.unmarshaller(Ack.class), Ack.getInstance());
}
// With the noop provider adapter, listing works (empty) but host-management operations
// such as scaleSku must surface as 500s.
@Test
public void testResourceClusterHostRoutesNoopAdapter() throws IOException {
// test get empty clusters (nothing has been registered).
testRouteWithNoopAdapter.run(HttpRequest.GET(getResourceClusterEndpoint()))
.assertStatusCode(StatusCodes.OK)
.assertEntityAs(Jackson.unmarshaller(ListResourceClustersResponse.class),
ListResourceClustersResponse.builder()
.responseCode(ResponseCode.SUCCESS)
.registeredResourceClusters(Collections.emptyList())
.build());
// should return error due to NoopResourceClusterProvider.
testRouteWithNoopAdapter.run(
HttpRequest.POST(getResourceClusterEndpoint(CLUSTER_ID) + "/scaleSku")
.withEntity(HttpEntities.create(
ContentTypes.APPLICATION_JSON,
ResourceClustersPayloads.RESOURCE_CLUSTER_SKU_SCALE)))
.assertStatusCode(StatusCodes.INTERNAL_SERVER_ERROR);
}
// Full lifecycle against the test adapter: list (empty) -> provision -> list/get ->
// scale -> delete -> list (empty again) -> get (404). Steps are order-dependent since
// they share the same in-memory storage.
@Test
public void testResourceClusterHostRelatedRoutes() throws IOException {
// test get empty clusters (nothing has been registered).
testRoute.run(HttpRequest.GET(getResourceClusterEndpoint()))
.assertStatusCode(StatusCodes.OK)
.assertEntityAs(Jackson.unmarshaller(ListResourceClustersResponse.class),
ListResourceClustersResponse.builder()
.responseCode(ResponseCode.SUCCESS)
.registeredResourceClusters(Collections.emptyList())
.build());
// test register new cluster.
ProvisionResourceClusterRequest provisionReq1 = Jackson.fromJSON(
ResourceClustersPayloads.RESOURCE_CLUSTER_CREATE,
ProvisionResourceClusterRequest.class);
testRoute.run(
HttpRequest.POST(getResourceClusterEndpoint())
.withEntity(HttpEntities.create(
ContentTypes.APPLICATION_JSON,
ResourceClustersPayloads.RESOURCE_CLUSTER_CREATE)))
.assertStatusCode(StatusCodes.ACCEPTED)
.assertEntityAs(
Jackson.unmarshaller(MantisResourceClusterSpec.class),
provisionReq1.getClusterSpec());
// test get one registered cluster.
testRoute.run(HttpRequest.GET(getResourceClusterEndpoint()))
.assertStatusCode(StatusCodes.OK)
.assertEntityAs(Jackson.unmarshaller(ListResourceClustersResponse.class),
ListResourceClustersResponse.builder()
// TODO this responseCode field is currently not covered due to lombok issue at {@link ListResourceClustersResponse}.
.responseCode(ResponseCode.SUCCESS)
.registeredResourceClusters(
Arrays.asList(RegisteredResourceCluster.builder()
.id(ClusterID.of(CLUSTER_ID)).version("").build()))
.build());
// test get registered cluster spec
testRoute.run(HttpRequest.GET(getResourceClusterEndpoint(CLUSTER_ID)))
.assertStatusCode(StatusCodes.OK)
.assertEntityAs(Jackson.unmarshaller(GetResourceClusterResponse.class),
GetResourceClusterResponse.builder()
.responseCode(ResponseCode.SUCCESS)
.clusterSpec(provisionReq1.getClusterSpec())
.build());
// test scale cluster sku
testRoute.run(
HttpRequest.POST(getResourceClusterEndpoint(CLUSTER_ID) + "/scaleSku")
.withEntity(HttpEntities.create(
ContentTypes.APPLICATION_JSON,
ResourceClustersPayloads.RESOURCE_CLUSTER_SKU_SCALE)))
.assertStatusCode(StatusCodes.ACCEPTED)
.assertEntityAs(
Jackson.unmarshaller(ScaleResourceResponse.class),
Jackson.fromJSON(
ResourceClustersPayloads.RESOURCE_CLUSTER_SCALE_RESULT,
ScaleResourceResponse.class));
// test de-register cluster.
testRoute.run(
HttpRequest.DELETE(getResourceClusterEndpoint(CLUSTER_ID)))
.assertStatusCode(StatusCodes.OK)
.assertEntityAs(
Jackson.unmarshaller(DeleteResourceClusterResponse.class),
DeleteResourceClusterResponse.builder().responseCode(ResponseCode.SUCCESS).build());
// test get registered cluster list
testRoute.run(HttpRequest.GET(getResourceClusterEndpoint()))
.assertStatusCode(StatusCodes.OK)
.assertEntityAs(Jackson.unmarshaller(ListResourceClustersResponse.class),
ListResourceClustersResponse.builder()
.responseCode(ResponseCode.SUCCESS)
.registeredResourceClusters(Collections.emptyList())
.build());
// test get de-registered cluster (404)
testRoute.run(HttpRequest.GET(getResourceClusterEndpoint(CLUSTER_ID)))
.assertStatusCode(StatusCodes.NOT_FOUND);
}
// Scale-rule CRUD: empty read -> bulk create -> read back -> single-rule create merged
// into the set -> read back. Also verifies the cluster's scaler rule set is refreshed.
@Test
public void testResourceClusterScaleRulesRoutes() throws IOException {
ResourceCluster resourceCluster = mock(ResourceCluster.class);
when(resourceCluster.refreshClusterScalerRuleSet())
.thenReturn(CompletableFuture.completedFuture(Ack.getInstance()));
when(resourceClusters.getClusterFor(ClusterID.of(CLUSTER_ID))).thenReturn(resourceCluster);
// test get empty cluster rule.
testRoute.run(HttpRequest.GET(getResourceClusterScaleRulesEndpoint(CLUSTER_ID)))
.assertStatusCode(StatusCodes.OK)
.assertEntityAs(Jackson.unmarshaller(GetResourceClusterScaleRulesResponse.class),
GetResourceClusterScaleRulesResponse.builder()
.responseCode(ResponseCode.SUCCESS)
.clusterId(ClusterID.of(CLUSTER_ID))
.rules(Collections.emptyList())
.build());
// test register new cluster rule.
CreateAllResourceClusterScaleRulesRequest createRuleReq1 = Jackson.fromJSON(
ResourceClustersPayloads.RESOURCE_CLUSTER_SCALE_RULES_CREATE,
CreateAllResourceClusterScaleRulesRequest.class);
testRoute.run(
HttpRequest.POST(getResourceClusterScaleRulesEndpoint(CLUSTER_ID))
.withEntity(HttpEntities.create(
ContentTypes.APPLICATION_JSON,
ResourceClustersPayloads.RESOURCE_CLUSTER_SCALE_RULES_CREATE)))
.assertStatusCode(StatusCodes.ACCEPTED)
.assertEntityAs(Jackson.unmarshaller(GetResourceClusterScaleRulesResponse.class),
GetResourceClusterScaleRulesResponse.builder()
.responseCode(ResponseCode.SUCCESS)
.clusterId(ClusterID.of(CLUSTER_ID))
.rules(createRuleReq1.getRules())
.build());
// test get two cluster rules.
testRoute.run(HttpRequest.GET(getResourceClusterScaleRulesEndpoint(CLUSTER_ID)))
.assertStatusCode(StatusCodes.OK)
.assertEntityAs(Jackson.unmarshaller(GetResourceClusterScaleRulesResponse.class),
GetResourceClusterScaleRulesResponse.builder()
.responseCode(ResponseCode.SUCCESS)
.clusterId(ClusterID.of(CLUSTER_ID))
.rules(createRuleReq1.getRules())
.build());
testRoute.run(
HttpRequest.POST(getResourceClusterScaleRuleEndpoint(CLUSTER_ID))
.withEntity(HttpEntities.create(
ContentTypes.APPLICATION_JSON,
ResourceClustersPayloads.RESOURCE_CLUSTER_SINGLE_SCALE_RULE_CREATE)))
.assertStatusCode(StatusCodes.ACCEPTED)
.assertEntityAs(Jackson.unmarshaller(GetResourceClusterScaleRulesResponse.class),
Jackson.fromJSON(
ResourceClustersPayloads.RESOURCE_CLUSTER_SCALE_RULES_RESULT,
GetResourceClusterScaleRulesResponse.class))
;
testRoute.run(HttpRequest.GET(getResourceClusterScaleRulesEndpoint(CLUSTER_ID)))
.assertStatusCode(StatusCodes.OK)
.assertEntityAs(Jackson.unmarshaller(GetResourceClusterScaleRulesResponse.class),
Jackson.fromJSON(
ResourceClustersPayloads.RESOURCE_CLUSTER_SCALE_RULES_RESULT,
GetResourceClusterScaleRulesResponse.class));
verify(resourceCluster).refreshClusterScalerRuleSet();
}
// Upgrade request round-trip: the response echoes cluster id, region, sku and env type.
@Test
public void testResourceClusterUpgradeRoutes() throws IOException {
UpgradeClusterContainersRequest createRuleReq1 = UpgradeClusterContainersRequest.builder()
.clusterId(ClusterID.of(CLUSTER_ID))
.region("us-east-1")
.optionalBatchMaxSize(50)
.optionalSkuId("large")
.optionalEnvType(MantisResourceClusterEnvType.Prod)
.build();
testRoute.run(
HttpRequest.POST(getResourceClusterUpgradeEndpoint(CLUSTER_ID))
.withEntity(HttpEntities.create(
ContentTypes.APPLICATION_JSON,
Jackson.toJson(createRuleReq1))))
.assertStatusCode(StatusCodes.ACCEPTED)
.assertEntityAs(Jackson.unmarshaller(UpgradeClusterContainersResponse.class),
UpgradeClusterContainersResponse.builder()
.responseCode(ResponseCode.SUCCESS)
.clusterId(createRuleReq1.getClusterId())
.region(createRuleReq1.getRegion())
.optionalSkuId(createRuleReq1.getOptionalSkuId())
.optionalEnvType(createRuleReq1.getOptionalEnvType())
.build());
}
// --- URL helpers for the v1 resourceClusters API ---
final String getResourceClusterEndpoint() {
return "/api/v1/resourceClusters";
}
final String getResourceClusterEndpoint(String clusterId) {
return String.format(
"/api/v1/resourceClusters/%s",
clusterId);
}
final String getResourceClusterScaleRulesEndpoint(String clusterId) {
return String.format(
"/api/v1/resourceClusters/%s/scaleRules",
clusterId);
}
final String getResourceClusterScaleRuleEndpoint(String clusterId) {
return String.format(
"/api/v1/resourceClusters/%s/scaleRule",
clusterId);
}
final String getResourceClusterUpgradeEndpoint(String clusterId) {
return String.format(
"/api/v1/resourceClusters/%s/upgrade",
clusterId);
}
/**
 * ResourceClusterProvider with canned responses for provision/scale/upgrade. A delegate
 * can be injected per-test via setInjectedProvider(); when none is set, provisioning
 * resolves after a short delay and scale/upgrade echo the request fields back.
 */
public static class UnitTestResourceProviderAdapter implements ResourceClusterProvider {
// Optional per-test override; null means "use the canned behavior below".
private ResourceClusterProvider injectedProvider;
public void setInjectedProvider(ResourceClusterProvider injectedProvider) {
this.injectedProvider = injectedProvider;
}
public void resetInjectedProvider() {
this.injectedProvider = null;
}
@Override
public CompletionStage<ResourceClusterProvisionSubmissionResponse> provisionClusterIfNotPresent(
ProvisionResourceClusterRequest clusterSpec) {
if (this.injectedProvider != null) return this.injectedProvider.provisionClusterIfNotPresent(clusterSpec);
return CompletableFuture.supplyAsync(() -> {
try {
// Simulate provisioning latency so async handling in the actor is exercised.
Thread.sleep(500);
return ResourceClusterProvisionSubmissionResponse.builder().response("mock resp").build();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
});
}
@Override
public CompletionStage<ScaleResourceResponse> scaleResource(ScaleResourceRequest scaleRequest) {
if (this.injectedProvider != null) return this.injectedProvider.scaleResource(scaleRequest);
return CompletableFuture.completedFuture(
ScaleResourceResponse.builder()
.message("test scale resp")
.region(scaleRequest.getRegion())
.skuId(scaleRequest.getSkuId())
.clusterId(scaleRequest.getClusterId())
.envType(scaleRequest.getEnvType().get())
.desireSize(scaleRequest.getDesireSize())
.responseCode(ResponseCode.SUCCESS)
.build());
}
@Override
public CompletionStage<UpgradeClusterContainersResponse> upgradeContainerResource(
ResourceClusterProviderUpgradeRequest request) {
return CompletableFuture.completedFuture(
UpgradeClusterContainersResponse.builder()
.message("test scale resp")
.region(request.getRegion())
.optionalSkuId(request.getOptionalSkuId())
.clusterId(request.getClusterId())
.optionalEnvType(request.getOptionalEnvType())
.responseCode(ResponseCode.SUCCESS)
.build());
}
@Override
public ResourceClusterResponseHandler getResponseHandler() {
if (this.injectedProvider != null) return this.injectedProvider.getResponseHandler();
return new NoopResourceClusterResponseHandler();
}
}
}
| 7,915 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route/pagination/ListObjectTests.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.pagination;
import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.junit.Test;
import scala.Tuple1;
/**
 * Unit tests for {@code ListObject} pagination and sorting behavior.
 *
 * <p>{@link TestObject} exposes fields with varying visibility to exercise the
 * reflective sort-field lookup: a public field, a private field, a private field
 * with a public getter, a protected field, and a non-Comparable complex type.
 */
public class ListObjectTests {
    // Shared RNG for generating test data; seeded once per class load.
    private static final Random rnd = new Random(System.currentTimeMillis());

    /** Sorting by a field that does not exist must raise a RuntimeException. */
    @Test(expected = RuntimeException.class)
    public void testSortingByInvalidFieldName() {
        try {
            ListObject<TestObject> listobject = new ListObject.Builder<TestObject>()
                    .withObjects(generateList(10), TestObject.class)
                    .withSortField("invalidValue")
                    .withSortAscending(true)
                    .build();
        } catch (Exception e) {
            assertTrue(e.getMessage().contains("Specified sort field is invalid."));
            throw e;
        }
    }

    /** A null sort field means "no sorting": original order is preserved. */
    @Test
    public void testSortingByNullFieldName() throws RuntimeException {
        ArrayList<TestObject> objects = generateList(10);
        // Copy the input so the original order is available for comparison below.
        List<TestObject> list = new ListObject.Builder<TestObject>()
                .withObjects(new ArrayList<>(objects), TestObject.class)
                .withSortField(null)
                .withSortAscending(true)
                .build().list;
        for (int i = 0; i < objects.size(); i++) {
            assert objects.get(i).publicValue == list.get(i).publicValue;
        }
    }

    /** An empty sort field also means "no sorting": original order is preserved. */
    @Test
    public void testSortingByEmptyFieldName() throws RuntimeException {
        ArrayList<TestObject> objects = generateList(10);
        List<TestObject> list = new ListObject.Builder<TestObject>()
                .withObjects(new ArrayList<>(objects), TestObject.class)
                .withSortField("")
                .withSortAscending(true)
                .build().list;
        for (int i = 0; i < objects.size(); i++) {
            assert objects.get(i).publicValue == list.get(i).publicValue;
        }
    }

    /** Sorting ascending by a public field yields a non-decreasing sequence. */
    @Test
    public void testSortingByPublicValueFieldName() {
        List<TestObject> objects = generateList(10);
        List<TestObject> sortedList = new ListObject.Builder<TestObject>()
                .withObjects(objects, TestObject.class)
                .withSortField("publicValue")
                .withSortAscending(true)
                .build().list;
        assert sortedList.size() == objects.size();
        int prevValue = sortedList.get(0).publicValue;
        for (int i = 1; i < sortedList.size(); i++) {
            assert sortedList.get(i).publicValue >= prevValue;
            prevValue = sortedList.get(i).publicValue;
        }
    }

    /** Sorting descending by a public field yields a non-increasing sequence. */
    @Test
    public void testSortingByPublicValueFieldNameDescending() {
        List<TestObject> objects = generateList(10);
        List<TestObject> sortedList = new ListObject.Builder<TestObject>()
                .withObjects(objects, TestObject.class)
                .withSortField("publicValue")
                .withSortAscending(false)
                .build().list;
        assert sortedList.size() == objects.size();
        int prevValue = sortedList.get(0).publicValue;
        for (int i = 1; i < sortedList.size(); i++) {
            // Use <= (not strict <) to mirror the ascending test's >=; the random
            // data may contain duplicate values and a strict comparison would make
            // this test flaky on ties.
            assert sortedList.get(i).publicValue <= prevValue;
            prevValue = sortedList.get(i).publicValue;
        }
    }

    /** A private field with no getter cannot be used as a sort key. */
    @Test(expected = RuntimeException.class)
    public void testSortingByPrivateValueFieldName() {
        try {
            ListObject<TestObject> listobject = new ListObject.Builder<TestObject>()
                    .withObjects(generateList(10), TestObject.class)
                    .withSortField("privateValue")
                    .withSortAscending(true)
                    .build();
        } catch (Exception e) {
            assertTrue(e.getMessage().contains("Cannot access sort field."));
            throw e;
        }
    }

    /** A private field with a public getter IS usable as a sort key. */
    @Test
    public void testSortingByPrivateGetterValueFieldName() {
        List<TestObject> objects = generateList(10);
        List<TestObject> sortedList = new ListObject.Builder<TestObject>()
                .withObjects(objects, TestObject.class)
                .withSortField("privateGetterValue")
                .withSortAscending(true)
                .build().list;
        assert sortedList.size() == objects.size();
        // All value fields share the same random value per object, so publicValue
        // order reflects privateGetterValue order.
        int prevValue = sortedList.get(0).publicValue;
        for (int i = 1; i < sortedList.size(); i++) {
            assert sortedList.get(i).publicValue >= prevValue;
            prevValue = sortedList.get(i).publicValue;
        }
    }

    /** A protected field is accessible to the reflective sorter. */
    @Test
    public void testSortingByProtectedValueFieldName() {
        List<TestObject> objects = generateList(10);
        List<TestObject> sortedList = new ListObject.Builder<TestObject>()
                .withObjects(objects, TestObject.class)
                .withSortField("protectedValue")
                .withSortAscending(true)
                .build().list;
        assert sortedList.size() == objects.size();
        int prevValue = sortedList.get(0).publicValue;
        for (int i = 1; i < sortedList.size(); i++) {
            assert sortedList.get(i).publicValue >= prevValue;
            prevValue = sortedList.get(i).publicValue;
        }
    }

    /** limit trims the returned list to at most that many elements. */
    @Test
    public void testPaginationLimit() {
        List<TestObject> objects = generateList(10);
        assert (new ListObject.Builder<TestObject>()
                .withObjects(objects, TestObject.class)
                .withLimit(5)
                .build().list.size() == 5);
    }

    /** A non-positive limit is rejected at build time. */
    @Test(expected = IllegalStateException.class)
    public void testPaginationInvalidLimit() {
        try {
            int size = new ListObject.Builder<TestObject>()
                    .withObjects(generateList(10), TestObject.class)
                    .withLimit(-1)
                    .build().list.size();
        } catch (Exception e) {
            assertTrue(e.getMessage().contains("limit needs to be greater than 0"));
            throw e;
        }
    }

    /** offset skips elements before the limit window is applied. */
    @Test
    public void testPaginationLimitAndOffset() {
        List<TestObject> objects = generateList(10);
        List<TestObject> list = new ListObject.Builder<TestObject>()
                .withObjects(objects, TestObject.class)
                .withLimit(5)
                .withOffset(1)
                .build().list;
        assert list.size() == 5;
        for (int i = 0; i < 5; i++) {
            assert list.get(i).publicValue == objects.get(i + 1).publicValue;
        }
    }

    /** When offset + limit exceeds the list size, the remainder is returned. */
    @Test
    public void testPaginationTooBigLimitAndOffset() {
        List<TestObject> objects = generateList(10);
        List<TestObject> list = new ListObject.Builder<TestObject>()
                .withObjects(objects, TestObject.class)
                .withLimit(5)
                .withOffset(6)
                .build().list;
        assert list.size() == 4;
        for (int i = 0; i < 4; i++) {
            assert list.get(i).publicValue == objects.get(i + 6).publicValue;
        }
    }

    /** An offset past the end of the list yields an empty result, not an error. */
    @Test
    public void testPaginationTooBigLimitAndInvalidOffset() {
        List<TestObject> objects = generateList(10);
        List<TestObject> list = new ListObject.Builder<TestObject>()
                .withObjects(objects, TestObject.class)
                .withLimit(5)
                .withOffset(11)
                .build().list;
        assert list.size() == 0;
    }

    /** An empty input list paginates to an empty result. */
    @Test
    public void testEmptyList() {
        List<TestObject> objects = new ArrayList<>();
        List<TestObject> list = new ListObject.Builder<TestObject>()
                .withObjects(objects, TestObject.class)
                .withOffset(0)
                .build().list;
        assert list.size() == 0;
    }

    /** Builds {@code size} fresh TestObjects with random (but per-object consistent) values. */
    private ArrayList<TestObject> generateList(int size) {
        assert size > 0;
        ArrayList<TestObject> list = new ArrayList<>();
        for (int i = 0; i < size; i++) {
            list.add(new TestObject());
        }
        return list;
    }

    /**
     * Fixture type whose fields all carry the same random value, exposed through
     * different access levels so each sort-field lookup path can be tested.
     */
    public static class TestObject {
        private int privateValue;           // no getter: inaccessible to the sorter
        private int privateGetterValue;     // accessible via getPrivateGetterValue()
        public int publicValue;
        protected int protectedValue;
        public Tuple1<Integer> complexTypeField; // non-Comparable complex type

        public TestObject() {
            int randomVal = rnd.nextInt() % 10000;
            this.privateValue = randomVal;
            this.privateGetterValue = randomVal;
            this.publicValue = randomVal;
            this.protectedValue = randomVal;
            this.complexTypeField = new Tuple1<>(randomVal);
        }

        public int getPrivateGetterValue() {
            return this.privateGetterValue;
        }
    }
}
| 7,916 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route/v0/TestMantisClient.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v0;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.core.NamedJobInfo;
import io.mantisrx.server.core.master.MasterDescription;
import io.netty.buffer.ByteBuf;
import io.netty.handler.codec.http.HttpResponseStatus;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import mantis.io.reactivex.netty.RxNetty;
import mantis.io.reactivex.netty.pipeline.PipelineConfigurators;
import mantis.io.reactivex.netty.protocol.http.client.HttpClient;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientRequest;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientResponse;
import mantis.io.reactivex.netty.protocol.http.sse.ServerSentEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.functions.Func1;
import rx.functions.Func2;
/**
 * Minimal Mantis client used by route tests: subscribes to the master's SSE
 * streams (job scheduling changes and named-job info) over RxNetty, with
 * retry/repeat backoff around the connection.
 *
 * <p>NOTE(review): the master description is fabricated to point every port at
 * the single local test server port — presumably all routes are served there;
 * verify against the test server setup.
 */
public class TestMantisClient {
    private static final Logger logger = LoggerFactory.getLogger(TestMantisClient.class);
    // Port of the local test HTTP server all requests are sent to.
    private final int serverPort;

    public TestMantisClient(final int serverPort) {
        this.serverPort = serverPort;
    }

    // Retry with linear backoff (2s per attempt), capped at 20s between attempts.
    private final Func1<Observable<? extends Throwable>, Observable<?>> retryLogic = attempts -> attempts
        .zipWith(Observable.range(1, Integer.MAX_VALUE), (Func2<Throwable, Integer, Integer>) (t1, integer) -> integer)
        .flatMap((Func1<Integer, Observable<?>>) integer -> {
            long delay = 2 * (integer > 10 ? 10 : integer);
            logger.info(": retrying conx after sleeping for " + delay + " secs");
            return Observable.timer(delay, TimeUnit.SECONDS);
        });

    // Reconnect after the stream completes, with the same capped linear backoff.
    private final Func1<Observable<? extends Void>, Observable<?>> repeatLogic = attempts -> attempts
        .zipWith(Observable.range(1, Integer.MAX_VALUE), (Func2<Void, Integer, Integer>) (t1, integer) -> integer)
        .flatMap((Func1<Integer, Observable<?>>) integer -> {
            long delay = 2 * (integer > 10 ? 10 : integer);
            logger.warn("On Complete received! : repeating conx after sleeping for " + delay + " secs");
            return Observable.timer(delay, TimeUnit.SECONDS);
        });

    /** Creates a non-pooled RxNetty HTTP client configured for server-sent events. */
    private HttpClient<ByteBuf, ServerSentEvent> getRxnettySseClient(String hostname, int port) {
        return RxNetty.<ByteBuf, ServerSentEvent>newHttpClientBuilder(hostname, port)
            .pipelineConfigurator(PipelineConfigurators.<ByteBuf>clientSseConfigurator())
            // .enableWireLogging(LogLevel.INFO)
            .withNoConnectionPooling().build();
    }

    /**
     * Streams {@link JobSchedulingInfo} updates for the given job id.
     * Heartbeat events (jobId == HB_JobId) are filtered out; the stream errors if
     * no event (including heartbeats) arrives within 3 minutes, triggering retryFn.
     *
     * @param jobId    job to watch
     * @param retryFn  backoff applied on connection/stream errors
     * @param repeatFn backoff applied when the stream completes normally
     */
    public Observable<JobSchedulingInfo> schedulingChanges(final String jobId,
                                                           final Func1<Observable<? extends Throwable>, Observable<?>> retryFn,
                                                           final Func1<Observable<? extends Void>, Observable<?>> repeatFn) {
        return Observable.just(new MasterDescription("localhost", "127.0.0.1", serverPort, serverPort,
            serverPort, "/api/postjobstatus", serverPort, System.currentTimeMillis()))
            .retryWhen(retryFn)
            .switchMap(new Func1<MasterDescription, Observable<JobSchedulingInfo>>() {
                @Override
                public Observable<JobSchedulingInfo> call(MasterDescription masterDescription) {
                    // sendHB=true asks the server to emit heartbeats so the timeout below
                    // only fires when the connection is actually dead.
                    return getRxnettySseClient(masterDescription.getHostname(), masterDescription.getSchedInfoPort())
                        .submit(HttpClientRequest.createGet("/assignmentresults/" + jobId + "?sendHB=true"))
                        .flatMap(new Func1<HttpClientResponse<ServerSentEvent>, Observable<JobSchedulingInfo>>() {
                            @Override
                            public Observable<JobSchedulingInfo> call(HttpClientResponse<ServerSentEvent> response) {
                                if (!HttpResponseStatus.OK.equals(response.getStatus())) {
                                    return Observable.error(new Exception(response.getStatus().reasonPhrase()));
                                }
                                return response.getContent()
                                    .map(new Func1<ServerSentEvent, JobSchedulingInfo>() {
                                        @Override
                                        public JobSchedulingInfo call(ServerSentEvent event) {
                                            try {
                                                return Jackson.fromJSON(event.contentAsString(), JobSchedulingInfo.class);
                                            } catch (IOException e) {
                                                throw new RuntimeException("Invalid schedInfo json: " + e.getMessage(), e);
                                            }
                                        }
                                    })
                                    .timeout(3 * 60, TimeUnit.SECONDS)
                                    .filter(new Func1<JobSchedulingInfo, Boolean>() {
                                        @Override
                                        public Boolean call(JobSchedulingInfo schedulingInfo) {
                                            // Drop server heartbeats; deliver only real scheduling updates.
                                            return schedulingInfo != null && !JobSchedulingInfo.HB_JobId.equals(schedulingInfo.getJobId());
                                        }
                                    })
                                    ;
                            }
                        })
                        ;
                }
            })
            .repeatWhen(repeatFn)
            .retryWhen(retryFn)
            ;
    }

    /** Convenience overload using the default capped linear retry/repeat backoff. */
    public Observable<JobSchedulingInfo> schedulingChanges(final String jobId) {
        return schedulingChanges(jobId, retryLogic, repeatLogic);
    }

    /**
     * Streams {@link NamedJobInfo} updates for the given job cluster name.
     * Mirrors {@link #schedulingChanges}: heartbeats filtered, 3-minute inactivity
     * timeout, retry/repeat applied around the connection.
     */
    public Observable<NamedJobInfo> namedJobInfo(final String jobName, final Func1<Observable<? extends Throwable>, Observable<?>> retryFn,
                                                 final Func1<Observable<? extends Void>, Observable<?>> repeatFn) {
        return Observable.just(new MasterDescription("localhost", "127.0.0.1", serverPort, serverPort,
            serverPort, "/api/postjobstatus", serverPort, System.currentTimeMillis()))
            .filter(new Func1<MasterDescription, Boolean>() {
                @Override
                public Boolean call(MasterDescription masterDescription) {
                    return masterDescription != null;
                }
            })
            .retryWhen(retryFn)
            .switchMap(new Func1<MasterDescription, Observable<NamedJobInfo>>() {
                @Override
                public Observable<NamedJobInfo> call(MasterDescription masterDescription) {
                    return getRxnettySseClient(masterDescription.getHostname(), masterDescription.getSchedInfoPort())
                        .submit(HttpClientRequest.createGet("/namedjobs/" + jobName + "?sendHB=true"))
                        .flatMap(new Func1<HttpClientResponse<ServerSentEvent>, Observable<NamedJobInfo>>() {
                            @Override
                            public Observable<NamedJobInfo> call(HttpClientResponse<ServerSentEvent> response) {
                                if(!HttpResponseStatus.OK.equals(response.getStatus()))
                                    return Observable.error(new Exception(response.getStatus().reasonPhrase()));
                                return response.getContent()
                                    .map(new Func1<ServerSentEvent, NamedJobInfo>() {
                                        @Override
                                        public NamedJobInfo call(ServerSentEvent event) {
                                            try {
                                                return Jackson.fromJSON(event.contentAsString(), NamedJobInfo.class);
                                            } catch (IOException e) {
                                                throw new RuntimeException("Invalid namedJobInfo json: " + e.getMessage(), e);
                                            }
                                        }
                                    })
                                    .timeout(3 * 60, TimeUnit.SECONDS)
                                    .filter(new Func1<NamedJobInfo, Boolean>() {
                                        @Override
                                        public Boolean call(NamedJobInfo namedJobInfo) {
                                            // NOTE(review): heartbeat filter compares HB_JobId against the cluster
                                            // name here — presumably the server reuses the same HB sentinel; confirm.
                                            return namedJobInfo != null && !JobSchedulingInfo.HB_JobId.equals(namedJobInfo.getName());
                                        }
                                    })
                                    ;
                            }})
                        ;
                }
            })
            .repeatWhen(repeatFn)
            .retryWhen(retryFn)
            ;
    }

    /** Convenience overload using the default capped linear retry/repeat backoff. */
    public Observable<NamedJobInfo> namedJobInfo(final String jobName) {
        return namedJobInfo(jobName, retryLogic, repeatLogic);
    }
}
| 7,917 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route/v0/JobRouteTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v0;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import akka.NotUsed;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.http.javadsl.ConnectHttp;
import akka.http.javadsl.Http;
import akka.http.javadsl.ServerBinding;
import akka.http.javadsl.model.ContentTypes;
import akka.http.javadsl.model.HttpCharsets;
import akka.http.javadsl.model.HttpEntities;
import akka.http.javadsl.model.HttpEntity;
import akka.http.javadsl.model.HttpMethods;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.http.javadsl.model.MediaTypes;
import akka.stream.ActorMaterializer;
import akka.stream.javadsl.Flow;
import akka.util.ByteString;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.JobClustersManagerActor;
import io.mantisrx.master.api.akka.payloads.JobClusterPayloads;
import io.mantisrx.master.api.akka.payloads.JobPayloads;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.api.akka.route.MantisMasterRoute;
import io.mantisrx.master.api.akka.route.handlers.JobArtifactRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobArtifactRouteHandlerImpl;
import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.handlers.JobRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.handlers.JobStatusRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.ResourceClusterRouteHandler;
import io.mantisrx.master.api.akka.route.proto.JobClusterProtoAdapter;
import io.mantisrx.master.api.akka.route.v1.AdminMasterRoute;
import io.mantisrx.master.api.akka.route.v1.AgentClustersRoute;
import io.mantisrx.master.api.akka.route.v1.JobArtifactsRoute;
import io.mantisrx.master.api.akka.route.v1.JobClustersRoute;
import io.mantisrx.master.api.akka.route.v1.JobDiscoveryStreamRoute;
import io.mantisrx.master.api.akka.route.v1.JobStatusStreamRoute;
import io.mantisrx.master.api.akka.route.v1.JobsRoute;
import io.mantisrx.master.api.akka.route.v1.LastSubmittedJobIdStreamRoute;
import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventPublisherImpl;
import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl;
import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl;
import io.mantisrx.master.jobcluster.job.CostsCalculator;
import io.mantisrx.master.jobcluster.job.JobTestHelper;
import io.mantisrx.master.jobcluster.job.MantisJobMetadataView;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.scheduler.FakeMantisScheduler;
import io.mantisrx.master.vm.AgentClusterOperations;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.core.NamedJobInfo;
import io.mantisrx.server.core.WorkerAssignments;
import io.mantisrx.server.core.master.LocalMasterMonitor;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.master.LeaderRedirectionFilter;
import io.mantisrx.server.master.LeadershipManagerLocalImpl;
import io.mantisrx.server.master.http.api.CompactJobInfo;
import io.mantisrx.server.master.persistence.FileBasedPersistenceProvider;
import io.mantisrx.server.master.persistence.IMantisPersistenceProvider;
import io.mantisrx.server.master.persistence.KeyValueBasedPersistenceProvider;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.resourcecluster.ResourceClusters;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.MantisSchedulerFactory;
import io.mantisrx.server.master.store.FileBasedStore;
import io.mantisrx.server.master.store.MantisStageMetadataWritable;
import io.mantisrx.server.master.store.MantisWorkerMetadataWritable;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import java.io.IOException;
import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import rx.Observable;
public class JobRouteTest {
private final static Logger logger = LoggerFactory.getLogger(JobRouteTest.class);
private final ActorMaterializer materializer = ActorMaterializer.create(system);
private final Http http = Http.get(system);
private static Thread t;
private static final int serverPort = 8203;
private static final int targetEndpointPort = serverPort;
private final TestMantisClient mantisClient = new TestMantisClient(serverPort);
/**
 * Reads the full response body as a UTF-8 string after verifying the wildcard
 * CORS header and (optionally) the expected HTTP status code.
 */
private CompletionStage<String> processRespFut(
    final HttpResponse response,
    final Optional<Integer> expectedStatusCode) {
    logger.info("headers {} {}", response.getHeaders(), response.status());
    expectedStatusCode.ifPresent(expected ->
        assertEquals(expected.intValue(), response.status().intValue()));
    // Every response from the master must advertise a wildcard CORS origin.
    assert (response.getHeader("Access-Control-Allow-Origin").isPresent());
    assertEquals("*", response.getHeader("Access-Control-Allow-Origin").get().value());
    return response.entity()
        .toStrict(1000, materializer)
        .thenCompose(strict -> strict.getDataBytes()
            .runFold(ByteString.emptyByteString(), ByteString::concat, materializer)
            .thenApply(ByteString::utf8String));
}
/**
 * Same as {@code processRespFut} but skips the CORS header assertions:
 * optionally checks the status code, then drains the body to a UTF-8 string.
 */
private CompletionStage<String> processRespFutWithoutHeadersCheck(
    final HttpResponse response,
    final Optional<Integer> expectedStatusCode) {
    logger.info("headers {} {}", response.getHeaders(), response.status());
    expectedStatusCode.ifPresent(expected ->
        assertEquals(expected.intValue(), response.status().intValue()));
    return response.entity()
        .toStrict(1000, materializer)
        .thenCompose(strict -> strict.getDataBytes()
            .runFold(ByteString.emptyByteString(), ByteString::concat, materializer)
            .thenApply(ByteString::utf8String));
}
/**
 * Unwraps an async completion pair: fails the test when the stage completed
 * exceptionally, otherwise returns the response message.
 *
 * @param msg response body when the stage succeeded
 * @param t   failure cause, or null on success
 * @return the message on success; the trailing return is unreachable since fail() throws
 */
private String getResponseMessage(final String msg, final Throwable t) {
    if (t != null) {
        // logger.error already records the full stack trace; the previous
        // t.printStackTrace() duplicated it to stderr, bypassing the log config.
        logger.error("got err ", t);
        fail(t.getMessage());
        return ""; // unreachable: fail() always throws
    }
    return msg;
}
private static CompletionStage<ServerBinding> binding;
private static ActorSystem system = ActorSystem.create("JobRoutes");
/**
 * Boots a full in-process Mantis master HTTP server (v0 + v1 routes) on a
 * background daemon thread before any test runs. The scheduler is faked, leader
 * election is local, and persistence is file-based under the test spool dirs.
 * Blocks until the server thread has attempted the bind.
 */
@BeforeClass
public static void setup() throws Exception {
    // Start from a clean file-based persistence state.
    JobTestHelper.deleteAllFiles();
    JobTestHelper.createDirsIfRequired();
    final CountDownLatch latch = new CountDownLatch(1);
    TestHelpers.setupMasterConfig();
    t = new Thread(() -> {
        try {
            // boot up server using the route as defined below
            final Http http = Http.get(system);
            final ActorMaterializer materializer = ActorMaterializer.create(system);
            // new File("/tmp/MantisSpool/namedJobs").mkdirs();
            // IMantisStorageProvider storageProvider = new MantisStorageProviderAdapter(simpleCachedFileStorageProvider);
            final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl(
                new AuditEventSubscriberLoggingImpl(),
                new StatusEventSubscriberLoggingImpl(),
                new WorkerEventSubscriberLoggingImpl());
            IMantisPersistenceProvider mantisStorageProvider = new KeyValueBasedPersistenceProvider(new FileBasedStore(), lifecycleEventPublisher);
            // Job clusters manager actor backed by file-based job store.
            ActorRef jobClustersManagerActor = system.actorOf(
                JobClustersManagerActor.props(
                    new MantisJobStore(new FileBasedPersistenceProvider(true)),
                    lifecycleEventPublisher,
                    CostsCalculator.noop()),
                "jobClustersManager");
            // Fake scheduler: worker events are routed straight back to the actor.
            MantisSchedulerFactory fakeSchedulerFactory = mock(MantisSchedulerFactory.class);
            MantisScheduler fakeScheduler = new FakeMantisScheduler(jobClustersManagerActor);
            when(fakeSchedulerFactory.forJob(any())).thenReturn(fakeScheduler);
            jobClustersManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize(
                fakeSchedulerFactory,
                false), ActorRef.noSender());
            final JobClusterRouteHandler jobClusterRouteHandler = new JobClusterRouteHandlerAkkaImpl(
                jobClustersManagerActor);
            final JobArtifactRouteHandler jobArtifactRouteHandler = new JobArtifactRouteHandlerImpl(mantisStorageProvider);
            final JobRouteHandler jobRouteHandler = new JobRouteHandlerAkkaImpl(
                jobClustersManagerActor);
            // Every port on the master description points at the single test server port.
            MasterDescription masterDescription = new MasterDescription(
                "127.0.0.1",
                "127.0.0.1",
                serverPort,
                serverPort,
                serverPort,
                "api/postjobstatus",
                serverPort,
                System.currentTimeMillis());
            Duration idleTimeout = system.settings()
                .config()
                .getDuration("akka.http.server.idle-timeout");
            logger.info("idle timeout {} sec ", idleTimeout.getSeconds());
            final JobDiscoveryRouteHandler jobDiscoveryRouteHandler = new JobDiscoveryRouteHandlerAkkaImpl(
                jobClustersManagerActor,
                idleTimeout);
            // v0 routes.
            final MasterDescriptionRoute masterDescriptionRoute = new MasterDescriptionRoute(
                masterDescription);
            final JobRoute v0JobRoute = new JobRoute(jobRouteHandler, system);
            final JobDiscoveryRoute v0JobDiscoveryRoute = new JobDiscoveryRoute(
                jobDiscoveryRouteHandler);
            final JobClusterRoute v0JobClusterRoute = new JobClusterRoute(
                jobClusterRouteHandler,
                jobRouteHandler,
                system);
            // v1 routes.
            final JobClustersRoute v1JobClusterRoute = new JobClustersRoute(
                jobClusterRouteHandler, system);
            final JobsRoute v1JobsRoute = new JobsRoute(
                jobClusterRouteHandler,
                jobRouteHandler,
                system);
            final JobArtifactsRoute v1JobArtifactsRoute = new JobArtifactsRoute(jobArtifactRouteHandler);
            final AdminMasterRoute v1AdminMasterRoute = new AdminMasterRoute(masterDescription);
            // Job status stream is mocked as a pass-through flow.
            final JobStatusRouteHandler jobStatusRouteHandler = mock(JobStatusRouteHandler.class);
            when(jobStatusRouteHandler.jobStatus(anyString())).thenReturn(Flow.create());
            final JobStatusRoute v0JobStatusRoute = new JobStatusRoute(jobStatusRouteHandler);
            final AgentClusterOperations mockAgentClusterOps = mock(AgentClusterOperations.class);
            final AgentClusterRoute v0AgentClusterRoute = new AgentClusterRoute(
                mockAgentClusterOps,
                system);
            final AgentClustersRoute v1AgentClusterRoute = new AgentClustersRoute(
                mockAgentClusterOps);
            final JobDiscoveryStreamRoute v1JobDiscoveryStreamRoute = new JobDiscoveryStreamRoute(jobDiscoveryRouteHandler);
            final LastSubmittedJobIdStreamRoute v1LastSubmittedJobIdStreamRoute = new LastSubmittedJobIdStreamRoute(jobDiscoveryRouteHandler);
            final JobStatusStreamRoute v1JobStatusStreamRoute = new JobStatusStreamRoute(jobStatusRouteHandler);
            final ResourceClusters resourceClusters = mock(ResourceClusters.class);
            // Local leader election: this instance is immediately the ready leader,
            // so the redirection filter never redirects.
            LocalMasterMonitor localMasterMonitor = new LocalMasterMonitor(masterDescription);
            LeadershipManagerLocalImpl leadershipMgr = new LeadershipManagerLocalImpl(
                masterDescription);
            leadershipMgr.setLeaderReady();
            LeaderRedirectionFilter leaderRedirectionFilter = new LeaderRedirectionFilter(
                localMasterMonitor,
                leadershipMgr);
            final MantisMasterRoute app = new MantisMasterRoute(
                system,
                leaderRedirectionFilter,
                masterDescriptionRoute,
                v0JobClusterRoute,
                v0JobRoute,
                v0JobDiscoveryRoute,
                v0JobStatusRoute,
                v0AgentClusterRoute,
                v1JobClusterRoute,
                v1JobsRoute,
                v1JobArtifactsRoute,
                v1AdminMasterRoute,
                v1AgentClusterRoute,
                v1JobDiscoveryStreamRoute,
                v1LastSubmittedJobIdStreamRoute,
                v1JobStatusStreamRoute,
                resourceClusters,
                mock(ResourceClusterRouteHandler.class));
            final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow = app.createRoute()
                .flow(system, materializer);
            logger.info("starting test server on port {}", serverPort);
            binding = http.bindAndHandle(
                routeFlow,
                ConnectHttp.toHost("localhost", serverPort),
                materializer);
            latch.countDown();
        } catch (Exception e) {
            // Release the latch even on failure so setup() doesn't hang; tests
            // will then fail on their first request.
            logger.info("caught exception", e);
            latch.countDown();
            e.printStackTrace();
        }
    });
    t.setDaemon(true);
    t.start();
    latch.await();
    // Small grace period for the bind to complete before tests issue requests.
    Thread.sleep(100);
}
/** Unbinds the HTTP server, terminates the actor system, and stops the server thread. */
@AfterClass
public static void teardown() {
    logger.info("JobRouteTest teardown");
    binding.thenCompose(ServerBinding::unbind)      // release the port
           .thenAccept(done -> system.terminate()); // then shut down actors
    t.interrupt();
}
/** Builds a v0 named-job (job cluster) API URL for the given endpoint suffix. */
private String jobClusterAPIEndpoint(final String endpoint) {
    return "http://127.0.0.1:" + targetEndpointPort + "/api/namedjob/" + endpoint;
}
/** Builds a v0 jobs API URL for the given endpoint suffix. */
private String jobAPIEndpoint(final String endpoint) {
    return "http://127.0.0.1:" + targetEndpointPort + "/api/jobs/" + endpoint;
}
/**
 * First test in the dependency chain: disables then deletes the sine-function
 * job cluster so later tests start from a clean slate. Runs two sequential
 * async HTTP calls, each gated by its own latch.
 */
@Test
public void cleanupExistingJobs() throws InterruptedException {
    // Disable cluster to terminate all running jobs
    final CountDownLatch latch = new CountDownLatch(1);
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.POST(jobClusterAPIEndpoint("disable"))
            .withMethod(HttpMethods.POST)
            .withEntity(HttpEntities.create(
                ContentTypes.APPLICATION_JSON,
                JobClusterPayloads.JOB_CLUSTER_DISABLE)));
    responseFuture
        .thenCompose(r -> processRespFut(r, Optional.empty()))
        .whenComplete((r, t) -> {
            String responseMessage = getResponseMessage(r, t);
            logger.info("got response {}", responseMessage);
            latch.countDown();
        });
    assertTrue(latch.await(2, TimeUnit.SECONDS));
    // Now delete the (disabled) cluster entirely.
    final CountDownLatch latch2 = new CountDownLatch(1);
    final CompletionStage<HttpResponse> respF = http.singleRequest(
        HttpRequest.POST(jobClusterAPIEndpoint("delete"))
            .withEntity(HttpEntities.create(
                ContentTypes.APPLICATION_JSON,
                JobClusterPayloads.JOB_CLUSTER_DELETE)));
    // Body is drained without header/status checks: the delete may legitimately
    // fail when no cluster exists yet (first run), so no assertion on content.
    respF.thenCompose(r -> {
        CompletionStage<HttpEntity.Strict> strictEntity = r.entity()
            .toStrict(1000, materializer);
        return strictEntity.thenCompose(s ->
            s.getDataBytes()
                .runFold(
                    ByteString.emptyByteString(),
                    (acc, b) -> acc.concat(b),
                    materializer)
                .thenApply(s2 -> s2.utf8String()));
    }).whenComplete((msg, t) -> {
        String responseMessage = getResponseMessage(msg, t);
        logger.info("got response {}", responseMessage);
        latch2.countDown();
    });
    assertTrue(latch2.await(1, TimeUnit.SECONDS));
}
/**
 * Creates the "sine-function" job cluster used by all subsequent tests and
 * verifies the master acknowledges the creation.
 */
@Test(dependsOnMethods = {"cleanupExistingJobs"})
public void setupJobCluster() throws InterruptedException {
    final CountDownLatch done = new CountDownLatch(1);
    http.singleRequest(
            HttpRequest.POST(jobClusterAPIEndpoint("create"))
                .withMethod(HttpMethods.POST)
                .withEntity(HttpEntities.create(
                    ContentTypes.APPLICATION_JSON,
                    JobClusterPayloads.JOB_CLUSTER_CREATE)))
        .thenCompose(resp -> processRespFut(resp, Optional.of(200)))
        .whenComplete((msg, err) -> {
            final String body = getResponseMessage(msg, err);
            logger.info("got response {}", body);
            assertEquals("sine-function created", body);
            done.countDown();
        });
    assertTrue(done.await(2, TimeUnit.SECONDS));
}
/** Submits a job to the sine-function cluster and expects job id "sine-function-1". */
@Test(dependsOnMethods = {"setupJobCluster"})
public void testJobSubmit() throws InterruptedException {
    final CountDownLatch done = new CountDownLatch(1);
    final String submitUrl = String.format("http://127.0.0.1:%d/api/submit", targetEndpointPort);
    http.singleRequest(
            HttpRequest.POST(submitUrl)
                .withEntity(HttpEntities.create(
                    ContentTypes.APPLICATION_JSON,
                    JobClusterPayloads.JOB_CLUSTER_SUBMIT)))
        .thenCompose(resp -> processRespFut(resp, Optional.of(200)))
        .whenComplete((msg, err) -> {
            final String body = getResponseMessage(msg, err);
            logger.info("got response {}", body);
            assertEquals("sine-function-1", body);
            done.countDown();
        });
    // Job submission involves actor round-trips; allow a generous timeout.
    assertTrue(done.await(10, TimeUnit.SECONDS));
}
/**
 * Lists active job ids for the sine-function cluster and verifies exactly the
 * one job submitted by {@link #testJobSubmit} is returned with the expected
 * metadata (user, no termination time, Accepted-or-Launched state).
 */
@Test(dependsOnMethods = {"testJobClusterGetJobIds"})
public void testJobClusterGetJobsList() throws InterruptedException {
// Lists all jobs and validates the single expected job's stage/worker layout.
//
// Fix: assertion failures raised inside the async whenComplete callback were
// swallowed by the CompletionStage and surfaced only as an opaque latch
// timeout. They are now captured and rethrown on the test thread. The
// reversed expected/actual arguments in two assertEquals calls were also
// corrected (expected value comes first).
@Test(dependsOnMethods = {"testJobClusterGetJobIds"})
public void testJobClusterGetJobsList() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    // Single-element array used as a mutable slot; avoids needing AtomicReference.
    final Throwable[] callbackFailure = new Throwable[1];
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.GET(jobAPIEndpoint("list")));
    responseFuture
        .thenCompose(r -> processRespFut(r, Optional.of(200)))
        .whenComplete((msg, t) -> {
            try {
                String responseMessage = getResponseMessage(msg, t);
                logger.info("got response---> {}", responseMessage);
                List<MantisJobMetadataView> jobInfos = Jackson.fromJSON(
                    responseMessage,
                    new TypeReference<List<MantisJobMetadataView>>() {
                    });
                logger.info("jobInfos---> {}", jobInfos);
                assertEquals(1, jobInfos.size());
                MantisJobMetadataView mjm = jobInfos.get(0);
                assertEquals("sine-function-1", mjm.getJobMetadata().getJobId());
                assertEquals("sine-function", mjm.getJobMetadata().getName());
                assertTrue(mjm.getStageMetadataList().size() > 0);
                MantisStageMetadataWritable msm = mjm.getStageMetadataList().get(0);
                assertEquals(1, msm.getNumWorkers());
                assertTrue(mjm.getWorkerMetadataList().size() > 0);
                MantisWorkerMetadataWritable mwm = mjm.getWorkerMetadataList().get(0);
                assertEquals("sine-function-1", mwm.getJobId());
                assertEquals(false, mwm.getCluster().isPresent());
            } catch (Throwable err) {
                // Includes deserialization IOExceptions and AssertionErrors.
                callbackFailure[0] = err;
            } finally {
                latch.countDown();
            }
        });
    assertTrue(latch.await(2, TimeUnit.SECONDS));
    if (callbackFailure[0] != null) {
        throw new AssertionError("job list response assertions failed", callbackFailure[0]);
    }
}
// Fetches the detail view for job sine-function-1 and checks job, stage and
// worker metadata match what was submitted.
@Test(dependsOnMethods = {"testJobClusterGetJobsList"})
public void testJobClusterGetJobDetail() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.GET(jobAPIEndpoint("list/sine-function-1")));
    responseFuture
        .thenCompose(r -> processRespFut(r, Optional.of(200)))
        .whenComplete((msg, t) -> {
            String responseMessage = getResponseMessage(msg, t);
            logger.info("got response---> {}", responseMessage);
            MantisJobMetadataView mjm = null;
            try {
                // Single-object payload here, unlike the list endpoint.
                mjm = Jackson.fromJSON(responseMessage, MantisJobMetadataView.class);
            } catch (IOException e) {
                logger.error("failed to deser json {}", responseMessage, e);
                fail("job info deser failed");
            }
            logger.info("jobInfo---> {}", mjm);
            assertNotNull(mjm);
            assertEquals(mjm.getJobMetadata().getJobId(), "sine-function-1");
            assertEquals(mjm.getJobMetadata().getName(), "sine-function");
            assertTrue(mjm.getStageMetadataList().size() > 0);
            MantisStageMetadataWritable msm = mjm.getStageMetadataList().get(0);
            assertEquals(1, msm.getNumWorkers());
            assertTrue(mjm.getWorkerMetadataList().size() > 0);
            MantisWorkerMetadataWritable mwm = mjm.getWorkerMetadataList().get(0);
            assertEquals("sine-function-1", mwm.getJobId());
            // No agent cluster is assigned in this local test setup.
            assertEquals(false, mwm.getCluster().isPresent());
            latch.countDown();
        });
    assertTrue(latch.await(2, TimeUnit.SECONDS));
}
// Fetches the compact job list and validates the summarized job info.
//
// Fix: the two catch clauses had identical bodies; collapsed into a single
// multi-catch, the idiomatic Java 7+ form.
@Test(dependsOnMethods = {"testJobClusterGetJobDetail"})
public void testJobClusterGetJobsCompact() throws InterruptedException {
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.GET(jobAPIEndpoint("list?compact=true")));
    try {
        responseFuture
            .thenCompose(r -> processRespFut(r, Optional.of(200)))
            .whenComplete((msg, t) -> {
                String responseMessage = getResponseMessage(msg, t);
                logger.info("got response {}", responseMessage);
                List<CompactJobInfo> jobIdInfos = Collections.emptyList();
                try {
                    jobIdInfos = Jackson.fromJSON(
                        responseMessage,
                        new TypeReference<List<CompactJobInfo>>() {
                        });
                } catch (IOException e) {
                    logger.error(
                        "failed to get CompactJobInfos from json response {}",
                        responseMessage,
                        e);
                    fail("compactJobInfo deser failed");
                }
                logger.info("got jobIdInfos {}", jobIdInfos);
                assertEquals(1, jobIdInfos.size());
                CompactJobInfo jobInfo = jobIdInfos.get(0);
                assertEquals("sine-function-1", jobInfo.getJobId());
                assertEquals("nmahilani", jobInfo.getUser());
                assertEquals(7, jobInfo.getLabels().size());
                assertEquals(2, jobInfo.getNumStages());
                assertEquals(2, jobInfo.getNumWorkers());
                // Timing-dependent: job may still be Accepted or already Launched.
                assertTrue(jobInfo.getState().equals(MantisJobState.Accepted) ||
                    jobInfo.getState().equals(MantisJobState.Launched));
                assertEquals(2.0, jobInfo.getTotCPUs(), 0.0);
                // TODO total memory is 400 for old master, 2048 for new master
                //assertEquals(400.0, jobInfo.getTotMemory(), 0.0);
                assertEquals(MantisJobDurationType.Perpetual, jobInfo.getType());
                // All workers should share one of the transitional/running states.
                assertTrue(Collections.singletonMap("Started", 2)
                    .equals(jobInfo.getStatesSummary()) ||
                    Collections.singletonMap("StartInitiated", 2)
                        .equals(jobInfo.getStatesSummary()) ||
                    Collections.singletonMap("Launched", 2)
                        .equals(jobInfo.getStatesSummary()) ||
                    Collections.singletonMap("Accepted", 2)
                        .equals(jobInfo.getStatesSummary()));
            }).toCompletableFuture()
            .get(2, TimeUnit.SECONDS);
    } catch (ExecutionException | TimeoutException e) {
        throw new RuntimeException(e);
    }
}
// Subscribes to the named-job info stream and verifies the last submitted
// job id for the "sine-function" cluster.
// NOTE(review): latch.await() below has no timeout; if the stream never emits
// and never terminates this test hangs — consider a bounded await.
@Test(dependsOnMethods = {"testJobClusterGetJobsCompact"})
public void testNamedJobInfoStream() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final String jobCluster = "sine-function";
    Observable<NamedJobInfo> namedJobInfo = mantisClient.namedJobInfo(jobCluster);
    namedJobInfo
        .doOnNext(lastSubmittedJobId -> {
            logger.info(
                "namedJobInfo {} {}",
                lastSubmittedJobId.getName(),
                lastSubmittedJobId.getJobId());
            try {
                lastSubmittedJobId.getName();
                assertEquals("sine-function", lastSubmittedJobId.getName());
                assertEquals("sine-function-1", lastSubmittedJobId.getJobId());
            } catch (Exception e) {
                logger.error("caught exception", e);
                org.testng.Assert.fail(
                    "testNamedJobInfoStream test failed with exception " +
                        e.getMessage(),
                    e);
            }
            latch.countDown();
        })
        .doOnError(t -> logger.warn("onError", t))
        .doOnCompleted(() -> logger.info("onCompleted"))
        // Releases the latch even if the stream errors/completes without emitting.
        .doAfterTerminate(() -> latch.countDown())
        .subscribe();
    latch.await();
}
// Subscribes to scheduling changes for sine-function-1 and validates the
// worker assignments: one worker per stage, with the expected worker
// index/number and Started state.
// NOTE(review): latch.await() has no timeout; a silent stream stalls the test.
@Test(dependsOnMethods = {"testNamedJobInfoStream"})
public void testSchedulingInfo() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final String jobId = "sine-function-1";
    // final AtomicBoolean flag = new AtomicBoolean(false);
    Observable<JobSchedulingInfo> jobSchedulingInfoObservable = mantisClient.schedulingChanges(
        jobId);
    jobSchedulingInfoObservable
        .map(schedInfo -> {
            logger.info("schedInfo {}", schedInfo);
            try {
                assertEquals(jobId, schedInfo.getJobId());
                // Assignments keyed by stage number; hosts keyed by worker number.
                Map<Integer, WorkerAssignments> wa = schedInfo.getWorkerAssignments();
                assertEquals(2, wa.size());
                // 1 worker in stage 0
                assertEquals(1, wa.get(0).getHosts().size());
                assertEquals(0, wa.get(0).getHosts().get(1).getWorkerIndex());
                assertEquals(1, wa.get(0).getHosts().get(1).getWorkerNumber());
                assertEquals(
                    MantisJobState.Started,
                    wa.get(0).getHosts().get(1).getState());
                // 1 worker in stage 1
                assertEquals(1, wa.get(1).getHosts().size());
                assertEquals(0, wa.get(1).getHosts().get(2).getWorkerIndex());
                assertEquals(2, wa.get(1).getHosts().get(2).getWorkerNumber());
                assertEquals(
                    MantisJobState.Started,
                    wa.get(1).getHosts().get(2).getState());
                // if (flag.compareAndSet(false, true)) {
                //     testJobResubmitWorker();
                // }
            } catch (Exception e) {
                logger.error("caught exception", e);
                org.testng.Assert.fail(
                    "testSchedulingInfo test failed with exception " + e.getMessage(),
                    e);
            }
            latch.countDown();
            return schedInfo;
        })
        // Only the first scheduling snapshot is needed for this check.
        .take(1)
        .doOnError(t -> logger.warn("onError", t))
        .doOnCompleted(() -> logger.info("onCompleted"))
        .doAfterTerminate(() -> latch.countDown())
        .subscribe();
    latch.await();
}
// Resubmits worker 2 of sine-function-1 and verifies the API acknowledgement.
// The initial sleep gives the job time to reach a running state first.
@Test(dependsOnMethods = {"testSchedulingInfo"})
public void testJobResubmitWorker() throws InterruptedException {
    Thread.sleep(3000);
    final CountDownLatch done = new CountDownLatch(1);
    final HttpRequest resubmitRequest =
        HttpRequest.POST(jobAPIEndpoint(JobRoute.RESUBMIT_WORKER_ENDPOINT))
            .withMethod(HttpMethods.POST)
            .withEntity(HttpEntities.create(
                ContentTypes.APPLICATION_JSON,
                JobPayloads.RESUBMIT_WORKER));
    http.singleRequest(resubmitRequest)
        .thenCompose(response -> processRespFut(response, Optional.of(200)))
        .whenComplete((body, failure) -> {
            final String responseMessage = getResponseMessage(body, failure);
            logger.info("got response {}", responseMessage);
            assertTrue(responseMessage.startsWith(
                "Worker 2 of job sine-function-1 resubmitted"));
            done.countDown();
        });
    assertTrue(done.await(2, TimeUnit.SECONDS));
}
// Verifies that the worker resubmitted by testJobResubmitWorker now appears
// in the archived-workers list with Failed state and the expected identity.
//
// Fix: removed the dead commented-out protobuf (JsonFormat/JobArchivedWorkersResponse)
// remnants, which only obscured the live Jackson-based deserialization path.
@Test(dependsOnMethods = {"testJobResubmitWorker"})
public void testJobClusterGetJobArchivedWorkersList() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.GET(jobAPIEndpoint("archived/sine-function-1")));
    responseFuture
        .thenCompose(r -> processRespFut(r, Optional.of(200)))
        .whenComplete((msg, t) -> {
            String responseMessage = getResponseMessage(msg, t);
            logger.info("############################got response---> {}", responseMessage);
            List<MantisWorkerMetadataWritable> workers = Collections.emptyList();
            try {
                workers = Jackson.fromJSON(
                    responseMessage,
                    new TypeReference<List<MantisWorkerMetadataWritable>>() {
                    });
            } catch (IOException e) {
                logger.error("failed to deser json {}", responseMessage, e);
                fail("archived workers list deser failed");
            }
            logger.info("archived workers ---> {}", workers);
            assertEquals(1, workers.size());
            MantisWorkerMetadataWritable worker = workers.get(0);
            assertEquals("sine-function-1", worker.getJobId());
            logger.info("no of ports " + worker.getNumberOfPorts());
            assertEquals(5, worker.getNumberOfPorts());
            logger.info("stage num " + worker.getStageNum());
            assertEquals(1, worker.getStageNum());
            logger.info("Reason " + worker.getReason().name());
            //assertEquals("Relaunched", worker.getReason().name());
            logger.info("state " + worker.getState().name());
            assertEquals("Failed", worker.getState().name());
            logger.info("index " + worker.getWorkerIndex());
            assertEquals(0, worker.getWorkerIndex());
            logger.info("worker no " + worker.getWorkerNumber());
            assertEquals(2, worker.getWorkerNumber());
            logger.info("resubmit cnt " + worker.getTotalResubmitCount());
            assertEquals(0, worker.getTotalResubmitCount());
            latch.countDown();
        });
    assertTrue(latch.await(2, TimeUnit.SECONDS));
}
// Scales stage 1 of the running job to 3 workers and checks the API reply.
@Test(dependsOnMethods = {"testJobClusterGetJobArchivedWorkersList"})
public void testJobClusterScaleStage() throws InterruptedException {
    final CountDownLatch done = new CountDownLatch(1);
    final HttpRequest scaleRequest =
        HttpRequest.POST(jobAPIEndpoint(JobRoute.SCALE_STAGE_ENDPOINT))
            .withMethod(HttpMethods.POST)
            .withEntity(HttpEntities.create(
                ContentTypes.APPLICATION_JSON,
                JobPayloads.SCALE_STAGE));
    http.singleRequest(scaleRequest)
        .thenCompose(response -> processRespFut(response, Optional.of(200)))
        .whenComplete((body, failure) -> {
            final String responseMessage = getResponseMessage(body, failure);
            logger.info("got response {}", responseMessage);
            assertEquals("Scaled stage 1 to 3 workers", responseMessage);
            done.countDown();
        });
    assertTrue(done.await(1, TimeUnit.SECONDS));
}
// Posts a plain-text worker status report and verifies it is forwarded.
// Uses the headers-free response check since this endpoint does not set CORS.
@Test(dependsOnMethods = {"testJobClusterScaleStage"})
public void testJobStatus() throws InterruptedException {
    final CountDownLatch done = new CountDownLatch(1);
    final String statusUrl =
        String.format("http://127.0.0.1:%d/api/postjobstatus", targetEndpointPort);
    final HttpRequest statusRequest = HttpRequest.POST(statusUrl)
        .withMethod(HttpMethods.POST)
        .withEntity(
            ContentTypes.create(MediaTypes.TEXT_PLAIN, HttpCharsets.ISO_8859_1),
            JobPayloads.JOB_STATUS);
    http.singleRequest(statusRequest)
        .thenCompose(response -> processRespFutWithoutHeadersCheck(response, Optional.of(200)))
        .whenComplete((body, failure) -> {
            final String responseMessage = getResponseMessage(body, failure);
            logger.info("got response '{}'", responseMessage);
            assertEquals("forwarded worker status", responseMessage);
            done.countDown();
        });
    assertTrue(done.await(1, TimeUnit.SECONDS));
}
// Kills sine-function-1 via the kill endpoint and checks the JSON reply.
@Test(dependsOnMethods = {"testJobStatus"})
public void testJobKill() throws InterruptedException {
    final CountDownLatch done = new CountDownLatch(1);
    final HttpRequest killRequest =
        HttpRequest.POST(jobAPIEndpoint(JobRoute.KILL_ENDPOINT))
            .withMethod(HttpMethods.POST)
            .withEntity(
                ContentTypes.create(MediaTypes.APPLICATION_X_WWW_FORM_URLENCODED),
                JobPayloads.KILL_JOB);
    http.singleRequest(killRequest)
        .thenCompose(response -> processRespFut(response, Optional.of(200)))
        .whenComplete((body, failure) -> {
            final String responseMessage = getResponseMessage(body, failure);
            logger.info("got response {}", responseMessage);
            assertEquals("[\"sine-function-1 Killed\"]", responseMessage.trim());
            done.countDown();
        });
    assertTrue(done.await(1, TimeUnit.SECONDS));
}
// Disables the sine-function cluster, which terminates any remaining jobs.
@Test(dependsOnMethods = {"testJobKill"})
public void testJobClusterDisable() throws InterruptedException {
    final CountDownLatch done = new CountDownLatch(1);
    final HttpRequest disableRequest =
        HttpRequest.POST(jobClusterAPIEndpoint("disable"))
            .withMethod(HttpMethods.POST)
            .withEntity(HttpEntities.create(
                ContentTypes.APPLICATION_JSON,
                JobClusterPayloads.JOB_CLUSTER_DISABLE));
    http.singleRequest(disableRequest)
        .thenCompose(response -> processRespFut(response, Optional.of(200)))
        .whenComplete((body, failure) -> {
            final String responseMessage = getResponseMessage(body, failure);
            logger.info("got response {}", responseMessage);
            assertEquals("sine-function disabled", responseMessage);
            done.countDown();
        });
    assertTrue(done.await(1, TimeUnit.SECONDS));
}
// Deletes the (now disabled) sine-function cluster and checks the reply.
@Test(dependsOnMethods = {"testJobClusterDisable"})
public void testJobClusterDelete() throws InterruptedException {
    final CountDownLatch done = new CountDownLatch(1);
    final HttpRequest deleteRequest =
        HttpRequest.POST(jobClusterAPIEndpoint("delete"))
            .withEntity(HttpEntities.create(
                ContentTypes.APPLICATION_JSON,
                JobClusterPayloads.JOB_CLUSTER_DELETE));
    http.singleRequest(deleteRequest)
        .thenCompose(response -> processRespFut(response, Optional.of(200)))
        .whenComplete((body, failure) -> {
            final String responseMessage = getResponseMessage(body, failure);
            logger.info("got response {}", responseMessage);
            assertEquals("sine-function deleted", responseMessage);
            done.countDown();
        });
    assertTrue(done.await(1, TimeUnit.SECONDS));
}
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v0;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import akka.NotUsed;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.http.javadsl.ConnectHttp;
import akka.http.javadsl.Http;
import akka.http.javadsl.ServerBinding;
import akka.http.javadsl.model.ContentTypes;
import akka.http.javadsl.model.HttpEntities;
import akka.http.javadsl.model.HttpEntity;
import akka.http.javadsl.model.HttpMethods;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.http.javadsl.model.MediaTypes;
import akka.stream.ActorMaterializer;
import akka.stream.javadsl.Flow;
import akka.util.ByteString;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.common.Label;
import io.mantisrx.master.JobClustersManagerActor;
import io.mantisrx.master.api.akka.payloads.JobClusterPayloads;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.handlers.JobRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.proto.JobClusterProtoAdapter;
import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventPublisherImpl;
import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl;
import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl;
import io.mantisrx.master.jobcluster.MantisJobClusterMetadataView;
import io.mantisrx.master.jobcluster.job.CostsCalculator;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.scheduler.FakeMantisScheduler;
import io.mantisrx.server.master.persistence.FileBasedPersistenceProvider;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.MantisSchedulerFactory;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import java.io.IOException;
import java.time.Duration;
import java.util.List;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class JobClusterRouteTest {
private static final Logger logger = LoggerFactory.getLogger(JobClusterRouteTest.class);
private static final Duration latchTimeout = Duration.ofSeconds(10);
private final ActorMaterializer materializer = ActorMaterializer.create(system);
private final Http http = Http.get(system);
private static Thread t;
private static final int serverPort = 8301;
/**
 * Asserts the expected HTTP status and the permissive CORS header, then
 * drains the response entity into a UTF-8 string.
 */
private CompletionStage<String> processRespFut(final HttpResponse r, final int expectedStatusCode) {
    logger.info("headers {} {}", r.getHeaders(), r.status());
    assertEquals(expectedStatusCode, r.status().intValue());
    assert (r.getHeader("Access-Control-Allow-Origin").isPresent());
    assertEquals("*", r.getHeader("Access-Control-Allow-Origin").get().value());
    return r.entity()
        .toStrict(1000, materializer)
        .thenCompose(strict -> strict.getDataBytes()
            .runFold(ByteString.emptyByteString(), ByteString::concat, materializer)
            .thenApply(ByteString::utf8String));
}
/**
 * Returns the response body, or fails the test if the async pipeline
 * completed exceptionally.
 */
private String getResponseMessage(final String msg, final Throwable t) {
    if (t == null) {
        return msg;
    }
    logger.error("got err ", t);
    fail(t.getMessage());
    return ""; // unreachable: fail() always throws, but the compiler needs it
}
private static CompletionStage<ServerBinding> binding;
private static ActorSystem system = ActorSystem.create("JobClusterRoutes");
// Boots a one-off Akka HTTP server hosting the v0 job-cluster routes on a
// background daemon thread; all tests in this class hit that server.
@BeforeClass
public static void setup() throws Exception {
    TestHelpers.setupMasterConfig();
    final CountDownLatch latch = new CountDownLatch(1);
    t = new Thread(() -> {
        try {
            // boot up server using the route as defined below
            final Http http = Http.get(system);
            final ActorMaterializer materializer = ActorMaterializer.create(system);
            final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl());
            // JobClustersManagerActor backed by file-based persistence (tmp mode).
            ActorRef jobClustersManagerActor = system.actorOf(
                JobClustersManagerActor.props(
                    new MantisJobStore(new FileBasedPersistenceProvider(true)),
                    lifecycleEventPublisher,
                    CostsCalculator.noop()),
                "jobClustersManager");
            // Fake scheduler routes worker events straight back to the actor.
            MantisSchedulerFactory fakeSchedulerFactory = mock(MantisSchedulerFactory.class);
            MantisScheduler fakeScheduler = new FakeMantisScheduler(jobClustersManagerActor);
            when(fakeSchedulerFactory.forJob(any())).thenReturn(fakeScheduler);
            jobClustersManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize(fakeSchedulerFactory, false), ActorRef.noSender());
            final JobClusterRouteHandler jobClusterRouteHandler = new JobClusterRouteHandlerAkkaImpl(jobClustersManagerActor);
            final JobRouteHandler jobRouteHandler = new JobRouteHandlerAkkaImpl(jobClustersManagerActor);
            final JobClusterRoute app = new JobClusterRoute(jobClusterRouteHandler, jobRouteHandler, system);
            final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow = app.createRoute(Function.identity()).flow(system, materializer);
            logger.info("starting test server on port {}", serverPort);
            binding = http.bindAndHandle(routeFlow,
                ConnectHttp.toHost("localhost", serverPort), materializer);
            latch.countDown();
        } catch (Exception e) {
            logger.info("caught exception", e);
            // Release the latch on failure too so setup() does not hang;
            // tests will then fail on their first request.
            latch.countDown();
            e.printStackTrace();
        }
    });
    t.setDaemon(true);
    t.start();
    latch.await();
}
// Unbinds the HTTP server, shuts down the actor system, and stops the
// background server thread.
@AfterClass
public static void teardown() {
    logger.info("V0JobClusterRouteTest teardown");
    binding
        .thenCompose(ServerBinding::unbind)
        .thenAccept(unbound -> system.terminate());
    t.interrupt();
}
/** Builds the full v0 named-job API URL for the given endpoint suffix. */
private String namedJobAPIEndpoint(final String endpoint) {
    return "http://127.0.0.1:" + serverPort + "/api/namedjob/" + endpoint;
}
// Single JUnit entry point; the private scenarios below are order-dependent
// (create before disable, disable before delete, ...), so they are invoked
// sequentially here rather than as independent @Test methods.
@Test
public void testIt() throws Exception {
    testJobClusterCreate();
    testDuplicateJobClusterCreateFails();
    testJobClusterDisable();
    testJobClusterEnable();
    testJobClusterUpdateArtifact();
    testJobClusterUpdateSLA();
    testJobClusterUpdateLabels();
    testJobClusterUpdateMigrateStrategy();
    testJobClusterQuickSubmit();
    testJobClustersList();
    testJobClusterGetDetail();
    testJobClusterGetJobIds();
    testJobClusterGetAllJobIds();
    testJobClusterDisable2();
    testJobClusterDelete();
    testJobClusterCreateOnRC();
    testJobClusterRCGetDetail();
}
// Creates the "sine-function" cluster via the v0 form-encoded create endpoint.
private void testJobClusterCreate() throws InterruptedException {
    final CountDownLatch done = new CountDownLatch(1);
    final HttpRequest createRequest = HttpRequest.POST(namedJobAPIEndpoint("create"))
        .withMethod(HttpMethods.POST)
        .withEntity(ContentTypes.create(MediaTypes.APPLICATION_X_WWW_FORM_URLENCODED), JobClusterPayloads.JOB_CLUSTER_CREATE);
    http.singleRequest(createRequest)
        .thenCompose(response -> processRespFut(response, 200))
        .whenComplete((body, failure) -> {
            final String responseMessage = getResponseMessage(body, failure);
            logger.info("got response {}", responseMessage);
            assertEquals("sine-function created", responseMessage);
            done.countDown();
        });
    assertTrue(done.await(3, TimeUnit.SECONDS));
}
// Creates the resource-cluster-backed "sine-function-rc" cluster.
private void testJobClusterCreateOnRC() throws InterruptedException {
    final CountDownLatch done = new CountDownLatch(1);
    final HttpRequest createRequest = HttpRequest.POST(namedJobAPIEndpoint("create"))
        .withMethod(HttpMethods.POST)
        .withEntity(ContentTypes.create(MediaTypes.APPLICATION_X_WWW_FORM_URLENCODED), JobClusterPayloads.JOB_CLUSTER_CREATE_RC);
    http.singleRequest(createRequest)
        .thenCompose(response -> processRespFut(response, 200))
        .whenComplete((body, failure) -> {
            final String responseMessage = getResponseMessage(body, failure);
            logger.info("got response {}", responseMessage);
            assertEquals("sine-function-rc created", responseMessage);
            done.countDown();
        });
    assertTrue(done.await(3, TimeUnit.SECONDS));
}
// Re-creating an existing cluster must fail with a 500 and a JSON error body.
private void testDuplicateJobClusterCreateFails() throws InterruptedException {
    final CountDownLatch done = new CountDownLatch(1);
    final HttpRequest duplicateRequest = HttpRequest.POST(namedJobAPIEndpoint("create"))
        .withMethod(HttpMethods.POST)
        .withEntity(ContentTypes.create(MediaTypes.APPLICATION_X_WWW_FORM_URLENCODED), JobClusterPayloads.JOB_CLUSTER_CREATE);
    http.singleRequest(duplicateRequest)
        .thenCompose(response -> processRespFut(response, 500))
        .whenComplete((body, failure) -> {
            final String responseMessage = getResponseMessage(body, failure);
            logger.info("got response {}", responseMessage);
            assertTrue(responseMessage.startsWith("{\"error\":"));
            done.countDown();
        });
    assertTrue(done.await(2, TimeUnit.SECONDS));
}
// Disables the sine-function cluster and checks the acknowledgement.
private void testJobClusterDisable() throws InterruptedException {
    final CountDownLatch done = new CountDownLatch(1);
    final HttpRequest disableRequest = HttpRequest.POST(namedJobAPIEndpoint("disable"))
        .withMethod(HttpMethods.POST)
        .withEntity(ContentTypes.create(MediaTypes.APPLICATION_X_WWW_FORM_URLENCODED), JobClusterPayloads.JOB_CLUSTER_DISABLE);
    http.singleRequest(disableRequest)
        .thenCompose(response -> processRespFut(response, 200))
        .whenComplete((body, failure) -> {
            final String responseMessage = getResponseMessage(body, failure);
            logger.info("got response {}", responseMessage);
            assertEquals("sine-function disabled", responseMessage);
            done.countDown();
        });
    assertTrue(done.await(latchTimeout.getSeconds(), TimeUnit.SECONDS));
}
// Re-enables the sine-function cluster after testJobClusterDisable.
// NOTE(review): this reuses JOB_CLUSTER_DISABLE as the request payload —
// presumably the payload only carries the cluster name, making it valid for
// both endpoints, but confirm against JobClusterPayloads.
private void testJobClusterEnable() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.POST(namedJobAPIEndpoint("enable"))
            .withMethod(HttpMethods.POST)
            .withEntity(ContentTypes.create(MediaTypes.APPLICATION_X_WWW_FORM_URLENCODED), JobClusterPayloads.JOB_CLUSTER_DISABLE));
    responseFuture
        .thenCompose(r -> processRespFut(r, 200))
        .whenComplete((r, t) -> {
            String responseMessage = getResponseMessage(r, t);
            logger.info("got response {}", responseMessage);
            assertEquals("sine-function enabled", responseMessage);
            latch.countDown();
        });
    assertTrue(latch.await(latchTimeout.getSeconds(), TimeUnit.SECONDS));
}
// Quick-updates the cluster artifact (skipping job submit) and checks the ack.
private void testJobClusterUpdateArtifact() throws InterruptedException {
    final CountDownLatch done = new CountDownLatch(1);
    final HttpRequest updateRequest = HttpRequest.POST(namedJobAPIEndpoint("quickupdate"))
        .withEntity(ContentTypes.create(MediaTypes.APPLICATION_X_WWW_FORM_URLENCODED), JobClusterPayloads.JOB_CLUSTER_QUICK_UPDATE_AND_SKIP_SUBMIT);
    http.singleRequest(updateRequest)
        .thenCompose(response -> processRespFut(response, 200))
        .whenComplete((body, failure) -> {
            final String responseMessage = getResponseMessage(body, failure);
            logger.info("got response {}", responseMessage);
            assertEquals("sine-function artifact updated", responseMessage);
            done.countDown();
        });
    assertTrue(done.await(latchTimeout.getSeconds(), TimeUnit.SECONDS));
}
// Updates the cluster SLA and checks the acknowledgement.
private void testJobClusterUpdateSLA() throws InterruptedException {
    final CountDownLatch done = new CountDownLatch(1);
    final HttpRequest slaRequest = HttpRequest.POST(namedJobAPIEndpoint("updatesla"))
        .withEntity(ContentTypes.create(MediaTypes.APPLICATION_X_WWW_FORM_URLENCODED), JobClusterPayloads.JOB_CLUSTER_UPDATE_SLA);
    http.singleRequest(slaRequest)
        .thenCompose(response -> processRespFut(response, 200))
        .whenComplete((body, failure) -> {
            final String responseMessage = getResponseMessage(body, failure);
            logger.info("got response {}", responseMessage);
            assertEquals("sine-function SLA updated", responseMessage);
            done.countDown();
        });
    assertTrue(done.await(latchTimeout.getSeconds(), TimeUnit.SECONDS));
}
// Updates the cluster labels and checks the acknowledgement.
private void testJobClusterUpdateLabels() throws InterruptedException {
    final CountDownLatch done = new CountDownLatch(1);
    final HttpRequest labelsRequest = HttpRequest.POST(namedJobAPIEndpoint("updatelabels"))
        .withEntity(ContentTypes.create(MediaTypes.APPLICATION_X_WWW_FORM_URLENCODED), JobClusterPayloads.JOB_CLUSTER_UPDATE_LABELS);
    http.singleRequest(labelsRequest)
        .thenCompose(response -> processRespFut(response, 200))
        .whenComplete((body, failure) -> {
            final String responseMessage = getResponseMessage(body, failure);
            logger.info("got response {}", responseMessage);
            assertEquals("sine-function labels updated", responseMessage);
            done.countDown();
        });
    assertTrue(done.await(latchTimeout.getSeconds(), TimeUnit.SECONDS));
}
// Updates the worker-migration strategy and checks the acknowledgement.
private void testJobClusterUpdateMigrateStrategy() throws InterruptedException {
    final CountDownLatch done = new CountDownLatch(1);
    final HttpRequest migrateRequest = HttpRequest.POST(namedJobAPIEndpoint("migratestrategy"))
        .withEntity(ContentTypes.create(MediaTypes.APPLICATION_X_WWW_FORM_URLENCODED), JobClusterPayloads.MIGRATE_STRATEGY_UPDATE);
    http.singleRequest(migrateRequest)
        .thenCompose(response -> processRespFut(response, 200))
        .whenComplete((body, failure) -> {
            final String responseMessage = getResponseMessage(body, failure);
            logger.info("got response {}", responseMessage);
            assertEquals("sine-function worker migration config updated", responseMessage);
            done.countDown();
        });
    assertTrue(done.await(latchTimeout.getSeconds(), TimeUnit.SECONDS));
}
// Quick-submits a job using the cluster defaults; the new job id should
// appear in the response body.
private void testJobClusterQuickSubmit() throws InterruptedException {
    final CountDownLatch done = new CountDownLatch(1);
    final HttpRequest quickSubmitRequest = HttpRequest.POST(namedJobAPIEndpoint("quicksubmit"))
        .withEntity(ContentTypes.create(MediaTypes.APPLICATION_X_WWW_FORM_URLENCODED), JobClusterPayloads.QUICK_SUBMIT);
    http.singleRequest(quickSubmitRequest)
        .thenCompose(response -> processRespFut(response, 200))
        .whenComplete((body, failure) -> {
            final String responseMessage = getResponseMessage(body, failure);
            logger.info("got response {}", responseMessage);
            assertTrue(responseMessage.contains("sine-function-1"));
            done.countDown();
        });
    assertTrue(done.await(latchTimeout.getSeconds(), TimeUnit.SECONDS));
}
// Calls the cluster list endpoint repeatedly and verifies both the payload
// (exactly one cluster, "sine-function") and that repeated reads are stable:
// every response must be byte-identical to the previous one.
private void testJobClustersList() throws InterruptedException {
    int numIter = 10;
    final CountDownLatch latch = new CountDownLatch(numIter);
    // Holds the previous iteration's raw response for the stability check.
    AtomicReference<String> prevResp = new AtomicReference<>(null);
    for (int i =0; i < numIter; i++) {
        final CompletionStage<HttpResponse> responseFuture2 = http.singleRequest(
            HttpRequest.GET(namedJobAPIEndpoint("list")));
        responseFuture2
            .thenCompose(r -> processRespFut(r, 200))
            .whenComplete((msg, t) -> {
                String responseMessage = getResponseMessage(msg, t);
                logger.info("got response {}", responseMessage);
                try {
                    List<MantisJobClusterMetadataView> jobClusters = Jackson.fromJSON(responseMessage, new TypeReference<List<MantisJobClusterMetadataView>>() {
                    });
                    assertEquals(1, jobClusters.size());
                    MantisJobClusterMetadataView jobCluster = jobClusters.get(0);
                    assertEquals("sine-function", jobCluster.getName());
                } catch (IOException e) {
                    fail("failed to parse response message " + e.getMessage());
                }
                // Responses may complete out of order, but all must be identical,
                // so comparing against whichever came last is still valid.
                if (prevResp.get() != null) {
                    assertEquals(prevResp.get(), responseMessage);
                }
                prevResp.set(responseMessage);
                latch.countDown();
            });
    }
    assertTrue(latch.await(2, TimeUnit.SECONDS));
}
/**
 * GET /list/sine-function and validate the cluster's scheduling info: two
 * jars, two stages, with stage 1 sized to one instance and scalable.
 */
private void testJobClusterGetDetail() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.GET(namedJobAPIEndpoint("list/sine-function")));
    responseFuture
        .thenCompose(r -> processRespFut(r, 200))
        .whenComplete((msg, t) -> {
            String responseMessage = getResponseMessage(msg, t);
            logger.info("got response {}", responseMessage);
            try {
                List<MantisJobClusterMetadataView> jobClusters = Jackson.fromJSON(responseMessage,
                    new TypeReference<List<MantisJobClusterMetadataView>>() {});
                assertEquals(1, jobClusters.size());
                MantisJobClusterMetadataView jc = jobClusters.get(0);
                assertEquals("sine-function", jc.getName());
                // TODO fix Jars list
                assertEquals(2, jc.getJars().size());
                assertEquals(2, jc.getJars().get(0).getSchedulingInfo().getStages().size());
                assertEquals(1, jc.getJars().get(0).getSchedulingInfo().getStages().get(1).getNumberOfInstances());
                // assertTrue instead of assertEquals(true, ...) for a boolean check;
                // the duplicated name assertion was also dropped.
                assertTrue(jc.getJars().get(0).getSchedulingInfo().getStages().get(1).getScalable());
            } catch (Exception e) {
                logger.error("failed to deser json {}", responseMessage, e);
                fail("failed to deser json " + responseMessage);
            }
            latch.countDown();
        });
    assertTrue(latch.await(2, TimeUnit.SECONDS));
}
private void testJobClusterRCGetDetail() throws InterruptedException {
    // Fetch details of "sine-function-rc" and verify its resource-cluster
    // label (_mantis.resourceCluster=mantisagent) appears among the six labels.
    final CountDownLatch done = new CountDownLatch(1);
    http.singleRequest(HttpRequest.GET(namedJobAPIEndpoint("list/sine-function-rc")))
        .thenCompose(resp -> processRespFut(resp, 200))
        .whenComplete((body, err) -> {
            final String payload = getResponseMessage(body, err);
            logger.info("got response {}", payload);
            try {
                final List<MantisJobClusterMetadataView> clusters =
                    Jackson.fromJSON(payload, new TypeReference<List<MantisJobClusterMetadataView>>() {});
                assertEquals(1, clusters.size());
                final MantisJobClusterMetadataView cluster = clusters.get(0);
                assertEquals("sine-function-rc", cluster.getName());
                assertEquals(6, cluster.getLabels().size());
                final long rcLabelCount = cluster.getLabels().stream()
                    .filter(label -> "_mantis.resourceCluster".equals(label.getName())
                        && "mantisagent".equals(label.getValue()))
                    .count();
                assertEquals(1, rcLabelCount);
            } catch (Exception e) {
                logger.error("failed to deser json {}", payload, e);
                fail("failed to deser json " + payload);
            }
            done.countDown();
        });
    assertTrue(done.await(4, TimeUnit.SECONDS));
}
private void testJobClusterGetJobIds() throws InterruptedException {
    // Listing job ids for "sine-function" should report exactly one job.
    final CountDownLatch done = new CountDownLatch(1);
    http.singleRequest(HttpRequest.GET(namedJobAPIEndpoint("listJobIds/sine-function")))
        .thenCompose(resp -> processRespFut(resp, 200))
        .whenComplete((body, err) -> {
            try {
                final String payload = getResponseMessage(body, err);
                logger.info("got response {}", payload);
                final List<JobClusterProtoAdapter.JobIdInfo> ids = Jackson.fromJSON(
                    payload,
                    new TypeReference<List<JobClusterProtoAdapter.JobIdInfo>>() {});
                assertEquals(1, ids.size());
            } catch (Exception e) {
                fail("unexpected error " + e.getMessage());
            }
            done.countDown();
        });
    assertTrue(done.await(2, TimeUnit.SECONDS));
}
private void testJobClusterGetAllJobIds() throws InterruptedException {
    // Without a cluster name the endpoint rejects the request with a 400
    // and a usage hint in the body.
    final CountDownLatch done = new CountDownLatch(1);
    http.singleRequest(HttpRequest.GET(namedJobAPIEndpoint("listJobIds")))
        .thenCompose(resp -> processRespFut(resp, 400))
        .whenComplete((body, err) -> {
            final String payload = getResponseMessage(body, err);
            logger.info("got response {}", payload);
            assertEquals("Specify the Job cluster name '/api/namedjob/listJobIds/<JobClusterName>' to list the job Ids", payload);
            done.countDown();
        });
    assertTrue(done.await(latchTimeout.getSeconds(), TimeUnit.SECONDS));
}
/** Disables the cluster (terminating all running jobs) and checks the ack. */
private void testJobClusterDisable2() throws InterruptedException {
    // Disable cluster to terminate all running jobs
    final CountDownLatch latch = new CountDownLatch(1);
    // HttpRequest.POST(...) already sets the method; the previous extra
    // .withMethod(HttpMethods.POST) call was redundant and has been removed.
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.POST(namedJobAPIEndpoint("disable"))
            .withEntity(HttpEntities.create(ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_DISABLE)));
    responseFuture
        .thenCompose(r -> processRespFut(r, 200))
        .whenComplete((r, t) -> {
            String responseMessage = getResponseMessage(r, t);
            logger.info("got response {}", responseMessage);
            assertEquals("sine-function disabled", responseMessage);
            latch.countDown();
        });
    assertTrue(latch.await(latchTimeout.getSeconds(), TimeUnit.SECONDS));
}
private void testJobClusterDelete() throws InterruptedException {
    // Deleting the cluster should be acknowledged with "sine-function deleted".
    final CountDownLatch done = new CountDownLatch(1);
    http.singleRequest(
            HttpRequest.POST(namedJobAPIEndpoint("delete"))
                .withEntity(HttpEntities.create(ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_DELETE)))
        .thenCompose(resp -> processRespFut(resp, 200))
        .whenComplete((body, err) -> {
            final String payload = getResponseMessage(body, err);
            logger.info("got response {}", payload);
            assertEquals("sine-function deleted", payload);
            done.countDown();
        });
    assertTrue(done.await(latchTimeout.getSeconds(), TimeUnit.SECONDS));
}
}
| 7,919 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route/v0/JobStatusRouteTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v0;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import akka.NotUsed;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.http.javadsl.ConnectHttp;
import akka.http.javadsl.Http;
import akka.http.javadsl.ServerBinding;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.stream.ActorMaterializer;
import akka.stream.javadsl.Flow;
import io.mantisrx.master.JobClustersManagerActor;
import io.mantisrx.master.api.akka.route.handlers.JobStatusRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobStatusRouteHandlerAkkaImpl;
import io.mantisrx.master.events.*;
import io.mantisrx.master.jobcluster.job.CostsCalculator;
import io.mantisrx.master.jobcluster.job.JobTestHelper;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.scheduler.AgentsErrorMonitorActor;
import io.mantisrx.master.scheduler.FakeMantisScheduler;
import io.mantisrx.server.master.persistence.FileBasedPersistenceProvider;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.MantisSchedulerFactory;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.function.Function;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
/**
 * Tests for the v0 job-status route: boots a real HTTP server backed by a
 * JobClustersManagerActor with a fake scheduler on a daemon thread.
 */
public class JobStatusRouteTest {
    private final static Logger logger = LoggerFactory.getLogger(JobStatusRouteTest.class);
    private final ActorMaterializer materializer = ActorMaterializer.create(system);
    private final Http http = Http.get(system);
    private static Thread t;
    private static final int serverPort = 8207;
    // volatile: written on the server boot thread in setup(), read in teardown()
    // (consistent with JobDiscoveryRouteTest in this package).
    private static volatile CompletionStage<ServerBinding> binding;
    private static ActorSystem system = ActorSystem.create("JobStatusRoute");
    private static ActorRef agentsErrorMonitorActor = system.actorOf(AgentsErrorMonitorActor.props());
    private static ActorRef statusEventBrokerActor = system.actorOf(StatusEventBrokerActor.props(agentsErrorMonitorActor));

    /**
     * Boots the HTTP server for the job-status route on a daemon thread and
     * blocks until the bind has been initiated (or setup failed).
     */
    @BeforeClass
    public static void setup() throws Exception {
        JobTestHelper.deleteAllFiles();
        JobTestHelper.createDirsIfRequired();
        final CountDownLatch latch = new CountDownLatch(1);
        t = new Thread(() -> {
            try {
                // boot up server using the route as defined below
                final Http http = Http.get(system);
                final ActorMaterializer materializer = ActorMaterializer.create(system);
                final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl());
                ActorRef jobClustersManagerActor = system.actorOf(JobClustersManagerActor.props(
                    new MantisJobStore(new FileBasedPersistenceProvider(true)), lifecycleEventPublisher, CostsCalculator.noop()), "jobClustersManager");
                MantisSchedulerFactory fakeSchedulerFactory = mock(MantisSchedulerFactory.class);
                MantisScheduler fakeScheduler = new FakeMantisScheduler(jobClustersManagerActor);
                when(fakeSchedulerFactory.forJob(any())).thenReturn(fakeScheduler);
                jobClustersManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize(fakeSchedulerFactory, false), ActorRef.noSender());
                agentsErrorMonitorActor.tell(new AgentsErrorMonitorActor.InitializeAgentsErrorMonitor(fakeScheduler), ActorRef.noSender());
                final JobStatusRouteHandler jobStatusRouteHandler = new JobStatusRouteHandlerAkkaImpl(system, statusEventBrokerActor);
                final JobStatusRoute jobStatusRoute = new JobStatusRoute(jobStatusRouteHandler);
                final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow = jobStatusRoute.createRoute(Function.identity()).flow(system, materializer);
                logger.info("starting test server on port {}", serverPort);
                binding = http.bindAndHandle(routeFlow,
                    ConnectHttp.toHost("localhost", serverPort), materializer);
                // Count down only after 'binding' is assigned so teardown never
                // observes a null binding for a successful setup (previously the
                // latch was released one statement too early).
                latch.countDown();
            } catch (Exception e) {
                logger.info("caught exception", e);
                latch.countDown();
                e.printStackTrace();
            }
        });
        t.setDaemon(true);
        t.start();
        latch.await();
    }

    /** Unbinds the server (if it ever bound) and terminates the actor system. */
    @AfterClass
    public static void teardown() {
        logger.info("JobStatusRouteTest teardown");
        // Guard against a setup failure that left 'binding' unassigned.
        if (binding != null) {
            binding
                .thenCompose(ServerBinding::unbind) // trigger unbinding from the port
                .thenAccept(unbound -> system.terminate()); // and shutdown when done
        }
        t.interrupt();
    }
}
| 7,920 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route/v0/JobDiscoveryRouteTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v0;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import akka.NotUsed;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.http.javadsl.ConnectHttp;
import akka.http.javadsl.Http;
import akka.http.javadsl.ServerBinding;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.stream.ActorMaterializer;
import akka.stream.javadsl.Flow;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.JobClustersManagerActor;
import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandlerAkkaImpl;
import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventPublisherImpl;
import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl;
import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl;
import io.mantisrx.master.jobcluster.job.CostsCalculator;
import io.mantisrx.master.jobcluster.job.JobTestHelper;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.scheduler.AgentsErrorMonitorActor;
import io.mantisrx.master.scheduler.FakeMantisScheduler;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.core.NamedJobInfo;
import io.mantisrx.server.master.persistence.FileBasedPersistenceProvider;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.MantisSchedulerFactory;
import java.time.Duration;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.function.Function;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import rx.Observable;
/**
 * Tests for the v0 job-discovery route: boots a real HTTP server backed by a
 * JobClustersManagerActor with a fake scheduler, then exercises the scheduling
 * and named-job info streams through TestMantisClient. Left structurally
 * unchanged: the actor/server boot sequence is order-sensitive.
 */
public class JobDiscoveryRouteTest {
private final static Logger logger = LoggerFactory.getLogger(JobDiscoveryRouteTest.class);
private final ActorMaterializer materializer = ActorMaterializer.create(system);
private final Http http = Http.get(system);
private static Thread t;
private static final int serverPort = 8217;
// Written by the server boot thread in setup(), read in teardown(); volatile for visibility.
private static volatile CompletionStage<ServerBinding> binding;
private static ActorSystem system = ActorSystem.create("JobDiscoveryRoute");
private static ActorRef agentsErrorMonitorActor = system.actorOf(AgentsErrorMonitorActor.props());
private final TestMantisClient mantisClient = new TestMantisClient(serverPort);
// Boots the route's HTTP server on a daemon thread and blocks until the bind
// has been initiated (or setup failed).
@BeforeClass
public static void setup() throws Exception {
JobTestHelper.deleteAllFiles();
JobTestHelper.createDirsIfRequired();
final CountDownLatch latch = new CountDownLatch(1);
t = new Thread(() -> {
try {
// boot up server using the route as defined below
final Http http = Http.get(system);
final ActorMaterializer materializer = ActorMaterializer.create(system);
final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl());
TestHelpers.setupMasterConfig();
ActorRef jobClustersManagerActor = system.actorOf(
JobClustersManagerActor.props(
new MantisJobStore(new FileBasedPersistenceProvider(true)),
lifecycleEventPublisher,
CostsCalculator.noop()),
"jobClustersManager");
MantisSchedulerFactory fakeSchedulerFactory = mock(MantisSchedulerFactory.class);
MantisScheduler fakeScheduler = new FakeMantisScheduler(jobClustersManagerActor);
when(fakeSchedulerFactory.forJob(any())).thenReturn(fakeScheduler);
jobClustersManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize(fakeSchedulerFactory, false), ActorRef.noSender());
agentsErrorMonitorActor.tell(new AgentsErrorMonitorActor.InitializeAgentsErrorMonitor(fakeScheduler), ActorRef.noSender());
// The route handler reuses the server-wide idle timeout from akka config.
Duration idleTimeout = system.settings().config().getDuration("akka.http.server.idle-timeout");
logger.info("idle timeout {} sec ", idleTimeout.getSeconds());
final JobDiscoveryRouteHandler jobDiscoveryRouteHandler = new JobDiscoveryRouteHandlerAkkaImpl(jobClustersManagerActor, idleTimeout);
final JobDiscoveryRoute jobDiscoveryRoute = new JobDiscoveryRoute(jobDiscoveryRouteHandler);
final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow = jobDiscoveryRoute.createRoute(Function.identity()).flow(system, materializer);
logger.info("starting test server on port {}", serverPort);
binding = http.bindAndHandle(routeFlow,
ConnectHttp.toHost("localhost", serverPort), materializer);
// 'binding' is assigned before the latch releases so teardown sees it.
latch.countDown();
} catch (Exception e) {
logger.info("caught exception", e);
latch.countDown();
e.printStackTrace();
}
});
t.setDaemon(true);
t.start();
latch.await();
}
@AfterClass
public static void teardown() {
logger.info("JobDiscoveryRouteTest teardown");
// binding may be null if setup failed before the server bound.
if (binding != null) {
binding
.thenCompose(ServerBinding::unbind) // trigger unbinding from the port
.thenAccept(unbound -> system.terminate()); // and shutdown when done
}
t.interrupt();
}
@Test
public void testSchedulingInfoStreamForNonExistentJob() throws InterruptedException {
// The current behavior of Mantis client is to retry non-200 responses
// This test overrides the default retry/repeat behavior to test a Sched info observable would complete if the job id requested is non-existent
final CountDownLatch latch = new CountDownLatch(1);
Observable<JobSchedulingInfo> jobSchedulingInfoObservable = mantisClient
.schedulingChanges("testJobCluster-1",
obs -> Observable.just(1),
obs -> Observable.empty()
);
jobSchedulingInfoObservable
.doOnNext(x -> logger.info("onNext {}", x))
.doOnError(t -> logger.warn("onError", t))
.doOnCompleted(() -> {
logger.info("onCompleted");
latch.countDown();
})
.subscribe();
// NOTE(review): await() has no timeout, so a regression here hangs the suite
// instead of failing fast — consider a bounded await.
latch.await();
}
@Test
public void testNamedJobInfoStreamForNonExistentJob() throws InterruptedException {
// The current behavior of Mantis client is to retry non-200 responses
// This test overrides the default retry/repeat behavior to test a namedjob info observable would complete if the job cluster requested is non-existent
final CountDownLatch latch = new CountDownLatch(1);
Observable<NamedJobInfo> jobSchedulingInfoObservable = mantisClient
.namedJobInfo("testJobCluster",
obs -> Observable.just(1),
obs -> Observable.empty()
);
jobSchedulingInfoObservable
.doOnNext(x -> logger.info("onNext {}", x))
.doOnError(t -> logger.warn("onError", t))
.doOnCompleted(() -> {
logger.info("onCompleted");
latch.countDown();
})
.subscribe();
// NOTE(review): unbounded await, same caveat as the test above.
latch.await();
}
}
| 7,921 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route/v0/AgentClusterRouteTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v0;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import akka.NotUsed;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.http.javadsl.ConnectHttp;
import akka.http.javadsl.Http;
import akka.http.javadsl.ServerBinding;
import akka.http.javadsl.model.HttpEntity;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.stream.ActorMaterializer;
import akka.stream.javadsl.Flow;
import akka.util.ByteString;
import com.netflix.fenzo.AutoScaleAction;
import com.netflix.fenzo.AutoScaleRule;
import com.netflix.fenzo.VirtualMachineLease;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.JobClustersManagerActor;
import io.mantisrx.master.api.akka.payloads.AgentClusterPayloads;
import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventPublisherImpl;
import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl;
import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl;
import io.mantisrx.master.jobcluster.job.CostsCalculator;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.scheduler.FakeMantisScheduler;
import io.mantisrx.master.scheduler.JobMessageRouterImpl;
import io.mantisrx.master.vm.AgentClusterOperations;
import io.mantisrx.master.vm.AgentClusterOperationsImpl;
import io.mantisrx.server.master.AgentClustersAutoScaler;
import io.mantisrx.server.master.persistence.FileBasedPersistenceProvider;
import io.mantisrx.server.master.persistence.IMantisPersistenceProvider;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.MantisSchedulerFactory;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observer;
/**
 * Tests for the v0 agent-cluster (activevms) routes: boots a real HTTP server
 * with AgentClusterOperationsImpl wired to a fake scheduler, then drives the
 * set-active / list endpoints in order via {@link #testIt()}.
 */
public class AgentClusterRouteTest {
    private final static Logger logger = LoggerFactory.getLogger(AgentClusterRouteTest.class);
    private final ActorMaterializer materializer = ActorMaterializer.create(system);
    private final Http http = Http.get(system);
    private static Thread t;
    private static final int serverPort = 8209;
    private static final ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

    /**
     * Asserts the response status and permissive CORS header, then reads the
     * entity into a UTF-8 string.
     */
    private CompletionStage<String> processRespFut(final HttpResponse r, final int expectedStatusCode) {
        logger.info("headers {} {}", r.getHeaders(), r.status());
        assertEquals(expectedStatusCode, r.status().intValue());
        // assertTrue rather than the 'assert' keyword: the latter is silently
        // skipped unless the JVM runs with -ea, so the check would never fire.
        assertTrue(r.getHeader("Access-Control-Allow-Origin").isPresent());
        assertEquals("*", r.getHeader("Access-Control-Allow-Origin").get().value());
        CompletionStage<HttpEntity.Strict> strictEntity = r.entity().toStrict(1000, materializer);
        return strictEntity.thenCompose(s ->
            s.getDataBytes()
                .runFold(ByteString.emptyByteString(), (acc, b) -> acc.concat(b), materializer)
                .thenApply(s2 -> s2.utf8String())
        );
    }

    /** Fails the test on a transport error, otherwise returns the body. */
    private String getResponseMessage(final String msg, final Throwable t) {
        if (t != null) {
            logger.error("got err ", t);
            fail(t.getMessage());
        } else {
            return msg;
        }
        // fail(...) throws, but the compiler still requires a return value here.
        return "";
    }

    private static CompletionStage<ServerBinding> binding;
    private static ActorSystem system = ActorSystem.create("AgentClusterRoutes");

    /**
     * Starts an HTTP server for the v0 agent-cluster routes on a daemon thread
     * and blocks until the bind has been initiated (or setup failed).
     */
    @BeforeClass
    public static void setup() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        t = new Thread(() -> {
            try {
                // boot up server using the route as defined below
                TestHelpers.setupMasterConfig();
                final Http http = Http.get(system);
                final ActorMaterializer materializer = ActorMaterializer.create(system);
                IMantisPersistenceProvider storageProvider = new FileBasedPersistenceProvider(true);
                final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl());
                ActorRef jobClustersManagerActor = system.actorOf(
                    JobClustersManagerActor.props(
                        new MantisJobStore(storageProvider),
                        lifecycleEventPublisher,
                        CostsCalculator.noop()),
                    "jobClustersManager");
                MantisSchedulerFactory fakeSchedulerFactory = mock(MantisSchedulerFactory.class);
                MantisScheduler fakeScheduler = new FakeMantisScheduler(jobClustersManagerActor);
                when(fakeSchedulerFactory.forJob(any())).thenReturn(fakeScheduler);
                jobClustersManagerActor.tell(
                    new JobClusterManagerProto.JobClustersManagerInitialize(fakeSchedulerFactory, false), ActorRef.noSender());
                setupDummyAgentClusterAutoScaler();
                final AgentClusterRoute v0AgentClusterRoute = new AgentClusterRoute(
                    new AgentClusterOperationsImpl(storageProvider,
                        new JobMessageRouterImpl(jobClustersManagerActor),
                        fakeScheduler,
                        lifecycleEventPublisher,
                        "cluster"),
                    system);
                final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow = v0AgentClusterRoute.createRoute(Function.identity()).flow(system, materializer);
                logger.info("test server starting on port {}", serverPort);
                binding = http.bindAndHandle(routeFlow,
                    ConnectHttp.toHost("localhost", serverPort), materializer);
                latch.countDown();
            } catch (Exception e) {
                logger.info("caught exception", e);
                latch.countDown();
                e.printStackTrace();
            }
        });
        t.setDaemon(true);
        t.start();
        latch.await();
    }

    /** Unbinds the server and terminates the actor system. */
    @AfterClass
    public static void teardown() {
        logger.info("V0AgentClusterRouteTest teardown");
        binding
            .thenCompose(ServerBinding::unbind) // trigger unbinding from the port
            .thenAccept(unbound -> system.terminate()); // and shutdown when done
        t.interrupt();
    }

    /** Builds the URL for an activevms endpoint on the local test server. */
    private String agentClusterEndpoint(final String endpoint) {
        return String.format("http://127.0.0.1:%d/api/vm/activevms/%s", serverPort, endpoint);
    }

    /**
     * Installs a no-op AgentClustersAutoScaler with a single fixed rule; the
     * values here are echoed back by the LISTAGENTCLUSTERS endpoint and checked
     * in testGetAgentClustersList().
     */
    private static void setupDummyAgentClusterAutoScaler() {
        final AutoScaleRule dummyAutoScaleRule = new AutoScaleRule() {
            @Override
            public String getRuleName() { return "test"; }
            @Override
            public int getMinIdleHostsToKeep() { return 1; }
            @Override
            public int getMaxIdleHostsToKeep() { return 10; }
            @Override
            public long getCoolDownSecs() { return 300; }
            @Override
            public boolean idleMachineTooSmall(VirtualMachineLease lease) { return false; }
            @Override
            public int getMinSize() { return 1; }
            @Override
            public int getMaxSize() { return 100; }
        };
        try {
            AgentClustersAutoScaler.initialize(() -> new HashSet<>(Collections.singletonList(dummyAutoScaleRule)), new Observer<AutoScaleAction>() {
                @Override
                public void onCompleted() {
                }
                @Override
                public void onError(Throwable e) {
                }
                @Override
                public void onNext(AutoScaleAction autoScaleAction) {
                }
            });
        } catch (Exception e) {
            // AgentClustersAutoScaler keeps global state; a sibling test may have
            // initialized it already, in which case initialize() throws and we proceed.
            logger.info("AgentClustersAutoScaler is already initialized by another test", e);
        }
    }

    /**
     * Runs the sub-tests in dependency order: the active-VM list must be set
     * before the read-back endpoints are exercised.
     */
    @Test
    public void testIt() throws Exception {
        testSetActiveVMs();
        testGetJobsOnVMs();
        testGetAgentClustersList();
        testGetActiveAgentClusters();
    }

    private void testSetActiveVMs() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.POST(agentClusterEndpoint(AgentClusterRoute.SETACTIVE))
                .withEntity(AgentClusterPayloads.SET_ACTIVE));
        responseFuture
            .thenCompose(r -> processRespFut(r, 200))
            .whenComplete((msg, t) -> {
                String responseMessage = getResponseMessage(msg, t);
                logger.info("got response {}", responseMessage);
                latch.countDown();
            });
        assertTrue(latch.await(10, TimeUnit.SECONDS));
    }

    private void testGetJobsOnVMs() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.GET(agentClusterEndpoint(AgentClusterRoute.LISTJOBSONVMS)));
        responseFuture
            .thenCompose(r -> processRespFut(r, 200))
            .whenComplete((msg, t) -> {
                String responseMessage = getResponseMessage(msg, t);
                logger.info("got response {}", responseMessage);
                // TODO validate jobs on VM response
                assertEquals("{}", responseMessage);
                latch.countDown();
            });
        assertTrue(latch.await(1, TimeUnit.SECONDS));
    }

    private void testGetAgentClustersList() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.GET(agentClusterEndpoint(AgentClusterRoute.LISTAGENTCLUSTERS)));
        responseFuture
            .thenCompose(r -> processRespFut(r, 200))
            .whenComplete((msg, t) -> {
                String responseMessage = getResponseMessage(msg, t);
                logger.info("got response {}", responseMessage);
                try {
                    Map<String, AgentClusterOperations.AgentClusterAutoScaleRule> agentClusterAutoScaleRule = mapper.readValue(responseMessage,
                        new TypeReference<Map<String, AgentClusterOperations.AgentClusterAutoScaleRule>>() {});
                    // Values mirror the dummy rule installed in setupDummyAgentClusterAutoScaler().
                    agentClusterAutoScaleRule.values().forEach(autoScaleRule -> {
                        assertEquals("test", autoScaleRule.getName());
                        assertEquals(300, autoScaleRule.getCooldownSecs());
                        assertEquals(1, autoScaleRule.getMinIdle());
                        assertEquals(10, autoScaleRule.getMaxIdle());
                        assertEquals(1, autoScaleRule.getMinSize());
                        assertEquals(100, autoScaleRule.getMaxSize());
                    });
                } catch (IOException e) {
                    logger.error("caught error", e);
                    fail("failed to deserialize response");
                }
                latch.countDown();
            });
        assertTrue(latch.await(1, TimeUnit.SECONDS));
    }

    private void testGetActiveAgentClusters() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.GET(agentClusterEndpoint(AgentClusterRoute.LISTACTIVE)));
        responseFuture
            .thenCompose(r -> processRespFut(r, 200))
            .whenComplete((msg, t) -> {
                String responseMessage = getResponseMessage(msg, t);
                logger.info("got response {}", responseMessage);
                // The active list should round-trip exactly what testSetActiveVMs posted.
                assertEquals(AgentClusterPayloads.SET_ACTIVE, responseMessage);
                latch.countDown();
            });
        assertTrue(latch.await(1, TimeUnit.SECONDS));
    }
}
| 7,922 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route/v0/MasterDescriptionRouteTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.v0;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import akka.NotUsed;
import akka.actor.ActorSystem;
import akka.http.javadsl.ConnectHttp;
import akka.http.javadsl.Http;
import akka.http.javadsl.ServerBinding;
import akka.http.javadsl.model.HttpEntity;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.stream.ActorMaterializer;
import akka.stream.javadsl.Flow;
import akka.util.ByteString;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.jobcluster.job.JobTestHelper;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
 * Integration test for {@link MasterDescriptionRoute}: boots a real HTTP server
 * bound to the route on a fixed local port and verifies the
 * {@code /api/masterinfo} and {@code /api/masterconfig} endpoints.
 */
public class MasterDescriptionRouteTest {
    private final static Logger logger = LoggerFactory.getLogger(MasterDescriptionRouteTest.class);
    private final ActorMaterializer materializer = ActorMaterializer.create(system);
    private final Http http = Http.get(system);
    // Daemon thread hosting the test HTTP server; interrupted in teardown().
    private static Thread t;
    private static final int serverPort = 8205;
    private static final int targetEndpointPort = serverPort;
    // Canned master description served by the route; responses are compared against it.
    private static final MasterDescription fakeMasterDesc = new MasterDescription(
        "localhost",
        "127.0.0.1", targetEndpointPort,
        targetEndpointPort + 2,
        targetEndpointPort + 4,
        "api/postjobstatus",
        targetEndpointPort + 6,
        System.currentTimeMillis());

    /**
     * Validates the status code and CORS header of a response, then folds the
     * entity bytes into a UTF-8 string.
     *
     * @param r                  the HTTP response to inspect
     * @param expectedStatusCode expected status code, if any
     * @return future completing with the response body as a string
     */
    private CompletionStage<String> processRespFut(final HttpResponse r, final Optional<Integer> expectedStatusCode) {
        logger.info("headers {} {}", r.getHeaders(), r.status());
        expectedStatusCode.ifPresent(sc -> assertEquals(sc.intValue(), r.status().intValue()));
        // Use JUnit's assertTrue rather than the Java 'assert' keyword: the
        // keyword is a no-op unless the JVM runs with -ea, so the original
        // check could silently pass even when the header was missing.
        assertTrue(r.getHeader("Access-Control-Allow-Origin").isPresent());
        assertEquals("*", r.getHeader("Access-Control-Allow-Origin").get().value());
        CompletionStage<HttpEntity.Strict> strictEntity = r.entity().toStrict(1000, materializer);
        return strictEntity.thenCompose(s ->
            s.getDataBytes()
                .runFold(ByteString.emptyByteString(), (acc, b) -> acc.concat(b), materializer)
                .thenApply(s2 -> s2.utf8String())
        );
    }

    /**
     * Returns the response message when no error occurred; fails the test
     * otherwise. Parameter renamed from 't' to avoid shadowing the static
     * server-thread field of the same name.
     */
    private String getResponseMessage(final String msg, final Throwable throwable) {
        if (throwable != null) {
            logger.error("got err ", throwable);
            fail(throwable.getMessage());
        } else {
            return msg;
        }
        return "";
    }

    // Server binding future; may remain null if server startup failed.
    private static CompletionStage<ServerBinding> binding;
    private static ActorSystem system = ActorSystem.create("MasterDescriptionRouteTest");
    private static final MasterDescriptionRoute masterDescRoute;
    static {
        // Master config must be in place before the route is constructed.
        TestHelpers.setupMasterConfig();
        masterDescRoute = new MasterDescriptionRoute(fakeMasterDesc);
    }

    /** Starts the HTTP server on a daemon thread and waits until it has begun binding. */
    @BeforeClass
    public static void setup() throws Exception {
        JobTestHelper.deleteAllFiles();
        JobTestHelper.createDirsIfRequired();
        final CountDownLatch latch = new CountDownLatch(1);
        t = new Thread(() -> {
            try {
                // boot up server using the route as defined below
                final Http http = Http.get(system);
                final ActorMaterializer materializer = ActorMaterializer.create(system);
                final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow = masterDescRoute.createRoute(Function.identity()).flow(system, materializer);
                logger.info("starting test server on port {}", serverPort);
                latch.countDown();
                binding = http.bindAndHandle(routeFlow,
                    ConnectHttp.toHost("localhost", serverPort), materializer);
            } catch (Exception e) {
                logger.info("caught exception", e);
                // Release the waiting test thread even on failure so setup
                // does not hang; the tests will then fail with clear errors.
                latch.countDown();
                e.printStackTrace();
            }
        });
        t.setDaemon(true);
        t.start();
        latch.await();
    }

    /** Unbinds the server (if it ever bound) and terminates the actor system. */
    @AfterClass
    public static void teardown() {
        logger.info("MasterDescriptionRouteTest teardown");
        if (binding != null) {
            // Guard against startup failure, where binding is never assigned.
            binding
                .thenCompose(ServerBinding::unbind) // trigger unbinding from the port
                .thenAccept(unbound -> system.terminate()); // and shutdown when done
        }
        t.interrupt();
    }

    /** Builds the full URL for an API endpoint on the locally bound server. */
    private String masterEndpoint(final String ep) {
        return String.format("http://127.0.0.1:%d/api/%s", targetEndpointPort, ep);
    }

    /** GET /api/masterinfo should return the fake master description as JSON. */
    @Test
    public void testMasterInfoAPI() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.GET(masterEndpoint("masterinfo")));
        responseFuture
            .thenCompose(r -> processRespFut(r, Optional.of(200)))
            .whenComplete((msg, t) -> {
                try {
                    String responseMessage = getResponseMessage(msg, t);
                    logger.info("got response {}", responseMessage);
                    MasterDescription masterDescription = Jackson.fromJSON(responseMessage, MasterDescription.class);
                    logger.info("master desc ---> {}", masterDescription);
                    assertEquals(fakeMasterDesc, masterDescription);
                } catch (Exception e) {
                    fail("unexpected error "+ e.getMessage());
                }
                latch.countDown();
            });
        assertTrue(latch.await(2, TimeUnit.SECONDS));
    }

    /** GET /api/masterconfig should return the route's configlet list as JSON. */
    @Test
    public void testMasterConfigAPI() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.GET(masterEndpoint("masterconfig")));
        responseFuture
            .thenCompose(r -> processRespFut(r, Optional.of(200)))
            .whenComplete((msg, t) -> {
                try {
                    String responseMessage = getResponseMessage(msg, t);
                    logger.info("got response {}", responseMessage);
                    List<MasterDescriptionRoute.Configlet> masterconfig = Jackson.fromJSON(responseMessage,
                        new TypeReference<List<MasterDescriptionRoute.Configlet>>() {});
                    logger.info("master config ---> {}", masterconfig);
                    assertEquals(masterDescRoute.getConfigs(), masterconfig);
                } catch (Exception e) {
                    fail("unexpected error "+ e.getMessage());
                }
                latch.countDown();
            });
        assertTrue(latch.await(2, TimeUnit.SECONDS));
    }
}
| 7,923 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/route/utils/JobRouteUtilsTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.route.utils;
import static org.junit.Assert.assertEquals;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.junit.Test;
/**
 * Unit tests for {@link JobRouteUtils#createListJobsRequest}: verifies that
 * HTTP query parameters are translated into list-jobs criteria, and that
 * sensible defaults apply when no parameters are supplied.
 */
public class JobRouteUtilsTest {

    @Test
    public void testListJobRequest() {
        final Map<String, List<String>> queryParams = new HashMap<>();
        queryParams.put(JobRouteUtils.QUERY_PARAM_LIMIT, Arrays.asList("10"));
        queryParams.put(JobRouteUtils.QUERY_PARAM_JOB_STATE, Arrays.asList("Active"));
        queryParams.put(JobRouteUtils.QUERY_PARAM_STAGE_NUM, Arrays.asList("1"));
        queryParams.put(JobRouteUtils.QUERY_PARAM_WORKER_INDEX, Arrays.asList("11"));
        queryParams.put(JobRouteUtils.QUERY_PARAM_WORKER_NUM, Arrays.asList("233"));
        queryParams.put(JobRouteUtils.QUERY_PARAM_WORKER_STATE, Arrays.asList("Terminal"));
        queryParams.put(JobRouteUtils.QUERY_PARAM_ACTIVE_ONLY, Arrays.asList("False"));
        queryParams.put(JobRouteUtils.QUERY_PARAM_LABELS_QUERY, Arrays.asList("lab1=v1,lab3=v3"));
        queryParams.put(JobRouteUtils.QUERY_PARAM_LABELS_OPERAND, Arrays.asList("and"));

        final JobClusterManagerProto.ListJobsRequest request =
            JobRouteUtils.createListJobsRequest(queryParams, Optional.of(".*abc.*"), true);

        // Scalar criteria parsed from the query parameters.
        assertEquals(10, request.getCriteria().getLimit().get().intValue());
        assertEquals(JobState.MetaState.Active, request.getCriteria().getJobState().get());
        assertEquals(false, request.getCriteria().getActiveOnly().get());
        assertEquals("and", request.getCriteria().getLabelsOperand().get());
        assertEquals(".*abc.*", request.getCriteria().getMatchingRegex().get());

        // Stage/worker list criteria each carry the single parsed element.
        assertEquals(1, request.getCriteria().getStageNumberList().get(0).intValue());
        assertEquals(11, request.getCriteria().getWorkerIndexList().get(0).intValue());
        assertEquals(233, request.getCriteria().getWorkerNumberList().get(0).intValue());
        assertEquals(1, request.getCriteria().getWorkerStateList().size());
        assertEquals(WorkerState.MetaState.Terminal, request.getCriteria().getWorkerStateList().get(0));

        // The label query "lab1=v1,lab3=v3" yields two name/value pairs in order.
        assertEquals(2, request.getCriteria().getMatchingLabels().size());
        assertEquals("lab1", request.getCriteria().getMatchingLabels().get(0).getName());
        assertEquals("v1", request.getCriteria().getMatchingLabels().get(0).getValue());
        assertEquals("lab3", request.getCriteria().getMatchingLabels().get(1).getName());
        assertEquals("v3", request.getCriteria().getMatchingLabels().get(1).getValue());
    }

    @Test
    public void testListJobRequestDefaults() {
        final JobClusterManagerProto.ListJobsRequest request =
            JobRouteUtils.createListJobsRequest(new HashMap<>(), Optional.empty(), true);

        // With no query parameters, the optional criteria are absent ...
        assertEquals(false, request.getCriteria().getLimit().isPresent());
        assertEquals(false, request.getCriteria().getJobState().isPresent());
        assertEquals(false, request.getCriteria().getLabelsOperand().isPresent());
        assertEquals(false, request.getCriteria().getMatchingRegex().isPresent());
        // ... the list criteria are empty ...
        assertEquals(0, request.getCriteria().getStageNumberList().size());
        assertEquals(0, request.getCriteria().getWorkerIndexList().size());
        assertEquals(0, request.getCriteria().getWorkerNumberList().size());
        assertEquals(0, request.getCriteria().getWorkerStateList().size());
        assertEquals(0, request.getCriteria().getMatchingLabels().size());
        // ... and activeOnly falls back to the supplied default (true).
        assertEquals(true, request.getCriteria().getActiveOnly().get());
    }
}
| 7,924 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/payloads/JobPayloads.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.payloads;
/**
 * Canned JSON request payloads for job API route tests.
 *
 * <p>Payloads come in pairs targeting an existing job ({@code sine-function-1})
 * and a non-existent one, so tests can exercise both success and failure paths.
 */
public class JobPayloads {

    private JobPayloads() {
        // static payload holder; not instantiable
    }

    /** Resubmit worker 2 of an existing job. */
    public static final String RESUBMIT_WORKER = "{" +
        "\"JobId\": \"sine-function-1\"," +
        "\"user\": \"JobRouteTest\"," +
        "\"workerNumber\": 2," +
        "\"reason\": \"test worker resubmit\"}";
    /** Resubmit request against a job that does not exist. */
    public static final String RESUBMIT_WORKER_NONEXISTENT = "{" +
        "\"JobId\": \"NonExistent-1\"," +
        "\"user\": \"JobRouteTest\"," +
        "\"workerNumber\": 2," +
        "\"reason\": \"test worker resubmit\"}";
    /** Scale stage 1 of an existing job to 3 workers. */
    public static final String SCALE_STAGE = "{" +
        "\"JobId\": \"sine-function-1\"," +
        "\"NumWorkers\": 3," +
        "\"StageNumber\": 1," +
        "\"Reason\": \"test stage scaling\"}";
    /** Stage-scale request against a job that does not exist. */
    public static final String SCALE_STAGE_NonExistent = "{" +
        "\"JobId\": \"NonExistent-1\"," +
        "\"NumWorkers\": 3," +
        "\"StageNumber\": 1," +
        "\"Reason\": \"test stage scaling\"}";
    /** Kill an existing job. */
    public static final String KILL_JOB = "{" +
        "\"JobId\": \"sine-function-1\"," +
        "\"user\": \"JobRouteTest\"," +
        "\"reason\": \"test job kill\"}";
    /** Kill request against a job that does not exist. */
    public static final String KILL_JOB_NonExistent = "{" +
        "\"JobId\": \"NonExistent-1\"," +
        "\"user\": \"JobRouteTest\"," +
        "\"reason\": \"test job kill\"}";
    /** A worker heartbeat status event for an existing job. */
    public static final String JOB_STATUS = "{\"jobId\":\"sine-function-1\",\"status\":{\"jobId\":\"sine-function-1\",\"stageNum\":1,\"workerIndex\":0,\"workerNumber\":2,\"type\":\"HEARTBEAT\",\"message\":\"heartbeat\",\"state\":\"Noop\",\"hostname\":null,\"timestamp\":1525813363585,\"reason\":\"Normal\",\"payloads\":[{\"type\":\"SubscriptionState\",\"data\":\"false\"},{\"type\":\"IncomingDataDrop\",\"data\":\"{\\\"onNextCount\\\":0,\\\"droppedCount\\\":0}\"}]}}";
}
| 7,925 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/payloads/JobClusterPayloads.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.payloads;
/**
 * Canned JSON request payloads for job-cluster API route tests.
 *
 * <p>Most payloads come in pairs: one targeting the {@code sine-function}
 * cluster that the tests create, and one targeting a non-existent cluster so
 * failure paths can be exercised as well.
 */
public class JobClusterPayloads {

    private JobClusterPayloads() {
        // static payload holder; not instantiable
    }

    // --- cluster create / update definitions ---
    public static final String JOB_CLUSTER_CREATE = "{\"jobDefinition\":{\"name\":\"sine-function\",\"user\":\"nmahilani\",\"jobJarFileLocation\":\"https://mantis.staging.us-east-1.prod.netflix.net/mantis-artifacts/mantis-examples-sine-function-0.2.9.zip\"," +
        "\"version\":\"0.2.9 2018-05-29 16:12:56\",\"schedulingInfo\":{\"stages\":{" +
        "\"1\":{\"numberOfInstances\":\"1\",\"machineDefinition\":{\"cpuCores\":\"1\",\"memoryMB\":\"1024\",\"diskMB\":\"1024\",\"networkMbps\":\"128\",\"numPorts\":\"1\"},\"scalable\":true," +
        "\"scalingPolicy\":{\"stage\":1,\"min\":\"1\",\"max\":\"10\",\"increment\":\"2\",\"decrement\":\"1\",\"coolDownSecs\":\"600\"," +
        "\"strategies\":{\"CPU\":{\"reason\":\"CPU\",\"scaleDownBelowPct\":\"15\",\"scaleUpAbovePct\":\"75\",\"rollingCount\":{\"count\":\"12\",\"of\":\"20\"}}},\"enabled\":true},\"softConstraints\":[],\"hardConstraints\":[]}}}," +
        "\"parameters\":[],\"labels\":[{\"name\":\"_mantis.user\",\"value\":\"nmahilani\"},{\"name\":\"_mantis.ownerEmail\",\"value\":\"nmahilani@netflix.com\"},{\"name\":\"_mantis.jobType\",\"value\":\"other\"},{\"name\":\"_mantis.criticality\",\"value\":\"low\"},{\"name\":\"_mantis.artifact.version\",\"value\":\"0.2.9\"}]," +
        "\"migrationConfig\":{\"strategy\":\"PERCENTAGE\",\"configString\":\"{\\\"percentToMove\\\":25,\\\"intervalMs\\\":60000}\"},\"slaMin\":\"0\",\"slaMax\":\"0\",\"cronSpec\":null,\"cronPolicy\":\"KEEP_EXISTING\",\"isReadyForJobMaster\":true}," +
        "\"owner\":{\"contactEmail\":\"nmahilani@netflix.com\",\"description\":\"\",\"name\":\"Nick Mahilani\",\"repo\":\"\",\"teamName\":\"\"}}";
    /** Like {@link #JOB_CLUSTER_CREATE} but pinned to a resource cluster via deploymentStrategy. */
    public static final String JOB_CLUSTER_CREATE_RC = "{\"jobDefinition\":{\"name\":\"sine-function-rc\","
        + "\"user\":\"nmahilani\",\"jobJarFileLocation\":\"https://mantis.staging.us-east-1.prod.netflix.net/mantis-artifacts/mantis-examples-sine-function-0.2.9.zip\"," +
        "\"version\":\"0.2.9 2018-05-29 16:12:56\",\"schedulingInfo\":{\"stages\":{" +
        "\"1\":{\"numberOfInstances\":\"1\",\"machineDefinition\":{\"cpuCores\":\"1\",\"memoryMB\":\"1024\",\"diskMB\":\"1024\",\"networkMbps\":\"128\",\"numPorts\":\"1\"},\"scalable\":true," +
        "\"scalingPolicy\":{\"stage\":1,\"min\":\"1\",\"max\":\"10\",\"increment\":\"2\",\"decrement\":\"1\",\"coolDownSecs\":\"600\"," +
        "\"strategies\":{\"CPU\":{\"reason\":\"CPU\",\"scaleDownBelowPct\":\"15\",\"scaleUpAbovePct\":\"75\",\"rollingCount\":{\"count\":\"12\",\"of\":\"20\"}}},\"enabled\":true},\"softConstraints\":[],\"hardConstraints\":[]}}}," +
        "\"parameters\":[],\"labels\":[{\"name\":\"_mantis.user\",\"value\":\"nmahilani\"},{\"name\":\"_mantis.ownerEmail\",\"value\":\"nmahilani@netflix.com\"},{\"name\":\"_mantis.jobType\",\"value\":\"other\"},{\"name\":\"_mantis.criticality\",\"value\":\"low\"},{\"name\":\"_mantis.artifact.version\",\"value\":\"0.2.9\"}]," +
        "\"migrationConfig\":{\"strategy\":\"PERCENTAGE\",\"configString\":\"{\\\"percentToMove\\\":25,\\\"intervalMs\\\":60000}\"},\"slaMin\":\"0\",\"slaMax\":\"0\",\"deploymentStrategy\":{\"resourceClusterId\":\"mantisagent\"},\"cronSpec\":null,\"cronPolicy\":\"KEEP_EXISTING\",\"isReadyForJobMaster\":true}," +
        "\"owner\":{\"contactEmail\":\"nmahilani@netflix.com\",\"description\":\"\",\"name\":\"Nick Mahilani\",\"repo\":\"\",\"teamName\":\"\"}}";
    /** Update for the existing sine-function cluster (bumps the version string). */
    public static final String JOB_CLUSTER_VALID_UPDATE = "{\"jobDefinition\":{\"name\":\"sine-function\",\"user\":\"nmahilani\",\"jobJarFileLocation\":\"https://mantis.staging.us-east-1.prod.netflix.net/mantis-artifacts/mantis-examples-sine-function-0.2.9.zip\"," +
        "\"version\":\"0.2.9 2018-05-29 new version\",\"schedulingInfo\":{\"stages\":{" +
        "\"1\":{\"numberOfInstances\":\"1\",\"machineDefinition\":{\"cpuCores\":\"1\",\"memoryMB\":\"1024\",\"diskMB\":\"1024\",\"networkMbps\":\"128\",\"numPorts\":\"1\"},\"scalable\":true," +
        "\"scalingPolicy\":{\"stage\":1,\"min\":\"1\",\"max\":\"10\",\"increment\":\"2\",\"decrement\":\"1\",\"coolDownSecs\":\"600\"," +
        "\"strategies\":{\"CPU\":{\"reason\":\"CPU\",\"scaleDownBelowPct\":\"15\",\"scaleUpAbovePct\":\"75\",\"rollingCount\":{\"count\":\"12\",\"of\":\"20\"}}},\"enabled\":true},\"softConstraints\":[],\"hardConstraints\":[]}}}," +
        "\"parameters\":[],\"labels\":[{\"name\":\"_mantis.user\",\"value\":\"nmahilani\"},{\"name\":\"_mantis.ownerEmail\",\"value\":\"nmahilani@netflix.com\"},{\"name\":\"_mantis.jobType\",\"value\":\"other\"},{\"name\":\"_mantis.criticality\",\"value\":\"low\"},{\"name\":\"_mantis.artifact.version\",\"value\":\"0.2.9\"}]," +
        "\"migrationConfig\":{\"strategy\":\"PERCENTAGE\",\"configString\":\"{\\\"percentToMove\\\":25,\\\"intervalMs\\\":60000}\"},\"slaMin\":\"0\",\"slaMax\":\"0\",\"cronSpec\":null,\"cronPolicy\":\"KEEP_EXISTING\",\"isReadyForJobMaster\":true}," +
        "\"owner\":{\"contactEmail\":\"nmahilani@netflix.com\",\"description\":\"\",\"name\":\"Nick Mahilani\",\"repo\":\"\",\"teamName\":\"\"}}";
    /** Update targeting a cluster name that was never created. */
    public static final String JOB_CLUSTER_INVALID_UPDATE = "{\"jobDefinition\":{\"name\":\"NonExistent\",\"user\":\"nmahilani\",\"jobJarFileLocation\":\"https://mantis.staging.us-east-1.prod.netflix.net/mantis-artifacts/mantis-examples-sine-function-0.2.9.zip\"," +
        "\"version\":\"0.2.9 2018-05-29 new version\",\"schedulingInfo\":{\"stages\":{" +
        "\"1\":{\"numberOfInstances\":\"1\",\"machineDefinition\":{\"cpuCores\":\"1\",\"memoryMB\":\"1024\",\"diskMB\":\"1024\",\"networkMbps\":\"128\",\"numPorts\":\"1\"},\"scalable\":true," +
        "\"scalingPolicy\":{\"stage\":1,\"min\":\"1\",\"max\":\"10\",\"increment\":\"2\",\"decrement\":\"1\",\"coolDownSecs\":\"600\"," +
        "\"strategies\":{\"CPU\":{\"reason\":\"CPU\",\"scaleDownBelowPct\":\"15\",\"scaleUpAbovePct\":\"75\",\"rollingCount\":{\"count\":\"12\",\"of\":\"20\"}}},\"enabled\":true},\"softConstraints\":[],\"hardConstraints\":[]}}}," +
        "\"parameters\":[],\"labels\":[{\"name\":\"_mantis.user\",\"value\":\"nmahilani\"},{\"name\":\"_mantis.ownerEmail\",\"value\":\"nmahilani@netflix.com\"},{\"name\":\"_mantis.jobType\",\"value\":\"other\"},{\"name\":\"_mantis.criticality\",\"value\":\"low\"},{\"name\":\"_mantis.artifact.version\",\"value\":\"0.2.9\"}]," +
        "\"migrationConfig\":{\"strategy\":\"PERCENTAGE\",\"configString\":\"{\\\"percentToMove\\\":25,\\\"intervalMs\\\":60000}\"},\"slaMin\":\"0\",\"slaMax\":\"0\",\"cronSpec\":null,\"cronPolicy\":\"KEEP_EXISTING\",\"isReadyForJobMaster\":true}," +
        "\"owner\":{\"contactEmail\":\"nmahilani@netflix.com\",\"description\":\"\",\"name\":\"Nick Mahilani\",\"repo\":\"\",\"teamName\":\"\"}}";

    // --- cluster lifecycle (delete / disable / enable) ---
    public static final String JOB_CLUSTER_DELETE = "{\n" +
        "    \"name\": \"sine-function\",\n" +
        "    \"user\": \"test\"}";
    public static final String JOB_CLUSTER_DISABLE = "{\n" +
        "    \"name\": \"sine-function\",\n" +
        "    \"user\": \"test\"}";
    public static final String JOB_CLUSTER_DISABLE_NONEXISTENT = "{\n" +
        "    \"name\": \"NonExistent\",\n" +
        "    \"user\": \"test\"}";
    public static final String JOB_CLUSTER_ENABLE = "{\n" +
        "    \"name\": \"sine-function\",\n" +
        "    \"user\": \"test\"}";
    public static final String JOB_CLUSTER_ENABLE_NONEXISTENT = "{\n" +
        "    \"name\": \"NonExistent\",\n" +
        "    \"user\": \"test\"}";

    // --- quick update / SLA / labels / migration strategy ---
    public static final String JOB_CLUSTER_QUICK_UPDATE_AND_SKIP_SUBMIT = "\n" +
        "{\"name\":\"sine-function\",\"version\":\"0.1.39 2018-03-13 09:40:53\",\"url\":\"https://mantis.staging.us-east-1.prod.netflix.net/mantis-artifacts/mantis-examples-sine-function-0.1.39.zip\",\"skipsubmit\":true,\"user\":\"nmahilani\"}";
    public static final String JOB_CLUSTER_QUICK_UPDATE_AND_SKIP_SUBMIT_NON_EXISTENT = "\n" +
        "{\"name\":\"NonExistent\",\"version\":\"0.1.39 2018-03-13 09:40:53\",\"url\":\"https://mantis.staging.us-east-1.prod.netflix.net/mantis-artifacts/mantis-examples-sine-function-0.1.39.zip\",\"skipsubmit\":true,\"user\":\"nmahilani\"}";
    public static final String JOB_CLUSTER_UPDATE_SLA =
        "{\"user\":\"nmahilani\",\"name\":\"sine-function\",\"min\":\"0\",\"max\":\"1\",\"cronspec\":\"\",\"cronpolicy\":\"KEEP_EXISTING\",\"forceenable\":false}";
    public static final String JOB_CLUSTER_UPDATE_SLA_NONEXISTENT =
        "{\"user\":\"nmahilani\",\"name\":\"NonExistent\",\"min\":\"0\",\"max\":\"1\",\"cronspec\":\"\",\"cronpolicy\":\"KEEP_EXISTING\",\"forceenable\":false}";
    public static final String JOB_CLUSTER_UPDATE_LABELS =
        "{\"name\":\"sine-function\",\"labels\":[{\"name\":\"_mantis.criticality\",\"value\":\"low\"},{\"name\":\"_mantis.dataOrigin\",\"value\":\"none\"}],\"user\":\"nmahilani\"}";
    public static final String JOB_CLUSTER_UPDATE_LABELS_NONEXISTENT =
        "{\"name\":\"NonExistent\",\"labels\":[{\"name\":\"_mantis.criticality\",\"value\":\"low\"},{\"name\":\"_mantis.dataOrigin\",\"value\":\"none\"}],\"user\":\"nmahilani\"}";
    public static final String MIGRATE_STRATEGY_UPDATE =
        "{\"name\":\"sine-function\",\"migrationConfig\":{\"strategy\":\"PERCENTAGE\",\"configString\":\"{\\\"percentToMove\\\":99,\\\"intervalMs\\\":10000}\"},\"user\":\"nmahilani\"}";
    public static final String MIGRATE_STRATEGY_UPDATE_NONEXISTENT =
        "{\"name\":\"NonExistent\",\"migrationConfig\":{\"strategy\":\"PERCENTAGE\",\"configString\":\"{\\\"percentToMove\\\":99,\\\"intervalMs\\\":10000}\"},\"user\":\"nmahilani\"}";

    // --- job submission ---
    public static final String QUICK_SUBMIT =
        "{\"name\":\"sine-function\",\"user\":\"nmahilani\",\"jobSla\":{\"durationType\":\"Perpetual\",\"runtimeLimitSecs\":\"0\",\"minRuntimeSecs\":\"0\",\"userProvidedType\":\"\"}}";
    public static final String QUICK_SUBMIT_NONEXISTENT =
        "{\"name\":\"NonExistent\",\"user\":\"nmahilani\",\"jobSla\":{\"durationType\":\"Perpetual\",\"runtimeLimitSecs\":\"0\",\"minRuntimeSecs\":\"0\",\"userProvidedType\":\"\"}}";
    public static final String JOB_CLUSTER_SUBMIT = "{\"name\":\"sine-function\",\"user\":\"nmahilani\",\"jobJarFileLocation\":\"\",\"version\":\"0.2.9 2018-05-29 16:12:56\"," +
        "\"jobSla\":{\"durationType\":\"Perpetual\",\"runtimeLimitSecs\":\"0\",\"minRuntimeSecs\":\"0\",\"userProvidedType\":\"\"}," +
        "\"schedulingInfo\":{\"stages\":{\"0\":{\"numberOfInstances\":1,\"machineDefinition\":{\"cpuCores\":1,\"memoryMB\":200,\"diskMB\":1024,\"networkMbps\":128,\"numPorts\":\"1\"},\"scalable\":false}," +
        "\"1\":{\"numberOfInstances\":1,\"machineDefinition\":{\"cpuCores\":1,\"memoryMB\":200,\"diskMB\":1024,\"networkMbps\":128,\"numPorts\":\"1\"},\"scalable\":true," +
        "\"scalingPolicy\":{\"stage\":1,\"min\":1,\"max\":10,\"increment\":2,\"decrement\":1,\"coolDownSecs\":600," +
        "\"strategies\":{\"CPU\":{\"reason\":\"CPU\",\"scaleDownBelowPct\":15,\"scaleUpAbovePct\":75,\"rollingCount\":{\"count\":12,\"of\":20}}},\"enabled\":true}," +
        "\"softConstraints\":[\"M4Cluster\"],\"hardConstraints\":[]}}},\"parameters\":[{\"name\":\"useRandom\",\"value\":\"True\"}, {\"name\":\"periodInSeconds\",\"value\":2}],\"isReadyForJobMaster\":true}";
    public static final String JOB_CLUSTER_SUBMIT_NonExistent = "{\"name\":\"NonExistent\",\"user\":\"nmahilani\",\"jobJarFileLocation\":\"\",\"version\":\"0.2.9 2018-05-29 16:12:56\"," +
        "\"jobSla\":{\"durationType\":\"Perpetual\",\"runtimeLimitSecs\":\"0\",\"minRuntimeSecs\":\"0\",\"userProvidedType\":\"\"}," +
        "\"schedulingInfo\":{\"stages\":{\"0\":{\"numberOfInstances\":1,\"machineDefinition\":{\"cpuCores\":1,\"memoryMB\":200,\"diskMB\":1024,\"networkMbps\":128,\"numPorts\":\"1\"},\"scalable\":false}," +
        "\"1\":{\"numberOfInstances\":1,\"machineDefinition\":{\"cpuCores\":1,\"memoryMB\":200,\"diskMB\":1024,\"networkMbps\":128,\"numPorts\":\"1\"},\"scalable\":true," +
        "\"scalingPolicy\":{\"stage\":1,\"min\":1,\"max\":10,\"increment\":2,\"decrement\":1,\"coolDownSecs\":600," +
        "\"strategies\":{\"CPU\":{\"reason\":\"CPU\",\"scaleDownBelowPct\":15,\"scaleUpAbovePct\":75,\"rollingCount\":{\"count\":12,\"of\":20}}},\"enabled\":true}," +
        "\"softConstraints\":[\"M4Cluster\"],\"hardConstraints\":[]}}},\"parameters\":[{\"name\":\"useRandom\",\"value\":\"True\"}, {\"name\":\"periodInSeconds\",\"value\":2}],\"isReadyForJobMaster\":true}";
}
| 7,926 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/payloads/ResourceClustersPayloads.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.payloads;
import io.mantisrx.shaded.com.google.common.collect.ImmutableMap;
import java.util.Map;
/**
 * Canned payloads and constants for resource-cluster API route tests.
 *
 * <p>Larger payloads are loaded from JSON files under {@code testpayloads/}
 * on the test classpath; small ones are inlined.
 */
public class ResourceClustersPayloads {

    private ResourceClustersPayloads() {
        // static payload holder; not instantiable
    }

    /** Cluster id shared across the resource-cluster tests. */
    public static final String CLUSTER_ID = "mantisResourceClusterUT1";
    public static final String RESOURCE_CLUSTER_CREATE =
        PayloadUtils.getStringFromResource("testpayloads/ResourceClusterCreate.json");
    public static final String RESOURCE_CLUSTER_SCALE_RESULT =
        PayloadUtils.getStringFromResource("testpayloads/ResourceClusterScaleResult.json");
    public static final String RESOURCE_CLUSTER_SCALE_RULES_CREATE =
        PayloadUtils.getStringFromResource("testpayloads/ResourceClusterScaleRulesCreate.json");
    public static final String RESOURCE_CLUSTER_SINGLE_SCALE_RULE_CREATE =
        PayloadUtils.getStringFromResource("testpayloads/ResourceClusterScaleRuleCreate.json");
    public static final String RESOURCE_CLUSTER_SCALE_RULES_RESULT =
        PayloadUtils.getStringFromResource("testpayloads/ResourceClusterScaleRulesResult.json");
    /** Scale the "small" SKU of the test cluster to 11 instances. */
    public static final String RESOURCE_CLUSTER_SKU_SCALE = "{\"clusterId\":{\"resourceID\": "
        + "\"mantisResourceClusterUT1\"},"
        + "\"skuId\":{\"resourceID\": \"small\"},\"region\":\"us-east-1\",\"envType\":\"Prod\","
        + "\"desireSize\":11}\n";
    /** Disable task executors matching the attributes below for 19 hours. */
    public static final String RESOURCE_CLUSTER_DISABLE_TASK_EXECUTORS_PAYLOAD = "" +
        "{\n" +
        "  \"expirationDurationInHours\": 19,\n" +
        "  \"attributes\": {\n" +
        "    \"attr1\": \"attr1\"\n" +
        "  }\n" +
        "}";
    // Must stay in sync with the attributes embedded in the payload above.
    public static final Map<String, String> RESOURCE_CLUSTER_DISABLE_TASK_EXECUTORS_ATTRS =
        ImmutableMap.of("attr1", "attr1");
}
| 7,927 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/payloads/AgentClusterPayloads.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.payloads;
/**
 * Canned request payloads for agent-cluster API route tests.
 */
public class AgentClusterPayloads {

    private AgentClusterPayloads() {
        // static payload holder; not instantiable
    }

    /** Body for activating agent clusters: a JSON array of cluster names. */
    public static final String SET_ACTIVE = "[\"mantistestagent-main-ec2-1\",\"mantistestagent-main-ec2-2\"]";
}
| 7,928 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/api/akka/payloads/PayloadUtils.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.api.akka.payloads;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
/**
 * Helpers for loading canned request payloads from test resources.
 */
public class PayloadUtils {

    private PayloadUtils() {
        // static utility; not instantiable
    }

    /**
     * Reads a classpath resource into a single string.
     *
     * <p>Lines are concatenated WITHOUT separators, so this is only suitable
     * for payloads (e.g. single-object JSON) whose meaning does not depend on
     * newlines.
     *
     * @param fileName classpath-relative resource name
     * @return the resource content with line breaks removed
     * @throws IllegalArgumentException if the resource does not exist on the classpath
     * @throws RuntimeException if the resource exists but cannot be read
     */
    public static String getStringFromResource(String fileName) {
        // Use this class' own loader (rather than another payload class') so
        // the helper does not depend on an unrelated type being loadable.
        InputStream inputStream =
            PayloadUtils.class.getClassLoader().getResourceAsStream(fileName);
        if (inputStream == null) {
            // Fail fast with a clear message instead of the opaque NPE the
            // reader constructor would otherwise throw.
            throw new IllegalArgumentException("Resource not found on classpath: " + fileName);
        }
        StringBuilder sb = new StringBuilder();
        try (InputStreamReader streamReader =
                 new InputStreamReader(inputStream, StandardCharsets.UTF_8);
             BufferedReader reader = new BufferedReader(streamReader)) {
            String line;
            while ((line = reader.readLine()) != null) {
                sb.append(line);
            }
        } catch (IOException e) {
            // The exception carries the file name and cause; no need to also
            // print to stdout.
            throw new RuntimeException("Failed to load resource: " + fileName, e);
        }
        return sb.toString();
    }
}
| 7,929 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/events/WorkerRegistryV2Test.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.events;
import static io.mantisrx.master.events.LifecycleEventsProto.StatusEvent.StatusEventType.INFO;
import static io.mantisrx.master.jobcluster.job.worker.MantisWorkerMetadataImpl.MANTIS_SYSTEM_ALLOCATED_NUM_PORTS;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS;
import static junit.framework.TestCase.assertTrue;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.testkit.javadsl.TestKit;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.events.LifecycleEventsProto.WorkerStatusEvent;
import io.mantisrx.master.jobcluster.WorkerInfoListHolder;
import io.mantisrx.master.jobcluster.job.IMantisStageMetadata;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.master.jobcluster.job.JobTestHelper;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.master.jobcluster.job.worker.JobWorker;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.jobcluster.proto.JobClusterProto;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.persistence.IMantisPersistenceProvider;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.persistence.exceptions.InvalidJobException;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.WorkerRegistry;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.util.*;
import java.util.concurrent.*;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
public class WorkerRegistryV2Test {
    static ActorSystem system;
    private static TestKit probe;
    // jobStore / storageProvider / user are currently unused; retained for the
    // persistence-backed setup kept (commented) in setup().
    private static MantisJobStore jobStore;
    private static IMantisPersistenceProvider storageProvider;
    private static final String user = "mantis";

    /** Creates the shared actor system, test probe and master configuration once for all tests. */
    @BeforeClass
    public static void setup() {
        system = ActorSystem.create();
        probe = new TestKit(system);
        TestHelpers.setupMasterConfig();
        // storageProvider = new MantisStorageProviderAdapter(new io.mantisrx.server.master.store.SimpleCachedFileStorageProvider(), eventPublisher);
        // jobStore = new MantisJobStore(storageProvider);
    }

    /** Shuts the shared actor system down after the last test. */
    @AfterClass
    public static void tearDown() {
        TestKit.shutdownActorSystem(system);
        system = null;
    }

    /** Registering 5 launched workers should yield a running count of 5. */
    @Test
    public void testGetRunningCount() {
        WorkerRegistryV2 workerRegistryV2 = new WorkerRegistryV2();
        initRegistryWithWorkers(workerRegistryV2, "testGetRunningCount-1", 5);
        assertEquals(5, workerRegistryV2.getNumRunningWorkers(null));
    }

    /** Every registered worker id should be reported as valid. */
    @Test
    public void testIsWorkerValid() {
        JobId jId = new JobId("testIsWorkerValid", 1);
        WorkerRegistryV2 workerRegistryV2 = new WorkerRegistryV2();
        initRegistryWithWorkers(workerRegistryV2, "testIsWorkerValid-1", 5);
        // initRegistryWithWorkers registers workers with workerIndex i and workerNumber i + 5.
        for (int i = 0; i < 5; i++) {
            assertTrue(workerRegistryV2.isWorkerValid(new WorkerId(jId.getId(), i, i + 5)));
        }
    }

    /** All registered workers should be returned by getAllRunningWorkers. */
    @Test
    public void testGetAllRunningWorkers() {
        WorkerRegistryV2 workerRegistryV2 = new WorkerRegistryV2();
        initRegistryWithWorkers(workerRegistryV2, "testGetAllRunningWorkers-1", 5);
        Set<WorkerId> allRunningWorkers = workerRegistryV2.getAllRunningWorkers(null);
        assertEquals(5, allRunningWorkers.size());
    }

    /** Worker id to slave id mappings should match the "slaveId-&lt;index&gt;" scheme used at registration. */
    @Test
    public void testGetSlaveIdMappings() {
        WorkerRegistryV2 workerRegistryV2 = new WorkerRegistryV2();
        initRegistryWithWorkers(workerRegistryV2, "testGetSlaveIdMappings-1", 5);
        Map<WorkerId, String> workerIdToSlaveIdMap = workerRegistryV2.getAllRunningWorkerSlaveIdMappings(null);
        assertEquals(5, workerIdToSlaveIdMap.size());
        for (int i = 0; i < 5; i++) {
            assertEquals("slaveId-" + i, workerIdToSlaveIdMap.get(new WorkerId("testGetSlaveIdMappings-1", i, i + 5)));
        }
    }

    /** getAcceptedAt should return the registration timestamp for known workers and empty otherwise. */
    @Test
    public void testGetAcceptedAt() {
        WorkerRegistryV2 workerRegistryV2 = new WorkerRegistryV2();
        initRegistryWithWorkers(workerRegistryV2, "testGetAcceptedAt-1", 5);
        Optional<Long> acceptedAt = workerRegistryV2.getAcceptedAt(new WorkerId("testGetAcceptedAt-1", 0, 5));
        assertTrue(acceptedAt.isPresent());
        // Worker at index 0 was registered with acceptedAt == 0. Long.valueOf avoids the
        // deprecated new Long(..) boxing constructor used previously.
        assertEquals(Long.valueOf(0L), acceptedAt.get());
        // An unknown worker id should yield an empty Optional.
        acceptedAt = workerRegistryV2.getAcceptedAt(new WorkerId("testGetAcceptedAt-1", 10, 1));
        assertFalse(acceptedAt.isPresent());
    }

    /** A terminal job status event should purge all of the job's workers from the registry. */
    @Test
    public void testJobCompleteCleanup() {
        WorkerRegistryV2 workerRegistryV2 = new WorkerRegistryV2();
        JobId jobId = new JobId("testJobCompleteCleanup", 1);
        initRegistryWithWorkers(workerRegistryV2, "testJobCompleteCleanup-1", 5);
        assertEquals(5, workerRegistryV2.getNumRunningWorkers(null));
        workerRegistryV2.process(new LifecycleEventsProto.JobStatusEvent(INFO, "job shutdown",
            jobId, JobState.Failed));
        assertEquals(0, workerRegistryV2.getNumRunningWorkers(null));
    }

    /** Scaling a 1-worker stage up to 2 should eventually show 3 running workers (1 JM + 2 stage). */
    @Test
    public void testJobScaleUp() throws Exception, InvalidJobException, io.mantisrx.runtime.command.InvalidJobException {
        WorkerRegistryV2 workerRegistryV2 = new WorkerRegistryV2();
        LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new DummyWorkerEventSubscriberImpl(workerRegistryV2));
        Map<StageScalingPolicy.ScalingReason, StageScalingPolicy.Strategy> smap = new HashMap<>();
        smap.put(StageScalingPolicy.ScalingReason.CPU, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.CPU, 0.5, 0.75, null));
        smap.put(StageScalingPolicy.ScalingReason.DataDrop, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.DataDrop, 0.0, 2.0, null));
        SchedulingInfo sInfo = new SchedulingInfo.Builder()
            .numberOfStages(1)
            .multiWorkerScalableStageWithConstraints(1,
                new MachineDefinition(1.0, 1.0, 1.0, 3),
                Lists.newArrayList(),
                Lists.newArrayList(),
                new StageScalingPolicy(1, 0, 10, 1, 1, 0, smap))
            .build();
        String clusterName = "testJobScaleUp";
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        ActorRef jobActor = JobTestHelper.submitSingleStageScalableJob(system, probe, clusterName, sInfo, schedulerMock, jobStoreMock, eventPublisher);
        assertEquals(2, workerRegistryV2.getNumRunningWorkers(null));
        // Send a scale-up request for stage 1: 1 -> 2 workers.
        jobActor.tell(new JobClusterManagerProto.ScaleStageRequest(clusterName + "-1", 1, 2, "", ""), probe.getRef());
        JobClusterManagerProto.ScaleStageResponse scaleResp = probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class);
        System.out.println("ScaleupResp " + scaleResp.message);
        assertEquals(SUCCESS, scaleResp.responseCode);
        assertEquals(2, scaleResp.getActualNumWorkers());
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, clusterName + "-1", 0, new WorkerId(clusterName + "-1", 1, 3));
        jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("user", new JobId(clusterName, 1)), probe.getRef());
        JobClusterManagerProto.GetJobDetailsResponse resp = probe.expectMsgClass(JobClusterManagerProto.GetJobDetailsResponse.class);
        Map<Integer, ? extends IMantisStageMetadata> stageMetadata = resp.getJobMetadata().get().getStageMetadata();
        assertEquals(2, stageMetadata.get(1).getAllWorkers().size());
        // Poll (with a brief pause) for the async worker event to propagate to the registry.
        // The previous loop had no pause, so it was effectively a single instantaneous check.
        int cnt = 0;
        for (int i = 0; i < 50; i++) {
            cnt++;
            if (workerRegistryV2.getNumRunningWorkers(null) == 3) {
                break;
            }
            Thread.sleep(10);
        }
        assertTrue(cnt < 50);
    }

    /** Scaling a 2-worker stage down to 1 should eventually show 2 running workers (1 JM + 1 stage). */
    @Test
    public void testJobScaleDown() throws Exception {
        WorkerRegistryV2 workerRegistryV2 = new WorkerRegistryV2();
        LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new DummyWorkerEventSubscriberImpl(workerRegistryV2));
        Map<StageScalingPolicy.ScalingReason, StageScalingPolicy.Strategy> smap = new HashMap<>();
        smap.put(StageScalingPolicy.ScalingReason.CPU, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.CPU, 0.5, 0.75, null));
        smap.put(StageScalingPolicy.ScalingReason.DataDrop, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.DataDrop, 0.0, 2.0, null));
        SchedulingInfo sInfo = new SchedulingInfo.Builder()
            .numberOfStages(1)
            .multiWorkerScalableStageWithConstraints(2,
                new MachineDefinition(1.0, 1.0, 1.0, 3),
                Lists.newArrayList(),
                Lists.newArrayList(),
                new StageScalingPolicy(1, 0, 10, 1, 1, 0, smap))
            .build();
        String clusterName = "testJobScaleDown";
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        ActorRef jobActor = JobTestHelper.submitSingleStageScalableJob(system, probe, clusterName, sInfo, schedulerMock, jobStoreMock, eventPublisher);
        assertEquals(3, workerRegistryV2.getNumRunningWorkers(null));
        // Send a scale-down request for stage 1: 2 -> 1 workers.
        jobActor.tell(new JobClusterManagerProto.ScaleStageRequest(clusterName + "-1", 1, 1, "", ""), probe.getRef());
        JobClusterManagerProto.ScaleStageResponse scaleResp = probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class);
        System.out.println("ScaleDownResp " + scaleResp.message);
        assertEquals(SUCCESS, scaleResp.responseCode);
        assertEquals(1, scaleResp.getActualNumWorkers());
        jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("user", new JobId(clusterName, 1)), probe.getRef());
        JobClusterManagerProto.GetJobDetailsResponse resp = probe.expectMsgClass(JobClusterManagerProto.GetJobDetailsResponse.class);
        Map<Integer, ? extends IMantisStageMetadata> stageMetadata = resp.getJobMetadata().get().getStageMetadata();
        assertEquals(1, stageMetadata.get(1).getAllWorkers().size());
        // Poll (with a brief pause) for the registry to observe the scale-down.
        int cnt = 0;
        for (int i = 0; i < 50; i++) {
            cnt++;
            if (workerRegistryV2.getNumRunningWorkers(null) == 2) {
                break;
            }
            Thread.sleep(10);
        }
        assertTrue(cnt < 50);
    }

    /** Killing a job should eventually drain all of its workers from the registry. */
    @Test
    public void testJobShutdown() {
        WorkerRegistryV2 workerRegistryV2 = new WorkerRegistryV2();
        LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new DummyWorkerEventSubscriberImpl(workerRegistryV2));
        Map<StageScalingPolicy.ScalingReason, StageScalingPolicy.Strategy> smap = new HashMap<>();
        smap.put(StageScalingPolicy.ScalingReason.CPU, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.CPU, 0.5, 0.75, null));
        smap.put(StageScalingPolicy.ScalingReason.DataDrop, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.DataDrop, 0.0, 2.0, null));
        SchedulingInfo sInfo = new SchedulingInfo.Builder()
            .numberOfStages(1)
            .multiWorkerScalableStageWithConstraints(1,
                new MachineDefinition(1.0, 1.0, 1.0, 3),
                Lists.newArrayList(),
                Lists.newArrayList(),
                new StageScalingPolicy(1, 0, 10, 1, 1, 0, smap))
            .build();
        String clusterName = "testJobShutdown";
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        try {
            ActorRef jobActor = JobTestHelper.submitSingleStageScalableJob(system, probe, clusterName, sInfo, schedulerMock, jobStoreMock, eventPublisher);
            assertEquals(2, workerRegistryV2.getNumRunningWorkers(null));
            jobActor.tell(new JobClusterProto.KillJobRequest(
                new JobId(clusterName, 1), "test reason", JobCompletedReason.Normal, "nj", probe.getRef()), probe.getRef());
            probe.expectMsgClass(JobClusterProto.KillJobResponse.class);
            Thread.sleep(1000);
            // Poll (with a brief pause) for the registry to drain to zero workers.
            int cnt = 0;
            for (int i = 0; i < 100; i++) {
                cnt++;
                if (workerRegistryV2.getNumRunningWorkers(null) == 0) {
                    break;
                }
                Thread.sleep(10);
            }
            assertTrue(cnt < 100);
        } catch (Exception e) {
            // Previously the exception was only printed, which made the test pass silently
            // on any failure. Fail explicitly while still surfacing the stack trace.
            e.printStackTrace();
            fail("testJobShutdown threw: " + e);
        }
    }

    /**
     * Stress test (disabled): concurrent writers registering workers while a reader polls
     * the running-worker count; verifies the maximum observed count matches the final count.
     */
    // @Test
    public void multiThreadAccessTest() {
        WorkerRegistryV2 workerRegistryV2 = new WorkerRegistryV2();
        CountDownLatch latch = new CountDownLatch(1);
        List<Writer> writerList = generateWriters(workerRegistryV2, 4, latch);
        TotalWorkerCountReader reader = new TotalWorkerCountReader(workerRegistryV2, latch);
        ExecutorService fixedThreadPoolExecutor = Executors.newFixedThreadPool(5);
        try {
            Future<Integer> maxCountSeen = fixedThreadPoolExecutor.submit(reader);
            fixedThreadPoolExecutor.invokeAll(writerList);
            int expectedCount = workerRegistryV2.getNumRunningWorkers(null);
            System.out.println("Actual no of workers " + workerRegistryV2.getNumRunningWorkers(null));
            int maxSeenCount = maxCountSeen.get();
            System.out.println("Max Count seen " + maxSeenCount);
            assertEquals(expectedCount, maxSeenCount);
        } catch (InterruptedException | ExecutionException e) {
            // Print before failing: the old code called fail() first, so printStackTrace()
            // was unreachable and the root cause was lost.
            e.printStackTrace();
            fail("multiThreadAccessTest threw: " + e);
        } finally {
            // The executor was previously leaked; always release its threads.
            fixedThreadPoolExecutor.shutdownNow();
        }
    }

    /** Builds {@code count} writer tasks, each registering 10 workers for a distinct job id. */
    List<Writer> generateWriters(WorkerEventSubscriber subscriber, int count, CountDownLatch latch) {
        List<Writer> writerList = new ArrayList<>();
        for (int i = 0; i < count; i++) {
            JobId jId = new JobId("multiThreadAccessTest" + i, 1);
            writerList.add(new Writer(subscriber, jId, 10, latch));
        }
        return writerList;
    }

    /**
     * Seeds the registry with {@code noOfWorkers} launched workers for {@code jobId}.
     * Worker i gets workerIndex i, workerNumber i + 5, slave id "slaveId-&lt;i&gt;" and
     * acceptedAt timestamp i.
     */
    private void initRegistryWithWorkers(WorkerRegistryV2 workerRegistryV2, String jobId, int noOfWorkers) {
        LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new NoOpWorkerEventSubscriberImpl());
        JobId jId = JobId.fromId(jobId).get();
        List<IMantisWorkerMetadata> workerMetadataList = new ArrayList<>();
        for (int i = 0; i < noOfWorkers; i++) {
            JobWorker jb = new JobWorker.Builder()
                .withAcceptedAt(i)
                .withJobId(jId)
                .withSlaveID("slaveId-" + i)
                .withState(WorkerState.Launched)
                .withWorkerIndex(i)
                .withWorkerNumber(i + 5)
                .withStageNum(1)
                .withNumberOfPorts(1 + MANTIS_SYSTEM_ALLOCATED_NUM_PORTS)
                .withLifecycleEventsPublisher(eventPublisher)
                .build();
            workerMetadataList.add(jb.getMetadata());
        }
        LifecycleEventsProto.WorkerListChangedEvent workerListChangedEvent = new LifecycleEventsProto.WorkerListChangedEvent(new WorkerInfoListHolder(jId, workerMetadataList));
        workerRegistryV2.process(workerListChangedEvent);
    }

    /** Forwards every worker lifecycle event to the wrapped registry, acting as a pass-through subscriber. */
    class DummyWorkerEventSubscriberImpl implements WorkerEventSubscriber {
        WorkerEventSubscriber workerRegistry;

        public DummyWorkerEventSubscriberImpl(WorkerEventSubscriber wr) {
            this.workerRegistry = wr;
        }

        @Override
        public void process(LifecycleEventsProto.WorkerListChangedEvent event) {
            workerRegistry.process(event);
        }

        @Override
        public void process(LifecycleEventsProto.JobStatusEvent statusEvent) {
            workerRegistry.process(statusEvent);
        }

        @Override
        public void process(WorkerStatusEvent workerStatusEvent) {
            workerRegistry.process(workerStatusEvent);
        }
    }

    /** Discards all worker lifecycle events; used where event publication is irrelevant. */
    class NoOpWorkerEventSubscriberImpl implements WorkerEventSubscriber {
        @Override
        public void process(LifecycleEventsProto.WorkerListChangedEvent event) {
        }

        @Override
        public void process(LifecycleEventsProto.JobStatusEvent statusEvent) {
        }

        @Override
        public void process(WorkerStatusEvent workerStatusEvent) {
        }
    }

    /**
     * Callable that, after the shared latch opens, publishes a growing worker list
     * (1..noOfWorkers) for its job to the subscriber under test.
     */
    class Writer implements Callable<Void> {
        private final int noOfWorkers;
        private final JobId jobId;
        WorkerEventSubscriber subscriber;
        CountDownLatch latch;

        public Writer(WorkerEventSubscriber subscriber, JobId jobId, int totalWorkerCount, CountDownLatch latch) {
            this.subscriber = subscriber;
            this.jobId = jobId;
            this.noOfWorkers = totalWorkerCount;
            this.latch = latch;
        }

        @Override
        public Void call() throws Exception {
            LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new NoOpWorkerEventSubscriberImpl());
            List<IMantisWorkerMetadata> workerMetadataList = new ArrayList<>();
            for (int i = 0; i < noOfWorkers; i++) {
                JobWorker jb = new JobWorker.Builder()
                    .withAcceptedAt(i)
                    .withJobId(jobId)
                    .withSlaveID("slaveId-" + i)
                    .withState(WorkerState.Launched)
                    .withWorkerIndex(i)
                    .withWorkerNumber(i + 5)
                    .withStageNum(1)
                    .withNumberOfPorts(1 + MANTIS_SYSTEM_ALLOCATED_NUM_PORTS)
                    .withLifecycleEventsPublisher(eventPublisher)
                    .build();
                workerMetadataList.add(jb.getMetadata());
            }
            // Wait for the reader thread to start so reads and writes overlap.
            latch.await();
            for (int j = 1; j <= noOfWorkers; j++) {
                LifecycleEventsProto.WorkerListChangedEvent workerListChangedEvent = new LifecycleEventsProto.WorkerListChangedEvent(new WorkerInfoListHolder(jobId, workerMetadataList.subList(0, j)));
                subscriber.process(workerListChangedEvent);
            }
            return null;
        }
    }

    /**
     * Callable that releases the writers (by counting down the latch) and then samples
     * the registry's running-worker count 100 times, returning the maximum observed.
     */
    class TotalWorkerCountReader implements Callable<Integer> {
        private final WorkerRegistry registry;
        private final CountDownLatch latch;

        public TotalWorkerCountReader(WorkerRegistryV2 registry, CountDownLatch latch) {
            this.registry = registry;
            this.latch = latch;
        }

        @Override
        public Integer call() throws Exception {
            int max = 0;
            latch.countDown();
            for (int i = 0; i < 100; i++) {
                int cnt = registry.getNumRunningWorkers(null);
                System.out.println("Total Cnt " + cnt);
                if (cnt > max) {
                    max = cnt;
                }
            }
            return max;
        }
    }
}
| 7,930 |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.master.scheduler;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import com.netflix.fenzo.VirtualMachineLease;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.master.jobcluster.job.MantisJobMetadataImpl;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.server.core.domain.JobMetadata;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.config.StaticPropertiesConfigurationFactory;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.mesos.VirtualMachineLeaseMesosImpl;
import io.mantisrx.server.master.scheduler.ScheduleRequest;
import java.util.Collections;
import java.util.Optional;
import java.util.Properties;
import org.apache.mesos.Protos;
public class TestHelpers {

    /**
     * Builds a Mockito-backed {@link VirtualMachineLeaseMesosImpl} that reports the given
     * resources, a single port range, an empty attribute map, and a matching Mesos
     * {@link Protos.Offer} (framework id "TestFramework").
     *
     * @param id         lease/offer id
     * @param hostname   hostname the lease reports
     * @param vmId       slave/VM id the lease reports
     * @param cpuCores   CPU cores offered
     * @param memoryMB   memory offered, in MB
     * @param networkMbps network bandwidth offered, in Mbps
     * @param diskMB     disk offered, in MB
     * @param range      the single port range exposed by the lease
     * @return a fully stubbed mock lease
     */
    public static VirtualMachineLeaseMesosImpl createMockLease(final String id,
                                                               final String hostname,
                                                               final String vmId,
                                                               final double cpuCores,
                                                               final double memoryMB,
                                                               final double networkMbps,
                                                               final double diskMB,
                                                               final VirtualMachineLease.Range range) {
        final VirtualMachineLeaseMesosImpl lease = mock(VirtualMachineLeaseMesosImpl.class);
        when(lease.hostname()).thenReturn(hostname);
        when(lease.getId()).thenReturn(id);
        when(lease.cpuCores()).thenReturn(cpuCores);
        when(lease.diskMB()).thenReturn(diskMB);
        when(lease.networkMbps()).thenReturn(networkMbps);
        when(lease.memoryMB()).thenReturn(memoryMB);
        when(lease.getAttributeMap()).thenReturn(Collections.emptyMap());
        when(lease.getVMID()).thenReturn(vmId);
        when(lease.portRanges()).thenReturn(Collections.singletonList(range));
        final Protos.Offer offer = Protos.Offer.newBuilder().setId(Protos.OfferID.newBuilder().setValue(id).build())
            .setFrameworkId(Protos.FrameworkID.newBuilder().setValue("TestFramework").build())
            .setHostname(hostname)
            .setSlaveId(Protos.SlaveID.newBuilder().setValue(vmId).build())
            .build();
        when(lease.getOffer()).thenReturn(offer);
        return lease;
    }

    /**
     * Creates a minimal {@link ScheduleRequest} (single stage-0 instance, perpetual SLA,
     * empty constraints) suitable for scheduler tests.
     *
     * @param workerId          the worker the request schedules
     * @param stageNum          stage number of the worker
     * @param numStages         total number of stages in the job
     * @param machineDefinition resources requested for the worker
     * @return the fake schedule request
     * @throws IllegalStateException if the underlying job metadata cannot be built
     */
    public static ScheduleRequest createFakeScheduleRequest(final WorkerId workerId,
                                                            final int stageNum,
                                                            final int numStages,
                                                            final MachineDefinition machineDefinition) {
        try {
            JobDefinition jobDefinition = new JobDefinition.Builder()
                .withArtifactName("jar")
                .withSchedulingInfo(new SchedulingInfo(Collections.singletonMap(0,
                    StageSchedulingInfo.builder()
                        .numberOfInstances(1)
                        .machineDefinition(machineDefinition)
                        .hardConstraints(Collections.emptyList()).softConstraints(Collections.emptyList())
                        .build())
                ))
                .withJobSla(new JobSla(0, 0, null, MantisJobDurationType.Perpetual, null))
                .build();
            IMantisJobMetadata mantisJobMetadata = new MantisJobMetadataImpl.Builder()
                .withJobId(JobId.fromId(workerId.getJobId()).get())
                .withJobDefinition(jobDefinition)
                .build();
            return new ScheduleRequest(
                workerId,
                stageNum,
                numStages,
                new JobMetadata(mantisJobMetadata.getJobId().getId(),
                    mantisJobMetadata.getJobJarUrl(),
                    mantisJobMetadata.getTotalStages(),
                    mantisJobMetadata.getUser(),
                    mantisJobMetadata.getSchedulingInfo(),
                    mantisJobMetadata.getParameters(),
                    mantisJobMetadata.getSubscriptionTimeoutSecs(),
                    0,
                    mantisJobMetadata.getMinRuntimeSecs()
                ),
                mantisJobMetadata.getSla().get().getDurationType(),
                machineDefinition,
                Collections.emptyList(),
                Collections.emptyList(),
                0, Optional.empty()
            );
        } catch (Exception e) {
            // Fail fast with context. The previous code printed the stack trace and
            // returned null, which surfaced later as an opaque NPE in the calling test.
            throw new IllegalStateException("Failed to build fake ScheduleRequest for " + workerId, e);
        }
    }

    /**
     * Initializes {@link ConfigurationProvider} with a static, local-mode master
     * configuration (ports, noop storage/resource-cluster providers, zookeeper and
     * mesos placeholders) so tests can run without external infrastructure.
     * Call once per JVM before exercising master components.
     */
    public static void setupMasterConfig() {
        final Properties props = new Properties();
        props.setProperty("mantis.master.consoleport", "8080");
        props.setProperty("mantis.master.apiport", "7070");
        props.setProperty("mantis.master.metrics.port", "7102");
        props.setProperty("mantis.master.apiportv2", "7075");
        props.setProperty("mantis.master.schedInfoPort", "7076");
        props.setProperty("mantis.master.workqueuelength", "100");
        props.setProperty("mantis.master.storageProvider", "io.mantisrx.server.master.store.KeyValueStorageProvider.NoopStorageProvider");
        props.setProperty("mantis.master.resourceClusterStorageProvider", "io.mantisrx.master.resourcecluster.resourceprovider.InMemoryOnlyResourceClusterStorageProvider");
        props.setProperty("mantis.master.resourceClusterProvider", "io.mantisrx.master.resourcecluster.resourceprovider.NoopResourceClusterProvider");
        props.setProperty("mantis.master.api.status.path", "api/postjobstatus");
        props.setProperty("mantis.master.mesos.failover.timeout.ms", "1000.0");
        props.setProperty("mantis.worker.executor.name", "Mantis Worker Executor");
        props.setProperty("mantis.localmode", "true");
        props.setProperty("mantis.zookeeper.connectionTimeMs", "1000");
        props.setProperty("mantis.zookeeper.connection.retrySleepMs", "100");
        props.setProperty("mantis.zookeeper.connection.retryCount", "3");
        props.setProperty("mantis.zookeeper.connectString", "ec2-50-19-255-1.compute-1.amazonaws.com:2181,ec2-54-235-159-245.compute-1.amazonaws.com:2181,ec2-50-19-255-97.compute-1.amazonaws.com:2181,ec2-184-73-152-248.compute-1.amazonaws.com:2181,ec2-50-17-247-179.compute-1.amazonaws.com:2181");
        props.setProperty("mantis.zookeeper.root", "/mantis/master");
        props.setProperty("mantis.zookeeper.leader.election.path", "/hosts");
        props.setProperty("mantis.zookeeper.leader.announcement.path", "/leader");
        props.setProperty("mesos.master.location", "127.0.0.1:5050");
        props.setProperty("mesos.worker.executorscript", "startup.sh");
        props.setProperty("mesos.worker.installDir", "/tmp/mantisWorkerInstall");
        props.setProperty("mantis.master.framework.name", "MantisFramework");
        props.setProperty("mesos.worker.timeoutSecondsToReportStart", "5");
        props.setProperty("mesos.lease.offer.expiry.secs", "1");
        props.setProperty("mantis.master.stage.assignment.refresh.interval.ms", "-1");
        props.setProperty("mantis.master.api.cache.ttl.milliseconds", "0");
        ConfigurationProvider.initialize(new StaticPropertiesConfigurationFactory(props));
    }
}
| 7,931 |
/*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//package com.netflix.mantis.master.scheduler;
//
//import com.netflix.fenzo.VirtualMachineLease;
//import com.netflix.mantis.master.JobMessageRouter;
//import com.netflix.mantis.master.WorkerRegistry;
//import com.netflix.mantis.master.jobcluster.job.worker.events.WorkerLaunchFailed;
//import com.netflix.mantis.master.jobcluster.job.worker.events.WorkerLaunched;
//import com.netflix.mantis.master.jobcluster.job.worker.events.WorkerUnscheduleable;
//import com.netflix.mantis.master.resourcemgmt.VMResourceManager;
//
//
//import io.mantisrx.runtime.MachineDefinition;
//import io.mantisrx.server.master.AgentClustersAutoScaler;
//import io.mantisrx.server.master.LaunchTaskException;
//import io.mantisrx.server.master.config.ConfigurationProvider;
//import io.mantisrx.server.master.domain.JobId;
//import io.mantisrx.server.master.domain.WorkerId;
//import io.mantisrx.server.master.domain.WorkerPorts;
//import io.mantisrx.server.master.mesos.VirtualMachineLeaseMesosImpl;
//import org.junit.Test;
//
//import java.net.MalformedURLException;
//import java.util.Arrays;
//import java.util.Collections;
//import java.util.Optional;
//import java.util.function.Consumer;
//
//import static org.mockito.Mockito.*;
//
//public class MantisSchedulerFenzoImplTest {
// final VMResourceManager vmResourceManagerMock = mock(VMResourceManager.class);
// final JobMessageRouter jobMessageRouterMock = mock(JobMessageRouter.class);
// final WorkerRegistry workerRegistryMock = mock(WorkerRegistry.class);
// final AgentClustersAutoScaler agentClustersAutoScalerMock = mock(AgentClustersAutoScaler.class);
//
// public void runTestCase(final MantisSchedulerFenzoImpl mantisSchedulerFenzo,
// final Consumer<MantisSchedulerFenzoImpl> consumer) {
// consumer.accept(mantisSchedulerFenzo);
// mantisSchedulerFenzo.shutdown();
// }
//
// @Test
// public void testWorkerLaunchSuccess() throws MalformedURLException {
// final JobId jobId = JobId.fromId("TestJobCluster-1").get();
// final WorkerId workerId = new WorkerId(jobId, 1, 2);
// final String fakeHostname = "127.0.0.1";
// final String fakeVMId = "VM_ID";
//
// VirtualMachineLeaseMesosImpl leaseMock = TestHelpers.createMockLease("lease_id", fakeHostname, fakeVMId, 4.0,
// 12000, 1024, 1024, new VirtualMachineLease.Range(15000, 15010));
//
//// jobLocatorMock.locateJob(jobId);
//// when(jobLocatorMock.locateJob(jobId)).thenReturn(jobManagerMock);
//
// when(workerRegistryMock.getAcceptedAt(workerId)).thenReturn(Optional.empty());
// final MantisSchedulerFenzoImpl schedulerFenzo = TestHelpers.createMantisScheduler(vmResourceManagerMock, jobMessageRouterMock, workerRegistryMock, agentClustersAutoScalerMock);
//
// runTestCase(schedulerFenzo, mantisScheduler -> {
//
// mantisScheduler.addOffers(Arrays.asList(leaseMock));
// ScheduleRequest fakeScheduleRequest = TestHelpers.createFakeScheduleRequest(workerId, 0, 1, new MachineDefinition(2, 1024, 128, 1024, 4));
// mantisScheduler.scheduleWorker(fakeScheduleRequest);
// WorkerPorts expectedAssignedPorts = new WorkerPorts(Arrays.asList(15000, 15001, 15002));
// WorkerLaunched expectedLaunchedEvent = new WorkerLaunched(workerId, fakeHostname, fakeVMId, expectedAssignedPorts);
// verify(jobMessageRouterMock, timeout(1_000).times(1)).routeWorkerEvent(expectedLaunchedEvent);
// verifyNoMoreInteractions(jobMessageRouterMock);
// });
// }
//
//
//
// @Test
// public void testWorkerLaunchFailed() throws MalformedURLException {
// final JobId jobId = JobId.fromId("TestJobCluster-1").get();
// final WorkerId workerId = new WorkerId(jobId, 1, 2);
// final String fakeHostname = "127.0.0.1";
// final String fakeVMId = "VM_ID";
// WorkerPorts workerPorts = new WorkerPorts(Arrays.asList(15000, 15001, 15002));
//
// VirtualMachineLeaseMesosImpl leaseMock = TestHelpers.createMockLease("lease_id", fakeHostname, fakeVMId, 4.0,
// 12000, 1024, 1024, new VirtualMachineLease.Range(15000, 15010));
//
// // when(jobLocatorMock.locateJob(jobId)).thenReturn(jobManagerMock);
// when(workerRegistryMock.getAcceptedAt(workerId)).thenReturn(Optional.empty());
//
// ScheduleRequest fakeScheduleRequest = TestHelpers.createFakeScheduleRequest(workerId, 0, 1, new MachineDefinition(2, 1024, 128, 1024, 4));
//
// // Simulate Mesos launch failure, should trigger a WorkerLaunched event followed by a WorkerLaunchFailed event
// when(vmResourceManagerMock.launchTasks(Arrays.asList(new LaunchTaskRequest(fakeScheduleRequest, workerPorts)), Arrays.asList(leaseMock)))
// .thenReturn(Collections.singletonMap(fakeScheduleRequest, new LaunchTaskException("fake exception", new IllegalStateException())));
//
// final MantisSchedulerFenzoImpl schedulerFenzo = TestHelpers.createMantisScheduler(vmResourceManagerMock, jobMessageRouterMock, workerRegistryMock, agentClustersAutoScalerMock);
//
// runTestCase(schedulerFenzo, mantisScheduler -> {
//
// mantisScheduler.addOffers(Arrays.asList(leaseMock));
// mantisScheduler.scheduleWorker(fakeScheduleRequest);
// WorkerPorts expectedAssignedPorts = new WorkerPorts(Arrays.asList(15000, 15001, 15002));
// WorkerLaunched expectedLaunchedEvent = new WorkerLaunched(workerId, fakeHostname, fakeVMId, expectedAssignedPorts);
// WorkerLaunchFailed expectedLaunchFailedEvent = new WorkerLaunchFailed(workerId, String.format("%s failed due to fake exception", workerId.toString()));
// verify(jobMessageRouterMock, timeout(1_000).times(1)).routeWorkerEvent(expectedLaunchedEvent);
// verify(jobMessageRouterMock, timeout(1_000).times(1)).routeWorkerEvent(expectedLaunchFailedEvent);
// });
// }
//
//
// @Test
// public void testWorkerUnscheduleable() throws MalformedURLException {
// final JobId jobId = JobId.fromId("TestJobCluster-1").get();
// final WorkerId workerId = new WorkerId(jobId, 1, 2);
// final String fakeHostname = "127.0.0.1";
// final String fakeVMId = "VM_ID";
// final int requestedMemoryMB = 1024;
// final int memoryFromResourceOffer = requestedMemoryMB / 2;
//
// VirtualMachineLeaseMesosImpl leaseMock = TestHelpers.createMockLease("lease_id", fakeHostname, fakeVMId, 4.0,
// memoryFromResourceOffer, 1024, 1024, new VirtualMachineLease.Range(15000, 15010));
// // JobManager jobManagerMock = mock(JobManager.class);
// //when(jobLocatorMock.locateJob(jobId)).thenReturn(jobManagerMock);
// when(workerRegistryMock.getAcceptedAt(workerId)).thenReturn(Optional.empty());
//
// final MantisSchedulerFenzoImpl schedulerFenzo = TestHelpers.createMantisScheduler(vmResourceManagerMock, jobMessageRouterMock, workerRegistryMock, agentClustersAutoScalerMock);
//
// runTestCase(schedulerFenzo, mantisScheduler -> {
// mantisScheduler.addOffers(Arrays.asList(leaseMock));
// ScheduleRequest fakeScheduleRequest = TestHelpers.createFakeScheduleRequest(workerId, 0, 1, new MachineDefinition(2, requestedMemoryMB, 128, 1024, 4));
// mantisScheduler.scheduleWorker(fakeScheduleRequest);
// WorkerUnscheduleable expectedWorkerEvent = new WorkerUnscheduleable(workerId);
// verify(jobMessageRouterMock, timeout(1_000).times(1)).routeWorkerEvent(expectedWorkerEvent);
// verifyNoMoreInteractions(jobMessageRouterMock);
// });
// }
//
// @Test
// public void testLeaseRejectedAfterOfferExpiry() {
// final String fakeHostname = "127.0.0.1";
// final String fakeVMId = "VM_ID";
//
// VirtualMachineLeaseMesosImpl leaseMock = TestHelpers.createMockLease("lease_id", fakeHostname, fakeVMId, 4.0,
// 12000, 1024, 1024, new VirtualMachineLease.Range(15000, 15010));
//
// final MantisSchedulerFenzoImpl schedulerFenzo = TestHelpers.createMantisScheduler(vmResourceManagerMock, jobMessageRouterMock, workerRegistryMock, agentClustersAutoScalerMock);
//
// runTestCase(schedulerFenzo, mantisScheduler -> {
//
// mantisScheduler.addOffers(Arrays.asList(leaseMock));
// try {
// Thread.sleep(ConfigurationProvider.getConfig().getMesosLeaseOfferExpirySecs()*1000 + 50);
// } catch (InterruptedException e) {
// e.printStackTrace();
// }
//
// verifyZeroInteractions(jobMessageRouterMock);
// verifyZeroInteractions(workerRegistryMock);
// verify(vmResourceManagerMock, timeout(10_000).times(1)).rejectLease(leaseMock);
// verifyNoMoreInteractions(vmResourceManagerMock);
// });
// }
//}
| 7,932 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/MantisAuditLogEvent.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
/**
 * Immutable value object describing one audit-log entry emitted by the master
 * (job lifecycle changes, scaling actions, and cluster events).
 */
public class MantisAuditLogEvent {

    /** Categories of auditable operations recorded by the master. */
    public enum Type {
        NAMED_JOB_CREATE,
        NAMED_JOB_UPDATE,
        NAMED_JOB_DELETE,
        NAMED_JOB_DISABLED,
        NAMED_JOB_ENABLED,
        JOB_SUBMIT,
        JOB_TERMINATE,
        JOB_DELETE,
        JOB_SCALE_UP,
        JOB_SCALE_DOWN,
        JOB_SCALE_UPDATE,
        WORKER_START,
        WORKER_TERMINATE,
        CLUSTER_SCALE_UP,
        CLUSTER_SCALE_DOWN,
        CLUSTER_ACTIVE_VMS
    }

    private final Type type;
    private final String operand;
    private final String data;

    /**
     * @param type    category of the audited operation
     * @param operand identifier of the entity acted upon (e.g. a job id or cluster name)
     * @param data    free-form detail payload for this entry
     */
    public MantisAuditLogEvent(Type type, String operand, String data) {
        this.type = type;
        this.operand = operand;
        this.data = data;
    }

    public Type getType() {
        return this.type;
    }

    public String getOperand() {
        return this.operand;
    }

    public String getData() {
        return this.data;
    }
}
| 7,933 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/LeadershipManagerZkImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.metrics.spectator.GaugeCallback;
import io.mantisrx.common.metrics.spectator.MetricGroupId;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.master.config.MasterConfiguration;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.time.Duration;
import java.time.Instant;
import java.util.concurrent.atomic.AtomicBoolean;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.functions.Func0;
/**
 * {@link ILeadershipManager} used with ZooKeeper-based leader election. Tracks leader and
 * readiness state, publishes them as gauges, starts the service lifecycle the first time
 * leadership is acquired, and terminates the process if leadership is lost after having led.
 */
public class LeadershipManagerZkImpl implements ILeadershipManager {

    private static final Logger logger = LoggerFactory.getLogger(LeadershipManagerZkImpl.class);

    // 1 while this node believes it is leader, 0 otherwise.
    private final Gauge isLeaderGauge;
    // 1 once the leader has been marked READY, 0 otherwise.
    private final Gauge isLeaderReadyGauge;
    // Guards one-time startup of the service lifecycle on first leadership acquisition.
    private final AtomicBoolean firstTimeLeaderMode = new AtomicBoolean(false);
    private final MasterConfiguration config;
    private final ServiceLifecycle serviceLifecycle;
    // Volatile: read from the metrics gauge callback and by callers on other threads.
    private volatile boolean isLeader = false;
    private volatile boolean isReady = false;
    // Set on first leadership acquisition; read by the leaderSinceInSeconds gauge callback
    // only while isLeader is true (becomeLeader() assigns it before flipping isLeader).
    private volatile Instant becameLeaderAt;

    /**
     * Registers the leadership gauges plus a leader-uptime gauge callback.
     *
     * @param config           master configuration, also used to build the {@link MasterDescription}
     * @param serviceLifecycle services started once this node first becomes leader
     */
    public LeadershipManagerZkImpl(final MasterConfiguration config,
                                   final ServiceLifecycle serviceLifecycle) {
        this.config = config;
        this.serviceLifecycle = serviceLifecycle;
        MetricGroupId metricGroupId = new MetricGroupId(MasterMain.class.getCanonicalName());
        // Seconds since leadership was acquired; reports 0 when not leader.
        Func0<Double> leaderSinceInSeconds = () -> {
            if (isLeader) {
                return (double) Duration.between(becameLeaderAt, Instant.now()).getSeconds();
            } else {
                return 0.0;
            }
        };
        Metrics m = new Metrics.Builder()
            .id(metricGroupId)
            .addGauge("isLeaderGauge")
            .addGauge("isLeaderReadyGauge")
            .addGauge(new GaugeCallback(metricGroupId, "leaderSinceInSeconds", leaderSinceInSeconds))
            .build();
        m = MetricsRegistry.getInstance().registerAndGet(m);
        isLeaderGauge = m.getGauge("isLeaderGauge");
        isLeaderReadyGauge = m.getGauge("isLeaderReadyGauge");
    }

    /**
     * Invoked when this node wins the election. Starts the service lifecycle on the first
     * call only; subsequent calls just log a warning but still (re)assert the leader flag.
     */
    public void becomeLeader() {
        logger.info("Becoming leader now");
        if (firstTimeLeaderMode.compareAndSet(false, true)) {
            serviceLifecycle.becomeLeader();
            isLeaderGauge.set(1L);
            becameLeaderAt = Instant.now();
        } else {
            logger.warn("Unexpected to be told to enter leader mode more than once, ignoring.");
        }
        isLeader = true;
    }

    public boolean isLeader() {
        return isLeader;
    }

    public boolean isReady() {
        return isReady;
    }

    /** Marks this leader as ready to serve and flips the readiness gauge. */
    public void setLeaderReady() {
        logger.info("marking leader READY");
        isLeaderReadyGauge.set(1L);
        isReady = true;
    }

    /**
     * Invoked when leadership is lost. If this node never actually led, this is a no-op
     * (beyond clearing flags/gauges); otherwise the process exits — see comment below.
     */
    public void stopBeingLeader() {
        logger.info("Asked to stop being leader now");
        isReady = false;
        isLeader = false;
        isLeaderGauge.set(0L);
        isLeaderReadyGauge.set(0L);
        if (!firstTimeLeaderMode.get()) {
            logger.warn("Unexpected to be told to stop being leader when we haven't entered leader mode before, ignoring.");
            return;
        }
        // Various services may have built in-memory state that is currently not easy to revert to initialization state.
        // Until we create such a lifecycle feature for each service and all of their references, best thing to do is to
        // exit the process and depend on a watcher process to restart us right away. Especially since restart isn't
        // very expensive.
        logger.error("Exiting due to losing leadership after running as leader");
        System.exit(1);
    }

    /** @return this master's connectivity description, derived from config plus local host info. */
    public MasterDescription getDescription() {
        return new MasterDescription(
            getHost(),
            getHostIP(),
            config.getApiPort(),
            config.getSchedInfoPort(),
            config.getApiPortV2(),
            config.getApiStatusUri(),
            config.getConsolePort(),
            System.currentTimeMillis()
        );
    }

    // Hostname from config when set, otherwise the local host's name.
    private String getHost() {
        String host = config.getMasterHost();
        if (host != null) {
            return host;
        }
        try {
            return InetAddress.getLocalHost().getHostName();
        } catch (UnknownHostException e) {
            throw new RuntimeException("Failed to get the host information: " + e.getMessage(), e);
        }
    }

    // IP from config when set, otherwise the local host's address.
    private String getHostIP() {
        String ip = config.getMasterIP();
        if (ip != null) {
            return ip;
        }
        try {
            return InetAddress.getLocalHost().getHostAddress();
        } catch (UnknownHostException e) {
            throw new RuntimeException("Failed to get the host information: " + e.getMessage(), e);
        }
    }
}
| 7,934 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/ExecuteStageRequestFactory.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
import io.mantisrx.server.core.ExecuteStageRequest;
import io.mantisrx.server.master.config.MasterConfiguration;
import io.mantisrx.server.master.resourcecluster.TaskExecutorRegistration;
import io.mantisrx.server.master.scheduler.ScheduleRequest;
import java.util.Optional;
import lombok.RequiredArgsConstructor;
/**
 * Builds the {@link ExecuteStageRequest} sent to a task executor once a scheduled worker
 * has been matched to it, combining job metadata from the schedule request with the
 * executor's registered ports.
 */
@RequiredArgsConstructor
public class ExecuteStageRequestFactory {

    private final MasterConfiguration masterConfiguration;

    /**
     * @param scheduleRequest         the worker scheduling request carrying job metadata
     * @param matchedTaskExecutorInfo registration info of the executor chosen for this worker
     * @return the request describing which stage/worker to execute and on which ports
     */
    public ExecuteStageRequest of(
        ScheduleRequest scheduleRequest,
        TaskExecutorRegistration matchedTaskExecutorInfo) {
        return new ExecuteStageRequest(
            scheduleRequest.getWorkerId().getJobCluster(),
            scheduleRequest.getWorkerId().getJobId(),
            scheduleRequest.getWorkerId().getWorkerIndex(),
            scheduleRequest.getWorkerId().getWorkerNum(),
            scheduleRequest.getJobMetadata().getJobJarUrl(),
            scheduleRequest.getStageNum(),
            scheduleRequest.getJobMetadata().getTotalStages(),
            matchedTaskExecutorInfo.getWorkerPorts().getPorts(),
            masterConfiguration.getTimeoutSecondsToReportStart(),
            matchedTaskExecutorInfo.getWorkerPorts().getMetricsPort(),
            scheduleRequest.getJobMetadata().getParameters(),
            scheduleRequest.getJobMetadata().getSchedulingInfo(),
            scheduleRequest.getDurationType(),
            scheduleRequest.getJobMetadata().getHeartbeatIntervalSecs(),
            scheduleRequest.getJobMetadata().getSubscriptionTimeoutSecs(),
            // NOTE(review): this evaluates to minRuntimeSecs - (now - minRuntimeSecs), i.e. it
            // subtracts wall-clock time minus a duration from a duration. Computing "remaining
            // min runtime" would normally subtract elapsed time since job submission; the second
            // getMinRuntimeSecs() call looks like it was meant to be a submission timestamp —
            // verify against ExecuteStageRequest's expected units before relying on this value.
            scheduleRequest.getJobMetadata().getMinRuntimeSecs() - (System.currentTimeMillis() - scheduleRequest.getJobMetadata().getMinRuntimeSecs()),
            matchedTaskExecutorInfo.getWorkerPorts(),
            Optional.empty(),
            scheduleRequest.getJobMetadata().getUser());
    }
}
| 7,935 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/DurationTypeFitnessCalculator.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
import com.netflix.fenzo.TaskAssignmentResult;
import com.netflix.fenzo.TaskRequest;
import com.netflix.fenzo.TaskTrackerState;
import com.netflix.fenzo.VMTaskFitnessCalculator;
import com.netflix.fenzo.VirtualMachineCurrentState;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.server.master.scheduler.ScheduleRequest;
/**
 * Fitness calculator that prefers co-locating tasks of the same {@link MantisJobDurationType}.
 * The score is the fraction of the target host's tasks (running plus currently assigned)
 * whose duration type matches the requesting task's.
 */
public class DurationTypeFitnessCalculator implements VMTaskFitnessCalculator {

    @Override
    public String getName() {
        return "Mantis Job Duration Type Task Fitness Calculator";
    }

    @Override
    public double calculateFitness(TaskRequest taskRequest, VirtualMachineCurrentState targetVM, TaskTrackerState taskTrackerState) {
        final MantisJobDurationType wanted = ((ScheduleRequest) taskRequest).getDurationType();
        int total = 0;
        int matching = 0;
        for (TaskRequest running : targetVM.getRunningTasks()) {
            total++;
            if (((ScheduleRequest) running).getDurationType() == wanted) {
                matching++;
            }
        }
        for (TaskAssignmentResult assigned : targetVM.getTasksCurrentlyAssigned()) {
            total++;
            if (((ScheduleRequest) assigned.getRequest()).getDurationType() == wanted) {
                matching++;
            }
        }
        if (total == 0) {
            // An empty host is a good — but deliberately not perfect — fit: better than a host
            // running tasks of a different duration type, worse than one full of matching tasks.
            return 0.9;
        }
        return matching / (double) total;
    }
}
| 7,936 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/SchedulerCounters.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Process-wide singleton tracking counters for the scheduler's assignment iterations:
 * workers requested/launched, offers (slaves) used/rejected, and the number of
 * resource-allocation trials performed since the previous iteration.
 */
public class SchedulerCounters {

    private static final SchedulerCounters instance = new SchedulerCounters();
    // ObjectMapper is expensive to construct and thread-safe once configured, so share one
    // instance for all snapshots instead of building a new mapper per IterationCounter.
    private static final ObjectMapper mapper = new ObjectMapper()
        .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

    // Monotonically increasing iteration id for the life of this process.
    private final AtomicInteger iterationNumberCounter = new AtomicInteger();
    // Accumulates allocation trials between iterations; reset to 0 by endIteration().
    private final AtomicInteger numResourceAllocationTrials = new AtomicInteger(0);
    // Snapshot of the most recently completed iteration; null until the first endIteration().
    private volatile IterationCounter counter = null;

    private SchedulerCounters() {
    }

    public static SchedulerCounters getInstance() {
        return instance;
    }

    /** Adds {@code delta} allocation trials to the tally for the in-progress iteration. */
    public void incrementResourceAllocationTrials(int delta) {
        numResourceAllocationTrials.addAndGet(delta);
    }

    /** @return the last completed iteration's counters, or null before the first iteration ends */
    public IterationCounter getCounter() {
        return counter;
    }

    /** Publishes a snapshot for the iteration that just finished and resets the trial tally. */
    void endIteration(int numWorkersToLaunch, int numWorkersLaunched, int numSlavesToUse, int numSlavesRejected) {
        counter = new IterationCounter(iterationNumberCounter.getAndIncrement(), numWorkersToLaunch,
            numWorkersLaunched, numSlavesToUse, numSlavesRejected, numResourceAllocationTrials.getAndSet(0));
    }

    /** JSON form of the latest snapshot. Throws NullPointerException before the first iteration. */
    String toJsonString() {
        return counter.toJsonString();
    }

    /**
     * Snapshot of one scheduling iteration's counters.
     * <p>
     * Declared {@code static} so instances do not capture a hidden reference to the enclosing
     * {@code SchedulerCounters} and so Jackson can instantiate them through the
     * {@code @JsonCreator} constructor (non-static inner classes cannot be deserialized).
     */
    public static class IterationCounter {

        private int iterationNumber;
        private int numWorkersToLaunch;
        private int numWorkersLaunched;
        private int numSlavesToUse;
        private int numSlavesRejected;
        private int numResourceAllocations;

        @JsonCreator
        @JsonIgnoreProperties(ignoreUnknown = true)
        IterationCounter(@JsonProperty("iterationNumber") int iterationNumber,
                         @JsonProperty("numWorkersToLaunch") int numWorkersToLaunch,
                         @JsonProperty("numWorkersLaunched") int numWorkersLaunched,
                         @JsonProperty("numSlavesToUse") int numOffersToUse,
                         @JsonProperty("numSlavesRejected") int numOffersRejected,
                         @JsonProperty("numResourceAllocationTrials") int numResourceAllocations) {
            this.iterationNumber = iterationNumber;
            this.numWorkersToLaunch = numWorkersToLaunch;
            this.numWorkersLaunched = numWorkersLaunched;
            this.numSlavesToUse = numOffersToUse;
            this.numSlavesRejected = numOffersRejected;
            this.numResourceAllocations = numResourceAllocations;
        }

        /** Bumps the iteration number and overwrites the worker/offer counters in place. */
        void setCounters(int numWorkersToLaunch,
                         int numWorkersLaunched,
                         int numOffersToUse,
                         int numOffersRejected) {
            this.iterationNumber++;
            this.numWorkersToLaunch = numWorkersToLaunch;
            this.numWorkersLaunched = numWorkersLaunched;
            this.numSlavesToUse = numOffersToUse;
            this.numSlavesRejected = numOffersRejected;
        }

        public int getIterationNumber() {
            return iterationNumber;
        }

        public int getNumWorkersToLaunch() {
            return numWorkersToLaunch;
        }

        public int getNumWorkersLaunched() {
            return numWorkersLaunched;
        }

        public int getNumSlavesToUse() {
            return numSlavesToUse;
        }

        public int getNumSlavesRejected() {
            return numSlavesRejected;
        }

        public int getNumResourceAllocations() {
            return numResourceAllocations;
        }

        public String toJsonString() {
            try {
                return mapper.writeValueAsString(this);
            } catch (JsonProcessingException e) {
                // shouldn't happen
                return iterationNumber + ", " + numWorkersToLaunch + ", " + numWorkersLaunched + ", " + numSlavesToUse
                    + ", " + numSlavesRejected + ", " + numResourceAllocations;
            }
        }
    }
}
| 7,937 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/LeadershipManagerLocalImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
import io.mantisrx.server.core.master.MasterDescription;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * {@link ILeadershipManager} for single-master (local) deployments: there is no external
 * election, so this process is considered leader from startup. Readiness is still toggled
 * explicitly via {@link #setLeaderReady()}.
 */
public class LeadershipManagerLocalImpl implements ILeadershipManager {

    private static final Logger logger = LoggerFactory.getLogger(LeadershipManagerLocalImpl.class);

    private final MasterDescription masterDescription;
    // Leader from the start — local mode has no election backend.
    private volatile boolean isLeader = true;
    private volatile boolean isReady = false;

    public LeadershipManagerLocalImpl(MasterDescription masterDescription) {
        this.masterDescription = masterDescription;
    }

    @Override
    public void becomeLeader() {
        logger.info("Becoming leader now");
        isLeader = true;
    }

    @Override
    public boolean isLeader() {
        logger.debug("is leader? {}", isLeader);
        return isLeader;
    }

    @Override
    public boolean isReady() {
        return isReady;
    }

    @Override
    public void setLeaderReady() {
        logger.info("marking leader READY");
        isReady = true;
    }

    @Override
    public void stopBeingLeader() {
        logger.info("Asked to stop being leader now");
        isReady = false;
        isLeader = false;
    }

    @Override
    public MasterDescription getDescription() {
        return masterDescription;
    }
}
| 7,938 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/MantisJobStatus.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
import io.mantisrx.server.core.Status;
import rx.Observable;
/**
 * Handle pairing a submitted job's id and name with the observable stream of its
 * {@link Status} updates. The timestamp records when this handle was created.
 */
public class MantisJobStatus {

    private final String jobId;
    private final String name;
    private final long timestamp;
    private final Observable<Status> status;
    // Set only if the job hit an unrecoverable error; null otherwise.
    private String fatalError = null;

    MantisJobStatus(String jobId, Observable<Status> status,
                    String name) {
        this.jobId = jobId;
        this.status = status;
        this.name = name;
        this.timestamp = System.currentTimeMillis();
    }

    public String getJobId() {
        return jobId;
    }

    public Observable<Status> getStatus() {
        return status;
    }

    public String getName() {
        return name;
    }

    public long getTimestamp() {
        return timestamp;
    }

    public boolean hasFatalError() {
        return fatalError != null;
    }

    public String getFatalError() {
        return fatalError;
    }

    public void setFatalError(String fatalError) {
        this.fatalError = fatalError;
    }
}
| 7,939 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/AgentClustersAutoScaler.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
import com.netflix.fenzo.AutoScaleAction;
import com.netflix.fenzo.AutoScaleRule;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import rx.Observer;
import rx.functions.Func0;
/**
 * Process-wide access point to the agent-cluster autoscaler hooks: a supplier of the current
 * autoscale rules and an observer receiving scale up/down actions. Must be initialized
 * exactly once via {@link #initialize} before {@link #get} is called.
 */
public class AgentClustersAutoScaler {

    private static final AtomicBoolean initialized = new AtomicBoolean(false);
    private static AgentClustersAutoScaler autoScaler;

    private final Func0<Set<AutoScaleRule>> rulesGetter;
    private final Observer<AutoScaleAction> autoScaleActionObserver;

    private AgentClustersAutoScaler(Func0<Set<AutoScaleRule>> rulesGetter, Observer<AutoScaleAction> autoScaleActionObserver) {
        this.rulesGetter = rulesGetter;
        this.autoScaleActionObserver = autoScaleActionObserver;
    }

    /**
     * Installs the singleton; allowed at most once per process.
     *
     * @throws IllegalStateException if already initialized
     */
    public synchronized static void initialize(Func0<Set<AutoScaleRule>> rulesGetter, Observer<AutoScaleAction> autoScaleActionObserver) {
        if (!initialized.compareAndSet(false, true)) {
            throw new IllegalStateException(AgentClustersAutoScaler.class.getName() + " already initialized");
        }
        autoScaler = new AgentClustersAutoScaler(rulesGetter, autoScaleActionObserver);
    }

    /**
     * @throws IllegalStateException if {@link #initialize} has not been called yet
     */
    public static AgentClustersAutoScaler get() throws IllegalStateException {
        if (!initialized.get()) {
            throw new IllegalStateException(AgentClustersAutoScaler.class.getName() + " not initialized");
        }
        return autoScaler;
    }

    /** @return the current autoscale rules, or null when no rules supplier was provided */
    public Set<AutoScaleRule> getRules() {
        if (rulesGetter == null) {
            return null;
        }
        return rulesGetter.call();
    }

    public Observer<AutoScaleAction> getAutoScaleActionObserver() {
        return autoScaleActionObserver;
    }
}
| 7,940 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/AgentFitnessCalculator.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
import com.netflix.fenzo.TaskRequest;
import com.netflix.fenzo.TaskTrackerState;
import com.netflix.fenzo.VMTaskFitnessCalculator;
import com.netflix.fenzo.VirtualMachineCurrentState;
import com.netflix.fenzo.functions.Func1;
import com.netflix.fenzo.plugins.BinPackingFitnessCalculators;
import io.mantisrx.server.master.config.ConfigurationProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Composite fitness calculator blending bin-packing, preferred-cluster, and duration-type
 * scores into one weighted average. Weights and the "good enough" threshold are read from
 * the master configuration at construction time.
 */
public class AgentFitnessCalculator implements VMTaskFitnessCalculator {

    private static final Logger logger = LoggerFactory.getLogger(AgentFitnessCalculator.class);

    final VMTaskFitnessCalculator binPacker = BinPackingFitnessCalculators.cpuMemNetworkBinPacker;
    final VMTaskFitnessCalculator durationTypeFitnessCalculator = new DurationTypeFitnessCalculator();
    final VMTaskFitnessCalculator clusterFitnessCalculator = new ClusterFitnessCalculator();

    private final double binPackingWeight;
    private final double clusterWeight;
    private final double durationTypeWeight;
    private final double goodEnoughThreshold;
    private final Func1<Double, Boolean> fitnessGoodEnoughFunc;

    public AgentFitnessCalculator() {
        binPackingWeight = ConfigurationProvider.getConfig().getBinPackingFitnessWeight();
        clusterWeight = ConfigurationProvider.getConfig().getPreferredClusterFitnessWeight();
        durationTypeWeight = ConfigurationProvider.getConfig().getDurationTypeFitnessWeight();
        goodEnoughThreshold = ConfigurationProvider.getConfig().getFitnessGoodEnoughThreshold();
        logger.info("clusterWeight {} durationTypeWeight {} binPackingWeight {} goodEnoughThreshold {}", clusterWeight, durationTypeWeight, binPackingWeight, goodEnoughThreshold);
        this.fitnessGoodEnoughFunc = fitness -> fitness > goodEnoughThreshold;
    }

    @Override
    public String getName() {
        return "Mantis Agent Task Fitness Calculator";
    }

    @Override
    public double calculateFitness(TaskRequest taskRequest, VirtualMachineCurrentState targetVM, TaskTrackerState taskTrackerState) {
        final double binPackScore = binPacker.calculateFitness(taskRequest, targetVM, taskTrackerState);
        final double durationScore = durationTypeFitnessCalculator.calculateFitness(taskRequest, targetVM, taskTrackerState);
        final double clusterScore = clusterFitnessCalculator.calculateFitness(taskRequest, targetVM, taskTrackerState);
        // Additional calculators (e.g. stream locality) could be folded in here later.
        final double weightedTotal = binPackScore * binPackingWeight
            + durationScore * durationTypeWeight
            + clusterScore * clusterWeight;
        if (logger.isDebugEnabled()) {
            logger.debug("cluster {} duration {} binpack score {} total {}", clusterScore * clusterWeight,
                durationScore * durationTypeWeight, binPackScore * binPackingWeight,
                weightedTotal);
        }
        return weightedTotal / (binPackingWeight + durationTypeWeight + clusterWeight);
    }

    public Func1<Double, Boolean> getFitnessGoodEnoughFunc() {
        return fitnessGoodEnoughFunc;
    }
}
| 7,941 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/LaunchTaskException.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
/**
 * Checked exception raised when launching a worker task fails; the cause carries the
 * underlying failure.
 */
public class LaunchTaskException extends Exception {

    private static final long serialVersionUID = 1L;

    /**
     * @param message description of the launch failure
     * @param cause   underlying error that prevented the launch
     */
    public LaunchTaskException(String message, Throwable cause) {
        super(message, cause);
    }
}
| 7,942 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/VirtualMachineMasterService.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
import com.netflix.fenzo.VirtualMachineLease;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.scheduler.LaunchTaskRequest;
import io.mantisrx.server.master.scheduler.ScheduleRequest;
import java.util.List;
import java.util.Map;
/**
 * Abstraction over the VM/agent layer used by the master to launch and kill worker tasks
 * and to return unused resource leases.
 */
public interface VirtualMachineMasterService {

    /**
     * Launches the given task requests against the supplied resource leases.
     *
     * @return a map from schedule request to its {@link LaunchTaskException}
     *         (presumably only for requests that failed to launch — confirm with
     *         the implementation before relying on this)
     */
    Map<ScheduleRequest, LaunchTaskException> launchTasks(List<LaunchTaskRequest> requests, List<VirtualMachineLease> leases);

    /** Returns an unused resource lease/offer to the resource manager. */
    void rejectLease(VirtualMachineLease lease);

    /** Kills the task running the given worker. */
    void killTask(final WorkerId workerId);
}
| 7,943 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/ClusterAffinityConstraint.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
import com.netflix.fenzo.ConstraintEvaluator;
import com.netflix.fenzo.TaskRequest;
import com.netflix.fenzo.TaskTrackerState;
import com.netflix.fenzo.VirtualMachineCurrentState;
import java.util.Map;
import org.apache.mesos.Protos;
/**
 * Fenzo placement constraint that restricts a task to hosts whose cluster attribute value
 * begins with a given cluster name.
 */
public class ClusterAffinityConstraint implements ConstraintEvaluator {

    // Name of the host attribute carrying the cluster (ASG) identifier.
    private final String asgAttributeName;
    // Cluster name prefix a host's attribute value must start with to be accepted.
    private final String clusterName;
    private final String name;

    public ClusterAffinityConstraint(String clusterAttributeName, String clusterName) {
        this.asgAttributeName = clusterAttributeName;
        this.clusterName = clusterName;
        this.name = ClusterAffinityConstraint.class.getName() + "-" + clusterAttributeName;
    }

    @Override
    public String getName() {
        return name;
    }

    /**
     * Accepts the target host only when its cluster attribute is present and initialized and
     * its text value starts with the configured cluster name; rejects it otherwise, including
     * when the attribute is missing from the host entirely.
     *
     * @param taskRequest      the task being considered for assignment (unused here)
     * @param targetVM         the host being considered as a target for the task
     * @param taskTrackerState state of tasks assigned or running system-wide (unused here)
     *
     * @return a successful Result when the host's cluster attribute value begins with the
     *         configured cluster name, an unsuccessful Result with a reason string otherwise
     */
    @Override
    public Result evaluate(TaskRequest taskRequest, VirtualMachineCurrentState targetVM,
                           TaskTrackerState taskTrackerState) {
        //String clusterName = AttributeUtilities.getAttrValue(targetVM.getCurrAvailableResources(), hostAttributeName);
        Map<String, Protos.Attribute> attributeMap = targetVM.getCurrAvailableResources().getAttributeMap();
        if (asgAttributeName != null && attributeMap != null && attributeMap.get(asgAttributeName) != null) {
            // Protobuf isInitialized(): the text field has all required parts set.
            if (attributeMap.get(asgAttributeName).getText().isInitialized()) {
                String targetClusterName = attributeMap.get(asgAttributeName).getText().getValue();
                if (targetClusterName.startsWith(clusterName)) {
                    return new Result(true, "");
                } else {
                    return new Result(false, asgAttributeName + " does not begin with " + clusterName);
                }
            }
        }
        return new Result(false, asgAttributeName + " unavailable on host " + targetVM.getCurrAvailableResources().hostname());
    }
}
| 7,944 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/MantisJobOperations.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.MantisJobDefinition;
import io.mantisrx.runtime.NamedJobDefinition;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.server.master.store.InvalidJobException;
import io.mantisrx.server.master.store.InvalidNamedJobException;
import io.mantisrx.server.master.store.NamedJob;
import io.mantisrx.server.master.store.NamedJobDeleteException;
import java.io.IOException;
import java.net.URL;
import java.util.List;
import java.util.Optional;
import rx.Observable;
import rx.functions.Action1;
/**
 * Operations on Mantis jobs and named job clusters: creation, update, submission,
 * lifecycle control (enable/disable/kill) and status observation.
 */
public interface MantisJobOperations {

    /** Creates a new named job (job cluster) from the given definition. */
    NamedJob createNamedJob(NamedJobDefinition namedJobDefinition)
            throws InvalidNamedJobException;

    /** Updates the jar of an existing named job; creates it first if {@code createIfNeeded} is true. */
    NamedJob updateNamedJar(NamedJobDefinition namedJobDefinition, boolean createIfNeeded) throws InvalidNamedJobException;

    /** Updates only the jar URL and version of the named job on behalf of {@code user}. */
    NamedJob quickUpdateNamedJob(String user, String name, URL jobJar, String version) throws InvalidNamedJobException;

    /** Replaces the SLA of the named job; {@code forceEnable} presumably re-enables a disabled cluster — confirm with implementation. */
    void updateSla(String user, String name, NamedJob.SLA sla, boolean forceEnable) throws InvalidNamedJobException;

    /**
     * Update the Labels associated with the Job cluster. This completely replaces any existing labels.
     *
     * @param user : submitter
     * @param name : Job cluster name
     * @param labels List of Label objects
     *
     * @throws InvalidNamedJobException
     */
    void updateLabels(String user, String name, List<Label> labels) throws InvalidNamedJobException;

    /** Replaces the worker migration strategy of the named job. */
    void updateMigrateStrategy(String user, String name, WorkerMigrationConfig migrationConfig) throws InvalidNamedJobException;

    /** Submits a new job using the cluster's last known configuration; returns the new job id. */
    String quickSubmit(String jobName, String user) throws InvalidNamedJobException, InvalidJobException;

    /** Returns the named job if one exists with the given name. */
    Optional<NamedJob> getNamedJob(String name);

    /** Deletes the named job on behalf of {@code user}. */
    void deleteNamedJob(String name, String user) throws NamedJobDeleteException;

    /** Disables the named job (no new submissions) on behalf of {@code user}. */
    void disableNamedJob(String name, String user) throws InvalidNamedJobException;

    /** Re-enables a previously disabled named job on behalf of {@code user}. */
    void enableNamedJob(String name, String user) throws InvalidNamedJobException;

    /** Submits a standalone job from the given definition and returns its status. */
    MantisJobStatus submit(MantisJobDefinition jobDefinition);

    /** Deletes the job with the given id; returns whether deletion happened. */
    boolean deleteJob(String jobId) throws IOException;

    /** Kills the job, recording the user and reason. */
    void killJob(String user, String jobId, String reason);

    /** Terminates the job without a user/reason attribution. */
    void terminateJob(String jobId);

    /** Hot observable of job status updates. */
    Observable<MantisJobStatus> jobs();

    /** Returns the current status of the given job. */
    MantisJobStatus status(String jobId);

    /** Action that disables a slave (agent VM) identified by the given string. */
    Action1<String> getSlaveDisabler();

    /** Action that enables a slave (agent VM) identified by the given string. */
    Action1<String> getSlaveEnabler();

    /** Signals that this instance is ready to serve requests. */
    void setReady();
}
| 7,945 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/ConstraintsEvaluators.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
import com.netflix.fenzo.AsSoftConstraint;
import com.netflix.fenzo.ConstraintEvaluator;
import com.netflix.fenzo.VMTaskFitnessCalculator;
import com.netflix.fenzo.functions.Func1;
import com.netflix.fenzo.plugins.BalancedHostAttrConstraint;
import com.netflix.fenzo.plugins.ExclusiveHostConstraint;
import com.netflix.fenzo.plugins.UniqueHostAttrConstraint;
import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.server.master.config.ConfigurationProvider;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Maps {@link JobConstraints} values to Fenzo hard constraints
 * ({@link ConstraintEvaluator}) or soft constraints ({@link VMTaskFitnessCalculator}).
 */
public class ConstraintsEvaluators {

    private static final String MANTISAGENT_MAIN_M4 = "mantisagent-main-m4";
    private static final String MANTISAGENT_MAIN_M3 = "mantisagent-main-m3";
    private static final String MANTISAGENT_MAIN_M5 = "mantisagent-main-m5";
    private static final int EXPECTED_NUM_ZONES = 3;
    private static final Logger logger = LoggerFactory.getLogger(ConstraintsEvaluators.class);
    public static ExclusiveHostConstraint exclusiveHostConstraint = new ExclusiveHostConstraint();

    /**
     * Returns the hard constraint evaluator for the given job constraint, or null
     * (after logging an error) for an unrecognized constraint.
     *
     * @param constraint the job constraint to translate
     * @param coTasks    ids of the co-scheduled tasks the constraint applies across
     */
    public static ConstraintEvaluator hardConstraint(JobConstraints constraint, final Set<String> coTasks) {
        switch (constraint) {
            case ExclusiveHost:
                return exclusiveHostConstraint;
            case UniqueHost:
                // lambdas replace the original anonymous Func1 classes
                return new UniqueHostAttrConstraint(s -> coTasks);
            case ZoneBalance:
                return new BalancedHostAttrConstraint(s -> coTasks, zoneAttributeName(), EXPECTED_NUM_ZONES);
            case M4Cluster:
                return new ClusterAffinityConstraint(asgAttributeName(), MANTISAGENT_MAIN_M4);
            case M3Cluster:
                return new ClusterAffinityConstraint(asgAttributeName(), MANTISAGENT_MAIN_M3);
            case M5Cluster:
                return new ClusterAffinityConstraint(asgAttributeName(), MANTISAGENT_MAIN_M5);
            default:
                logger.error("Unknown job hard constraint " + constraint);
                return null;
        }
    }

    /** Name of the agent attribute that carries the ASG/cluster name. */
    public static String asgAttributeName() {
        return ConfigurationProvider.getConfig().getActiveSlaveAttributeName();
    }

    /** Name of the agent attribute that carries the availability zone. */
    public static String zoneAttributeName() {
        return ConfigurationProvider.getConfig().getHostZoneAttributeName();
    }

    /**
     * Returns the soft-constraint (fitness) form of the given job constraint, or null
     * (after logging an error) for an unrecognized constraint.
     *
     * @param constraint the job constraint to translate
     * @param coTasks    ids of the co-scheduled tasks the constraint applies across
     */
    public static VMTaskFitnessCalculator softConstraint(JobConstraints constraint, final Set<String> coTasks) {
        switch (constraint) {
            case ExclusiveHost:
                return AsSoftConstraint.get(exclusiveHostConstraint);
            case UniqueHost:
                return AsSoftConstraint.get(new UniqueHostAttrConstraint(s -> coTasks));
            case ZoneBalance:
                return new BalancedHostAttrConstraint(s -> coTasks, zoneAttributeName(), EXPECTED_NUM_ZONES).asSoftConstraint();
            case M4Cluster:
                return AsSoftConstraint.get(new ClusterAffinityConstraint(asgAttributeName(), MANTISAGENT_MAIN_M4));
            case M3Cluster:
                return AsSoftConstraint.get(new ClusterAffinityConstraint(asgAttributeName(), MANTISAGENT_MAIN_M3));
            case M5Cluster:
                return AsSoftConstraint.get(new ClusterAffinityConstraint(asgAttributeName(), MANTISAGENT_MAIN_M5));
            default:
                logger.error("Unknown job soft constraint " + constraint);
                return null;
        }
    }
}
| 7,946 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/MantisAuditLogWriter.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observer;
import rx.Subscriber;
import rx.functions.Action0;
import rx.observers.SerializedObserver;
import rx.schedulers.Schedulers;
import rx.subjects.PublishSubject;
/**
 * Singleton sink for audit log events. Events pushed through {@link #getObserver()}
 * are buffered (with back-pressure up to {@code backPressureBufferSize}) and delivered
 * to the subscriber supplied at {@link #initialize(Subscriber)} on a computation scheduler.
 */
public class MantisAuditLogWriter {

    private static final Logger logger = LoggerFactory.getLogger(MantisAuditLogWriter.class);
    // volatile: initialize() and getInstance() may run on different threads; without
    // volatile the reader thread could see a stale null (or partially constructed) instance.
    private static volatile MantisAuditLogWriter instance;
    private final PublishSubject<MantisAuditLogEvent> subject;
    private final int backPressureBufferSize = 1000;

    private MantisAuditLogWriter(Subscriber<MantisAuditLogEvent> subscriber) {
        subject = PublishSubject.create();
        subject
                .onBackpressureBuffer(backPressureBufferSize, new Action0() {
                    @Override
                    public void call() {
                        logger.warn("Exceeded back pressure buffer of " + backPressureBufferSize);
                    }
                })
                .observeOn(Schedulers.computation())
                .subscribe(subscriber);
    }

    /** Installs the singleton; must be called once before {@link #getInstance()}. */
    public static void initialize(Subscriber<MantisAuditLogEvent> subscriber) {
        instance = new MantisAuditLogWriter(subscriber);
    }

    /** @throws IllegalStateException if {@link #initialize(Subscriber)} has not been called. */
    public static MantisAuditLogWriter getInstance() {
        if (instance == null)
            throw new IllegalStateException(MantisAuditLogWriter.class.getName() + " must be initialized before use");
        return instance;
    }

    /** Returns a thread-safe observer for publishing audit events. */
    public Observer<MantisAuditLogEvent> getObserver() {
        return new SerializedObserver<>(subject);
    }
}
| 7,947 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/WorkerRequest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.parameter.Parameter;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
/**
 * Request to launch one worker of a job stage: machine definition, stage/worker
 * coordinates, ports, constraints and scheduling info. Immutable except for the
 * port slots filled incrementally via {@link #addPort(int)}.
 */
public class WorkerRequest {

    private final long subscriptionTimeoutSecs;
    private final long minRuntimeSecs;
    private final long jobSubmittedAt;
    private final String user;
    // preferred Cluster to launch the worker on
    private final Optional<String> preferredCluster;
    private final String jobName;
    private final String jobId;
    private final int workerIndex;
    private final int workerNumber;
    private final URL jobJarUrl;
    private final int workerStage;
    private final int totalStages;
    private final MachineDefinition definition;
    private final int numInstancesAtStage;
    private final int numPortsPerInstance;
    // The first four ports passed to addPort() fill these well-known slots, in this order.
    private int metricsPort = -1;
    private int debugPort = -1;
    private int consolePort = -1;
    private int customPort = -1;
    private final List<Integer> ports;
    private final List<Parameter> parameters;
    private final JobSla jobSla;
    private final List<JobConstraints> hardConstraints;
    private final List<JobConstraints> softConstraints;
    private final SchedulingInfo schedulingInfo;

    public WorkerRequest(MachineDefinition definition, String jobId,
                         int workerIndex, int workerNumber, URL jobJarUrl, int workerStage, int totalStages,
                         int numInstancesAtStage,
                         String jobName, int numPortsPerInstance,
                         List<Parameter> parameters, JobSla jobSla,
                         List<JobConstraints> hardConstraints, List<JobConstraints> softConstraints,
                         SchedulingInfo schedulingInfo, long subscriptionTimeoutSecs, long minRuntimeSecs, long jobSubmittedAt,
                         final String user, final Optional<String> preferredCluster) {
        this.definition = definition;
        this.jobId = jobId;
        this.workerIndex = workerIndex;
        this.workerNumber = workerNumber;
        this.jobJarUrl = jobJarUrl;
        this.workerStage = workerStage;
        this.totalStages = totalStages;
        this.numInstancesAtStage = numInstancesAtStage;
        this.jobName = jobName;
        this.numPortsPerInstance = numPortsPerInstance + 4; // add additional ports for metricsPort, debugPort, consolePort and customPort
        ports = new ArrayList<>();
        this.parameters = parameters;
        this.jobSla = jobSla;
        this.hardConstraints = hardConstraints;
        this.softConstraints = softConstraints;
        this.schedulingInfo = schedulingInfo;
        this.subscriptionTimeoutSecs = subscriptionTimeoutSecs;
        this.minRuntimeSecs = minRuntimeSecs;
        this.jobSubmittedAt = jobSubmittedAt;
        this.user = user;
        this.preferredCluster = preferredCluster;
    }

    /** Ports per instance = ports declared in the machine definition plus one extra. */
    public static int getNumPortsPerInstance(MachineDefinition machineDefinition) {
        return machineDefinition.getNumPorts() + 1;
    }

    public SchedulingInfo getSchedulingInfo() {
        return schedulingInfo;
    }

    public List<Parameter> getParameters() {
        return parameters;
    }

    public MachineDefinition getDefinition() {
        return definition;
    }

    public String getJobId() {
        return jobId;
    }

    public int getWorkerIndex() {
        return workerIndex;
    }

    public int getWorkerNumber() {
        return workerNumber;
    }

    public URL getJobJarUrl() {
        return jobJarUrl;
    }

    public int getWorkerStage() {
        return workerStage;
    }

    public int getTotalStages() {
        return totalStages;
    }

    public int getNumInstancesAtStage() {
        return numInstancesAtStage;
    }

    public String getJobName() {
        return jobName;
    }

    public int getNumPortsPerInstance() {
        return numPortsPerInstance;
    }

    public int getMetricsPort() {
        return metricsPort;
    }

    public int getDebugPort() {
        return debugPort;
    }

    public int getConsolePort() {
        return consolePort;
    }

    public int getCustomPort() {
        return customPort;
    }

    /**
     * Records an assigned port. The first four calls fill the metrics, debug,
     * console and custom slots (in that order); later ports go to the general list.
     */
    public void addPort(int port) {
        if (metricsPort == -1) {
            metricsPort = port; // fill metricsPort first
        } else if (debugPort == -1) {
            debugPort = port; // fill debug port next
        } else if (consolePort == -1) {
            consolePort = port; // fill console port next
        } else if (customPort == -1) {
            customPort = port; // fill custom port next
        } else {
            ports.add(port);
        }
    }

    public List<Integer> getPorts() {
        return ports;
    }

    /** Returns a fresh list of all ports: the general list plus the four named slots. */
    public List<Integer> getAllPortsUsed() {
        List<Integer> allPorts = new ArrayList<>(ports);
        allPorts.add(metricsPort);
        allPorts.add(debugPort);
        allPorts.add(consolePort);
        allPorts.add(customPort);
        return allPorts;
    }

    public JobSla getJobSla() {
        return jobSla;
    }

    public List<JobConstraints> getHardConstraints() {
        return hardConstraints;
    }

    public List<JobConstraints> getSoftConstraints() {
        return softConstraints;
    }

    public long getSubscriptionTimeoutSecs() {
        return subscriptionTimeoutSecs;
    }

    public long getMinRuntimeSecs() {
        return minRuntimeSecs;
    }

    public long getJobSubmittedAt() {
        return jobSubmittedAt;
    }

    public String getUser() {
        return user;
    }

    public Optional<String> getPreferredCluster() {
        return preferredCluster;
    }

    @Override
    public String toString() {
        return jobId + "-Stage-" + workerStage + "-Worker-" + workerIndex;
    }
}
| 7,948 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/JobRequest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
import io.mantisrx.runtime.MantisJobDefinition;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.store.NamedJob;
/**
 * Immutable pairing of a job id with its {@link MantisJobDefinition}.
 * The {@code namedJob}, {@code scheduler} and {@code vmService} constructor
 * arguments are accepted for API compatibility but are not retained.
 */
public class JobRequest {

    private final String jobId;
    private final MantisJobDefinition jobDefinition;

    public JobRequest(final String jobId,
                      final MantisJobDefinition jobDefinition,
                      final NamedJob namedJob,
                      final MantisScheduler scheduler,
                      final VirtualMachineMasterService vmService) {
        this.jobId = jobId;
        this.jobDefinition = jobDefinition;
    }

    public String getJobId() {
        return jobId;
    }

    public MantisJobDefinition getJobDefinition() {
        return jobDefinition;
    }
}
| 7,949 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/MasterMain.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
import static org.apache.flink.configuration.GlobalConfiguration.loadConfiguration;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.DeadLetter;
import akka.actor.Props;
import com.netflix.fenzo.AutoScaleAction;
import com.netflix.fenzo.AutoScaleRule;
import com.netflix.fenzo.VirtualMachineLease;
import com.netflix.spectator.api.DefaultRegistry;
import com.sampullara.cli.Args;
import com.sampullara.cli.Argument;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.metrics.spectator.SpectatorRegistryFactory;
import io.mantisrx.master.DeadLetterActor;
import io.mantisrx.master.JobClustersManagerActor;
import io.mantisrx.master.JobClustersManagerService;
import io.mantisrx.master.api.akka.MasterApiAkkaService;
import io.mantisrx.master.events.AuditEventBrokerActor;
import io.mantisrx.master.events.AuditEventSubscriber;
import io.mantisrx.master.events.AuditEventSubscriberAkkaImpl;
import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventPublisherImpl;
import io.mantisrx.master.events.StatusEventBrokerActor;
import io.mantisrx.master.events.StatusEventSubscriber;
import io.mantisrx.master.events.StatusEventSubscriberAkkaImpl;
import io.mantisrx.master.events.WorkerEventSubscriber;
import io.mantisrx.master.events.WorkerMetricsCollector;
import io.mantisrx.master.events.WorkerRegistryV2;
import io.mantisrx.master.resourcecluster.ResourceClustersAkkaImpl;
import io.mantisrx.master.resourcecluster.ResourceClustersHostManagerActor;
import io.mantisrx.master.resourcecluster.resourceprovider.ResourceClusterProviderAdapter;
import io.mantisrx.master.scheduler.AgentsErrorMonitorActor;
import io.mantisrx.master.scheduler.JobMessageRouterImpl;
import io.mantisrx.master.vm.AgentClusterOperationsImpl;
import io.mantisrx.master.zk.LeaderElector;
import io.mantisrx.server.core.BaseService;
import io.mantisrx.server.core.MantisAkkaRpcSystemLoader;
import io.mantisrx.server.core.Service;
import io.mantisrx.server.core.json.DefaultObjectMapper;
import io.mantisrx.server.core.master.LocalMasterMonitor;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.core.metrics.MetricsPublisherService;
import io.mantisrx.server.core.metrics.MetricsServerService;
import io.mantisrx.server.core.zookeeper.CuratorService;
import io.mantisrx.server.master.config.ConfigurationFactory;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.config.MasterConfiguration;
import io.mantisrx.server.master.config.StaticPropertiesConfigurationFactory;
import io.mantisrx.server.master.mesos.MesosDriverSupplier;
import io.mantisrx.server.master.mesos.VirtualMachineMasterServiceMesosImpl;
import io.mantisrx.server.master.persistence.IMantisPersistenceProvider;
import io.mantisrx.server.master.persistence.KeyValueBasedPersistenceProvider;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.resourcecluster.ResourceClusters;
import io.mantisrx.server.master.scheduler.JobMessageRouter;
import io.mantisrx.server.master.scheduler.MantisSchedulerFactory;
import io.mantisrx.server.master.scheduler.MantisSchedulerFactoryImpl;
import io.mantisrx.server.master.scheduler.WorkerRegistry;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.org.apache.curator.utils.ZKPaths;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.time.Clock;
import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.rpc.RpcService;
import org.apache.flink.runtime.rpc.RpcSystem;
import org.apache.flink.runtime.rpc.RpcUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Observer;
import rx.subjects.PublishSubject;
public class MasterMain implements Service {
private static final Logger logger = LoggerFactory.getLogger(MasterMain.class);
@Argument(alias = "p", description = "Specify a configuration file", required = false)
private static String propFile = "master.properties";
private final ServiceLifecycle mantisServices = new ServiceLifecycle();
private final AtomicBoolean shutdownInitiated = new AtomicBoolean(false);
private KeyValueBasedPersistenceProvider storageProvider;
private CountDownLatch blockUntilShutdown = new CountDownLatch(1);
private volatile CuratorService curatorService = null;
private volatile AgentClusterOperationsImpl agentClusterOps = null;
private MasterConfiguration config;
private SchedulingService schedulingService;
private ILeadershipManager leadershipManager;
public MasterMain(ConfigurationFactory configFactory, AuditEventSubscriber auditEventSubscriber) {
String test = "{\"jobId\":\"sine-function-1\",\"status\":{\"jobId\":\"sine-function-1\",\"stageNum\":1,\"workerIndex\":0,\"workerNumber\":2,\"type\":\"HEARTBEAT\",\"message\":\"heartbeat\",\"state\":\"Noop\",\"hostname\":null,\"timestamp\":1525813363585,\"reason\":\"Normal\",\"payloads\":[{\"type\":\"SubscriptionState\",\"data\":\"false\"},{\"type\":\"IncomingDataDrop\",\"data\":\"{\\\"onNextCount\\\":0,\\\"droppedCount\\\":0}\"}]}}";
Metrics metrics = new Metrics.Builder()
.id("MasterMain")
.addCounter("masterInitSuccess")
.addCounter("masterInitError")
.build();
Metrics m = MetricsRegistry.getInstance().registerAndGet(metrics);
try {
ConfigurationProvider.initialize(configFactory);
this.config = ConfigurationProvider.getConfig();
leadershipManager = new LeadershipManagerZkImpl(config, mantisServices);
Thread t = new Thread(() -> shutdown());
t.setDaemon(true);
// shutdown hook
Runtime.getRuntime().addShutdownHook(t);
// shared state
PublishSubject<String> vmLeaseRescindedSubject = PublishSubject.create();
final ActorSystem system = ActorSystem.create("MantisMaster");
// log the configuration of the actor system
system.logConfiguration();
// log dead letter messages
final ActorRef actor = system.actorOf(Props.create(DeadLetterActor.class), "MantisDeadLetter");
system.eventStream().subscribe(actor, DeadLetter.class);
ActorRef agentsErrorMonitorActor = system.actorOf(AgentsErrorMonitorActor.props(), "AgentsErrorMonitor");
ActorRef statusEventBrokerActor = system.actorOf(StatusEventBrokerActor.props(agentsErrorMonitorActor), "StatusEventBroker");
ActorRef auditEventBrokerActor = system.actorOf(AuditEventBrokerActor.props(auditEventSubscriber), "AuditEventBroker");
final StatusEventSubscriber statusEventSubscriber = new StatusEventSubscriberAkkaImpl(statusEventBrokerActor);
final AuditEventSubscriber auditEventSubscriberAkka = new AuditEventSubscriberAkkaImpl(auditEventBrokerActor);
final WorkerEventSubscriber workerEventSubscriber = WorkerRegistryV2.INSTANCE;
final WorkerMetricsCollector workerMetricsCollector = new WorkerMetricsCollector(
Duration.ofMinutes(5), // cleanup jobs after 5 minutes
Duration.ofMinutes(1), // check every 1 minute for jobs to be cleaned up
Clock.systemDefaultZone());
mantisServices.addService(BaseService.wrap(workerMetricsCollector));
// TODO who watches actors created at this level?
final LifecycleEventPublisher lifecycleEventPublisher =
new LifecycleEventPublisherImpl(auditEventSubscriberAkka, statusEventSubscriber,
workerEventSubscriber.and(workerMetricsCollector));
storageProvider = new KeyValueBasedPersistenceProvider(this.config.getStorageProvider(), lifecycleEventPublisher);
final MantisJobStore mantisJobStore = new MantisJobStore(storageProvider);
final ActorRef jobClusterManagerActor = system.actorOf(JobClustersManagerActor.props(mantisJobStore, lifecycleEventPublisher, config.getJobCostsCalculator()), "JobClustersManager");
final JobMessageRouter jobMessageRouter = new JobMessageRouterImpl(jobClusterManagerActor);
// Beginning of new stuff
Configuration configuration = loadConfiguration();
final ActorRef resourceClustersHostActor = system.actorOf(
ResourceClustersHostManagerActor.props(
new ResourceClusterProviderAdapter(this.config.getResourceClusterProvider(), system),
storageProvider),
"ResourceClusterHostActor");
final RpcSystem rpcSystem =
MantisAkkaRpcSystemLoader.getInstance();
// the RPCService implementation will only be used for communicating with task executors but not for running a server itself.
// Thus, there's no need for any valid external and bind addresses.
final RpcService rpcService =
RpcUtils.createRemoteRpcService(rpcSystem, configuration, null, "6123", null, Optional.empty());
final ResourceClusters resourceClusters =
ResourceClustersAkkaImpl.load(
configFactory,
rpcService,
system,
mantisJobStore,
jobMessageRouter,
resourceClustersHostActor,
storageProvider);
// end of new stuff
final WorkerRegistry workerRegistry = WorkerRegistryV2.INSTANCE;
if (config.getMesosEnabled()) {
final MesosDriverSupplier mesosDriverSupplier = new MesosDriverSupplier(this.config, vmLeaseRescindedSubject,
jobMessageRouter,
workerRegistry);
final VirtualMachineMasterServiceMesosImpl vmService = new VirtualMachineMasterServiceMesosImpl(
this.config,
getDescriptionJson(),
mesosDriverSupplier);
schedulingService = new SchedulingService(jobMessageRouter, workerRegistry, vmLeaseRescindedSubject, vmService);
mesosDriverSupplier.setAddVMLeaseAction(schedulingService::addOffers);
// initialize agents error monitor
agentsErrorMonitorActor.tell(new AgentsErrorMonitorActor.InitializeAgentsErrorMonitor(schedulingService), ActorRef.noSender());
this.agentClusterOps = new AgentClusterOperationsImpl(storageProvider,
jobMessageRouter,
schedulingService,
lifecycleEventPublisher,
ConfigurationProvider.getConfig().getActiveSlaveAttributeName());
// services
mantisServices.addService(vmService);
mantisServices.addService(schedulingService);
mantisServices.addService(agentClusterOps);
}
final MantisSchedulerFactory mantisSchedulerFactory =
new MantisSchedulerFactoryImpl(system, resourceClusters, new ExecuteStageRequestFactory(getConfig()), jobMessageRouter, schedulingService, getConfig(), MetricsRegistry.getInstance());
final boolean loadJobsFromStoreOnInit = true;
final JobClustersManagerService jobClustersManagerService = new JobClustersManagerService(jobClusterManagerActor, mantisSchedulerFactory, loadJobsFromStoreOnInit);
// start serving metrics
if (config.getMasterMetricsPort() > 0) {
new MetricsServerService(config.getMasterMetricsPort(), 1, Collections.emptyMap()).start();
}
new MetricsPublisherService(config.getMetricsPublisher(), config.getMetricsPublisherFrequencyInSeconds(),
new HashMap<>()).start();
// services
mantisServices.addService(jobClustersManagerService);
if (this.config.isLocalMode()) {
mantisServices.addService(new MasterApiAkkaService(new LocalMasterMonitor(leadershipManager.getDescription()), leadershipManager.getDescription(), jobClusterManagerActor, statusEventBrokerActor,
resourceClusters, resourceClustersHostActor, config.getApiPort(), storageProvider, lifecycleEventPublisher, leadershipManager, agentClusterOps));
leadershipManager.becomeLeader();
} else {
curatorService = new CuratorService(this.config);
curatorService.start();
mantisServices.addService(createLeaderElector(curatorService, leadershipManager));
mantisServices.addService(new MasterApiAkkaService(curatorService.getMasterMonitor(), leadershipManager.getDescription(), jobClusterManagerActor, statusEventBrokerActor,
resourceClusters, resourceClustersHostActor, config.getApiPort(), storageProvider, lifecycleEventPublisher, leadershipManager, agentClusterOps));
}
m.getCounter("masterInitSuccess").increment();
} catch (Exception e) {
logger.error("caught exception on Mantis Master initialization", e);
m.getCounter("masterInitError").increment();
shutdown();
System.exit(1);
}
}
private static Properties loadProperties(String propFile) {
// config
Properties props = new Properties();
try (InputStream in = findResourceAsStream(propFile)) {
props.load(in);
} catch (IOException e) {
throw new RuntimeException(String.format("Can't load properties from the given property file %s: %s", propFile, e.getMessage()), e);
}
for (String key : props.stringPropertyNames()) {
String envVarKey = key.toUpperCase().replace('.', '_');
String envValue = System.getenv(envVarKey);
if (envValue != null) {
props.setProperty(key, envValue);
logger.info("Override config from env {}: {}.", key, envValue);
}
}
return props;
}
/**
* Finds the given resource and returns its input stream. This method seeks the file first from the current working directory,
* and then in the class path.
*
* @param resourceName the name of the resource. It can either be a file name, or a path.
*
* @return An {@link java.io.InputStream} instance that represents the found resource. Null otherwise.
*
* @throws FileNotFoundException
*/
private static InputStream findResourceAsStream(String resourceName) throws FileNotFoundException {
File resource = new File(resourceName);
if (resource.exists()) {
return new FileInputStream(resource);
}
InputStream is = Thread.currentThread().getContextClassLoader().getResourceAsStream(resourceName);
if (is == null) {
throw new FileNotFoundException(String.format("Can't find property file %s. Make sure the property file is either in your path or in your classpath ", resourceName));
}
return is;
}
private static void setupDummyAgentClusterAutoScaler() {
final AutoScaleRule dummyAutoScaleRule = new AutoScaleRule() {
@Override
public String getRuleName() {
return "test";
}
@Override
public int getMinIdleHostsToKeep() {
return 1;
}
@Override
public int getMaxIdleHostsToKeep() {
return 10;
}
@Override
public long getCoolDownSecs() {
return 300;
}
@Override
public boolean idleMachineTooSmall(VirtualMachineLease lease) {
return false;
}
};
AgentClustersAutoScaler.initialize(() -> new HashSet<>(Collections.singletonList(dummyAutoScaleRule)), new Observer<AutoScaleAction>() {
@Override
public void onCompleted() {
}
@Override
public void onError(Throwable e) {
}
@Override
public void onNext(AutoScaleAction autoScaleAction) {
}
});
}
public static void main(String[] args) {
try {
Args.parse(MasterMain.class, args);
} catch (IllegalArgumentException e) {
Args.usage(MasterMain.class);
System.exit(1);
}
try {
SpectatorRegistryFactory.setRegistry(new DefaultRegistry());
Properties props = new Properties();
props.putAll(System.getenv());
props.putAll(System.getProperties());
props.putAll(loadProperties(propFile));
StaticPropertiesConfigurationFactory factory = new StaticPropertiesConfigurationFactory(props);
setupDummyAgentClusterAutoScaler();
final AuditEventSubscriber auditEventSubscriber = new AuditEventSubscriberLoggingImpl();
MasterMain master = new MasterMain(factory, auditEventSubscriber);
master.start(); // blocks until shutdown hook (ctrl-c)
} catch (Exception e) {
// unexpected to get a RuntimeException, will exit
logger.error("Unexpected error: " + e.getMessage(), e);
System.exit(2);
}
}
private LeaderElector createLeaderElector(CuratorService curatorService,
ILeadershipManager leadershipManager) {
return LeaderElector.builder(leadershipManager)
.withCurator(curatorService.getCurator())
.withJsonMapper(DefaultObjectMapper.getInstance())
.withElectionPath(ZKPaths.makePath(config.getZkRoot(), config.getLeaderElectionPath()))
.withAnnouncementPath(ZKPaths.makePath(config.getZkRoot(), config.getLeaderAnnouncementPath()))
.build();
}
@Override
public void start() {
logger.info("Starting Mantis Master");
mantisServices.start();
try {
blockUntilShutdown.await();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
@Override
public void enterActiveMode() {
    // Intentionally a no-op for MasterMain itself.
}
/**
 * Idempotent shutdown: only the first caller performs the teardown; subsequent
 * calls just log and return. Releases the latch that {@code start()} waits on.
 */
@Override
public void shutdown() {
    if (!shutdownInitiated.compareAndSet(false, true)) {
        logger.info("Shutdown already initiated, not starting again");
        return;
    }
    logger.info("Shutting down Mantis Master");
    mantisServices.shutdown();
    logger.info("mantis services shutdown complete");
    final boolean shutdownCuratorEnabled = ConfigurationProvider.getConfig().getShutdownCuratorServiceEnabled();
    if (curatorService != null && shutdownCuratorEnabled) {
        logger.info("Shutting down Curator Service");
        curatorService.shutdown();
    } else {
        logger.info("not shutting down curator service {} shutdownEnabled? {}", curatorService, shutdownCuratorEnabled);
    }
    // Unblock start() so the main thread can exit.
    blockUntilShutdown.countDown();
    logger.info("Mantis Master shutdown done");
}
/** @return the master configuration this instance was started with. */
public MasterConfiguration getConfig() {
    return config;
}
/**
 * Serializes this master's description to JSON.
 *
 * @return JSON representation of the leader/master description.
 * @throws IllegalStateException if the description cannot be serialized.
 */
public String getDescriptionJson() {
    // Fetch once so the error message refers to exactly the instance we tried
    // to serialize (the original called getDescription() twice).
    final MasterDescription description = leadershipManager.getDescription();
    try {
        return DefaultObjectMapper.getInstance().writeValueAsString(description);
    } catch (JsonProcessingException e) {
        throw new IllegalStateException(String.format("Failed to convert the description %s to JSON: %s", description, e.getMessage()), e);
    }
}
/** @return the operations handle for managing agent clusters. */
public AgentClusterOperationsImpl getAgentClusterOps() {
    return agentClusterOps;
}
/**
 * Returns a callback that re-enables a VM (by hostname) in the scheduler,
 * or {@code null} when no scheduling service is configured.
 */
public Consumer<String> getAgentVMEnabler() {
    return schedulingService != null ? schedulingService::enableVM : null;
}
/**
 * Stream of master (leader) description updates from the curator-backed
 * monitor; an empty stream when no curator service is present.
 */
public Observable<MasterDescription> getMasterObservable() {
    if (curatorService == null) {
        return Observable.empty();
    }
    return curatorService.getMasterMonitor().getMasterObservable();
}
/** @return true if this master instance currently holds leadership. */
public boolean isLeader() {
    return leadershipManager.isLeader();
}

/** @return the persistence provider backing this master. */
public IMantisPersistenceProvider getStorageProvider() {
    return storageProvider;
}
}
| 7,950 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/ServiceLifecycle.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
import io.mantisrx.server.core.BaseService;
import java.util.Iterator;
import java.util.LinkedList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Manage lifecycle of services. Services added in an order are started in the same order and shutdown in the reverse
* order. Each service being added is implicitly given the previously added service as its predecessor. This class
* currently represents a model of a linear list of services, each depending on only the previous service in the list.
*/
public class ServiceLifecycle {
private static final Logger logger = LoggerFactory.getLogger(ServiceLifecycle.class);
private LinkedList<BaseService> servicesList = new LinkedList<BaseService>();
void addService(BaseService service) {
if (!servicesList.isEmpty()) {
service.addPredecessor(servicesList.getLast());
}
servicesList.add(service);
}
void start() {
for (BaseService service : servicesList) {
try {
logger.info("Starting service " + service.getMyServiceCount() + ": " + service);
service.start();
logger.info("Successfully started service " + service.getMyServiceCount() + ": " + service);
} catch (Exception e) {
logger.error(String.format("Failed to start service %d: %s: %s", service.getMyServiceCount(), service, e.getMessage()), e);
throw e;
}
}
}
void becomeLeader() {
for (BaseService service : servicesList) {
service.enterActiveMode();
}
}
void shutdown() {
if (!servicesList.isEmpty()) {
Iterator<BaseService> iterator = servicesList.descendingIterator();
while (iterator.hasNext()) {
BaseService service = iterator.next();
logger.info("Shutting down service " + service.getMyServiceCount() + ": " + service);
service.shutdown();
logger.info("Successfully shut down service " + service.getMyServiceCount() + ": " + service);
}
}
}
} | 7,951 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/SchedulingService.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
import static io.mantisrx.server.master.scheduler.ScheduleRequest.DEFAULT_Q_ATTRIBUTES;
import com.netflix.fenzo.AutoScaleAction;
import com.netflix.fenzo.AutoScaleRule;
import com.netflix.fenzo.SchedulingResult;
import com.netflix.fenzo.TaskAssignmentResult;
import com.netflix.fenzo.TaskRequest;
import com.netflix.fenzo.TaskScheduler;
import com.netflix.fenzo.TaskSchedulingService;
import com.netflix.fenzo.VMAssignmentResult;
import com.netflix.fenzo.VirtualMachineCurrentState;
import com.netflix.fenzo.VirtualMachineLease;
import com.netflix.fenzo.queues.TaskQueue;
import com.netflix.fenzo.queues.TaskQueueException;
import com.netflix.fenzo.queues.tiered.TieredQueue;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.metrics.spectator.GaugeCallback;
import io.mantisrx.common.metrics.spectator.MetricGroupId;
import io.mantisrx.common.metrics.spectator.MetricId;
import io.mantisrx.common.metrics.spectator.SpectatorRegistryFactory;
import io.mantisrx.server.core.BaseService;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.scheduler.JobMessageRouter;
import io.mantisrx.server.master.scheduler.LaunchTaskRequest;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.ScheduleRequest;
import io.mantisrx.server.master.scheduler.SchedulingStateManager;
import io.mantisrx.server.master.scheduler.WorkerLaunchFailed;
import io.mantisrx.server.master.scheduler.WorkerLaunched;
import io.mantisrx.server.master.scheduler.WorkerRegistry;
import io.mantisrx.server.master.scheduler.WorkerUnscheduleable;
import io.mantisrx.shaded.com.google.common.collect.Sets;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import org.HdrHistogram.SynchronizedHistogram;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Observer;
import rx.functions.Action1;
import rx.schedulers.Schedulers;
//import io.mantisrx.server.core.domain.WorkerPorts;
public class SchedulingService extends BaseService implements MantisScheduler {
private static final Logger logger = LoggerFactory.getLogger(SchedulingService.class);
private final JobMessageRouter jobMessageRouter;
private final WorkerRegistry workerRegistry;
private final TaskScheduler taskScheduler;
private final TaskSchedulingService taskSchedulingService;
private final TieredQueue taskQueue;
private final Counter numWorkersLaunched;
private final Counter numResourceOffersReceived;
private final Counter numResourceAllocations;
private final Counter numResourceOffersRejected;
private final Gauge workersToLaunch;
private final Gauge pendingWorkers;
private final Gauge schedulerRunMillis;
private final Counter perWorkerSchedulingTimeMs;
private final SynchronizedHistogram workerAcceptedToLaunchedDistMs = new SynchronizedHistogram(3_600_000L, 3);
private final Gauge totalActiveAgents;
private final Counter numAgentsUsed;
private final Gauge idleAgents;
private final Gauge totalAvailableCPUs;
private final Gauge totalAllocatedCPUs;
private final Gauge totalAvailableMemory;
private final Gauge totalAllocatedMemory;
private final Gauge totalAvailableNwMbps;
private final Gauge totalAllocatedNwMbps;
private final Gauge cpuUtilization;
private final Gauge memoryUtilization;
private final Gauge networkUtilization;
private final Gauge dominantResUtilization;
private final Gauge fenzoLaunchedTasks;
private final Gauge jobMgrRunningWorkers;
private final Counter numAutoScaleUpActions;
private final Counter numAutoScaleDownActions;
private final Counter numMissingWorkerPorts;
private final Counter schedulingResultExceptions;
private final Counter schedulingCallbackExceptions;
private final SchedulingStateManager schedulingState;
private final AtomicInteger idleMachinesCount = new AtomicInteger();
private final String slaveClusterAttributeName;
private final long vmCurrentStatesCheckInterval = 10000;
private final AtomicLong lastVmCurrentStatesCheckDone = new AtomicLong(System.currentTimeMillis());
private VirtualMachineMasterService virtualMachineService;
private long SCHEDULING_ITERATION_INTERVAL_MILLIS = 50;
private long MAX_DELAY_MILLIS_BETWEEN_SCHEDULING_ITER = 5_000;
private AtomicLong lastSchedulingResultCallback = new AtomicLong(System.currentTimeMillis());
/**
 * Creates the scheduling service: builds the Fenzo task scheduler and queue,
 * wires the dynamic autoscale-rule updater, and registers all scheduler metrics.
 *
 * @param jobMessageRouter           routes worker lifecycle events to job actors.
 * @param workerRegistry             registry of known workers (used for accepted-at times and validity checks).
 * @param vmLeaseRescindedObservable stream of rescinded lease ids; the value "ALL" expires every lease.
 * @param virtualMachineService      Mesos-facing service used to launch/kill tasks and reject leases.
 */
public SchedulingService(final JobMessageRouter jobMessageRouter,
                         final WorkerRegistry workerRegistry,
                         final Observable<String> vmLeaseRescindedObservable,
                         final VirtualMachineMasterService virtualMachineService) {
    super(true);
    this.schedulingState = new SchedulingStateManager();
    this.jobMessageRouter = jobMessageRouter;
    this.workerRegistry = workerRegistry;
    this.virtualMachineService = virtualMachineService;
    this.slaveClusterAttributeName = ConfigurationProvider.getConfig().getSlaveClusterAttributeName();
    SCHEDULING_ITERATION_INTERVAL_MILLIS = ConfigurationProvider.getConfig().getSchedulerIterationIntervalMillis();
    AgentFitnessCalculator agentFitnessCalculator = new AgentFitnessCalculator();
    TaskScheduler.Builder schedulerBuilder = new TaskScheduler.Builder()
        .withLeaseRejectAction(virtualMachineService::rejectLease)
        .withLeaseOfferExpirySecs(ConfigurationProvider.getConfig().getMesosLeaseOfferExpirySecs())
        .withFitnessCalculator(agentFitnessCalculator)
        .withFitnessGoodEnoughFunction(agentFitnessCalculator.getFitnessGoodEnoughFunc())
        .withAutoScaleByAttributeName(ConfigurationProvider.getConfig().getAutoscaleByAttributeName()); // set this always
    if (ConfigurationProvider.getConfig().getDisableShortfallEvaluation())
        schedulerBuilder = schedulerBuilder.disableShortfallEvaluation();
    taskScheduler = setupTaskSchedulerAndAutoScaler(vmLeaseRescindedObservable, schedulerBuilder);
    taskScheduler.setActiveVmGroupAttributeName(ConfigurationProvider.getConfig().getActiveSlaveAttributeName());
    // Two-tiered task queue for pending schedule requests.
    taskQueue = new TieredQueue(2);
    taskSchedulingService = setupTaskSchedulingService(taskScheduler);
    setupAutoscaleRulesDynamicUpdater();
    // Register every scheduler metric under this class's canonical name; the
    // builder names must match the getCounter/getGauge lookups below.
    MetricGroupId metricGroupId = new MetricGroupId(SchedulingService.class.getCanonicalName());
    Metrics m = new Metrics.Builder()
        .id(metricGroupId)
        .addCounter("numWorkersLaunched")
        .addCounter("numResourceOffersReceived")
        .addCounter("numResourceAllocations")
        .addCounter("numResourceOffersRejected")
        .addGauge("workersToLaunch")
        .addGauge("pendingWorkers")
        .addGauge("schedulerRunMillis")
        .addCounter("perWorkerSchedulingTimeMillis")
        // Percentile gauges read from the synchronized accepted-to-launched histogram.
        .addGauge(new GaugeCallback(metricGroupId, "workerAcceptedToLaunchedMsP50", () -> (double) workerAcceptedToLaunchedDistMs.getValueAtPercentile(50)))
        .addGauge(new GaugeCallback(metricGroupId, "workerAcceptedToLaunchedMsP95", () -> (double) workerAcceptedToLaunchedDistMs.getValueAtPercentile(95)))
        .addGauge(new GaugeCallback(metricGroupId, "workerAcceptedToLaunchedMsP99", () -> (double) workerAcceptedToLaunchedDistMs.getValueAtPercentile(99)))
        .addGauge(new GaugeCallback(metricGroupId, "workerAcceptedToLaunchedMsMax", () -> (double) workerAcceptedToLaunchedDistMs.getValueAtPercentile(100)))
        .addGauge("totalActiveAgents")
        .addCounter("numAgentsUsed")
        .addGauge("idleAgents")
        .addGauge("totalAvailableCPUs")
        .addGauge("totalAllocatedCPUs")
        .addGauge("totalAvailableMemory")
        .addGauge("totalAllocatedMemory")
        .addGauge("totalAvailableNwMbps")
        .addGauge("totalAllocatedNwMbps")
        .addGauge("cpuUtilization")
        .addGauge("memoryUtilization")
        .addGauge("networkUtilization")
        .addGauge("dominantResUtilization")
        .addCounter("numAutoScaleUpActions")
        .addCounter("numAutoScaleDownActions")
        .addGauge("fenzoLaunchedTasks")
        .addGauge("jobMgrRunningWorkers")
        .addCounter("numMissingWorkerPorts")
        .addCounter("schedulingResultExceptions")
        .addCounter("schedulingCallbackExceptions")
        .build();
    m = MetricsRegistry.getInstance().registerAndGet(m);
    // Bind final metric fields from the registered metrics instance.
    numWorkersLaunched = m.getCounter("numWorkersLaunched");
    numResourceOffersReceived = m.getCounter("numResourceOffersReceived");
    numResourceAllocations = m.getCounter("numResourceAllocations");
    numResourceOffersRejected = m.getCounter("numResourceOffersRejected");
    workersToLaunch = m.getGauge("workersToLaunch");
    pendingWorkers = m.getGauge("pendingWorkers");
    schedulerRunMillis = m.getGauge("schedulerRunMillis");
    totalActiveAgents = m.getGauge("totalActiveAgents");
    numAgentsUsed = m.getCounter("numAgentsUsed");
    idleAgents = m.getGauge("idleAgents");
    totalAvailableCPUs = m.getGauge("totalAvailableCPUs");
    totalAllocatedCPUs = m.getGauge("totalAllocatedCPUs");
    totalAvailableMemory = m.getGauge("totalAvailableMemory");
    totalAllocatedMemory = m.getGauge("totalAllocatedMemory");
    totalAvailableNwMbps = m.getGauge("totalAvailableNwMbps");
    totalAllocatedNwMbps = m.getGauge("totalAllocatedNwMbps");
    cpuUtilization = m.getGauge("cpuUtilization");
    memoryUtilization = m.getGauge("memoryUtilization");
    networkUtilization = m.getGauge("networkUtilization");
    dominantResUtilization = m.getGauge("dominantResUtilization");
    numAutoScaleUpActions = m.getCounter("numAutoScaleUpActions");
    numAutoScaleDownActions = m.getCounter("numAutoScaleDownActions");
    fenzoLaunchedTasks = m.getGauge("fenzoLaunchedTasks");
    jobMgrRunningWorkers = m.getGauge("jobMgrRunningWorkers");
    numMissingWorkerPorts = m.getCounter("numMissingWorkerPorts");
    schedulingResultExceptions = m.getCounter("schedulingResultExceptions");
    schedulingCallbackExceptions = m.getCounter("schedulingCallbackExceptions");
    perWorkerSchedulingTimeMs = m.getCounter("perWorkerSchedulingTimeMillis");
}
/**
 * Builds the Fenzo TaskScheduler: applies autoscale rules (when an
 * AgentClustersAutoScaler is initialized), subscribes lease-rescind handling,
 * and registers the autoscaler callback that forwards scale actions and counts
 * them in metrics.
 *
 * @param vmLeaseRescindedObservable lease ids to expire; "ALL" expires every lease.
 * @param schedulerBuilder           partially-configured builder from the constructor.
 * @return the fully built scheduler.
 */
private TaskScheduler setupTaskSchedulerAndAutoScaler(Observable<String> vmLeaseRescindedObservable,
                                                      TaskScheduler.Builder schedulerBuilder) {
    int minMinIdle = 4;
    schedulerBuilder = schedulerBuilder
        .withAutoScaleDownBalancedByAttributeName(ConfigurationProvider.getConfig().getHostZoneAttributeName())
        .withAutoScalerMapHostnameAttributeName(ConfigurationProvider.getConfig().getAutoScalerMapHostnameAttributeName());
    final AgentClustersAutoScaler agentClustersAutoScaler = AgentClustersAutoScaler.get();
    try {
        if (agentClustersAutoScaler != null) {
            Set<AutoScaleRule> rules = agentClustersAutoScaler.getRules();
            if (rules != null && !rules.isEmpty()) {
                for (AutoScaleRule rule : rules) {
                    schedulerBuilder = schedulerBuilder.withAutoScaleRule(rule);
                    // Track the smallest min-idle across rules to bound offer rejection below.
                    minMinIdle = Math.min(minMinIdle, rule.getMinIdleHostsToKeep());
                }
            } else {
                logger.warn("No auto scale rules setup");
            }
        }
    } catch (IllegalStateException e) {
        logger.warn("Ignoring: " + e.getMessage());
    }
    schedulerBuilder = schedulerBuilder.withMaxOffersToReject(Math.max(1, minMinIdle));
    final TaskScheduler scheduler = schedulerBuilder.build();
    // Expire leases as Mesos rescinds them (anonymous inner class replaced with a lambda).
    vmLeaseRescindedObservable
        .doOnNext(leaseId -> {
            if (leaseId.equals("ALL")) {
                scheduler.expireAllLeases();
            } else {
                scheduler.expireLease(leaseId);
            }
        })
        .subscribe();
    if (agentClustersAutoScaler != null) {
        final Observer<AutoScaleAction> autoScaleActionObserver = agentClustersAutoScaler.getAutoScaleActionObserver();
        // Count scale up/down actions, then forward the action to the autoscaler observer.
        scheduler.setAutoscalerCallback(action -> {
            try {
                switch (action.getType()) {
                    case Up:
                        numAutoScaleUpActions.increment();
                        break;
                    case Down:
                        numAutoScaleDownActions.increment();
                        break;
                }
                autoScaleActionObserver.onNext(action);
            } catch (Exception e) {
                logger.warn("Will continue after exception calling autoscale action observer: " + e.getMessage(), e);
            }
        });
    }
    return scheduler;
}
/**
 * Starts a once-per-minute background task that reconciles Fenzo's autoscale
 * rules with the AgentClustersAutoScaler's current rule set: new/changed rules
 * are added or replaced, and rules no longer present are removed.
 */
private void setupAutoscaleRulesDynamicUpdater() {
    // NOTE(review): this set is shared across iterations and used as the "no current
    // rules" placeholder; remove() below is a no-op on it when empty, but a fresh set
    // per iteration would be safer -- confirm intent.
    final Set<String> emptyHashSet = new HashSet<>();
    Schedulers.computation().createWorker().schedulePeriodically(() -> {
        try {
            logger.debug("Updating cluster autoscale rules");
            final AgentClustersAutoScaler agentClustersAutoScaler = AgentClustersAutoScaler.get();
            if (agentClustersAutoScaler == null) {
                logger.warn("No agent cluster autoscaler defined, not setting up Fenzo autoscaler rules");
                return;
            }
            final Set<AutoScaleRule> newRules = agentClustersAutoScaler.getRules();
            final Collection<AutoScaleRule> currRules = taskScheduler.getAutoScaleRules();
            // Collect the names of rules Fenzo currently knows about.
            final Set<String> currRulesNames = currRules == null || currRules.isEmpty() ?
                emptyHashSet :
                currRules.stream().collect((Supplier<Set<String>>) HashSet::new,
                    (strings, autoScaleRule) -> strings.add(autoScaleRule.getRuleName()),
                    Set::addAll);
            if (newRules != null && !newRules.isEmpty()) {
                for (AutoScaleRule r : newRules) {
                    logger.debug("Setting up autoscale rule: " + r);
                    taskScheduler.addOrReplaceAutoScaleRule(r);
                    // Whatever name remains in currRulesNames afterwards is stale.
                    currRulesNames.remove(r.getRuleName());
                }
            }
            if (!currRulesNames.isEmpty()) {
                for (String ruleName : currRulesNames) {
                    logger.info("Removing autoscale rule " + ruleName);
                    taskScheduler.removeAutoScaleRule(ruleName);
                }
            }
        } catch (Exception e) {
            // Swallow and retry on the next tick; the updater must keep running.
            logger.warn("Unexpected error updating cluster autoscale rules: " + e.getMessage());
        }
    }, 1, 1, TimeUnit.MINUTES);
}
/**
 * Assembles the Fenzo TaskSchedulingService around the given scheduler,
 * our tiered task queue, and the scheduling-result callback.
 */
private TaskSchedulingService setupTaskSchedulingService(TaskScheduler taskScheduler) {
    return new TaskSchedulingService.Builder()
        .withTaskScheduler(taskScheduler)
        .withLoopIntervalMillis(SCHEDULING_ITERATION_INTERVAL_MILLIS)
        // Rate-limits iterations when no assignments were made and no new offers arrived.
        .withMaxDelayMillis(MAX_DELAY_MILLIS_BETWEEN_SCHEDULING_ITER)
        .withSchedulingResultCallback(this::schedulingResultHandler)
        .withTaskQueue(taskQueue)
        .withOptimizingShortfallEvaluator()
        .build();
}
/**
 * Reads a text attribute from a lease's attribute map.
 *
 * @return the attribute's text value, or empty when the map, the attribute,
 *         or its text value is absent.
 */
private Optional<String> getAttribute(final VirtualMachineLease lease, final String attributeName) {
    if (lease.getAttributeMap() == null
        || lease.getAttributeMap().get(attributeName) == null
        || !lease.getAttributeMap().get(attributeName).getText().hasValue()) {
        return Optional.empty();
    }
    return Optional.of(lease.getAttributeMap().get(attributeName).getText().getValue());
}
/**
* Attempts to launch tasks given some number of leases from Mesos.
*
* When a task is launched successfully, the following will happen:
*
* 1. Emit a {@link WorkerLaunched} event to be handled by the corresponding actor.
* 2. Makes a call to the underlying Mesos driver to launch the task.
*
* A task can fail to launch if:
*
* 1. It doesn't receive enough metadata for {@link WorkerPorts} to pass its preconditions.
* - No launch task request will be made for this assignment result.
* - Proactively unschedule the worker.
* 2. It fails to emit a {@link WorkerLaunched} event.
* - The worker will get unscheduled for this launch task request.
* 3. There are no launch tasks for this assignment result.
* - All of these leases are rejected.
* - Eventually, the underlying Mesos driver will decline offers since there are no launch task requests.
*
* @param requests collection of assignment results received by the scheduler.
* @param leases list of resource offers from Mesos.
*/
private void launchTasks(Collection<TaskAssignmentResult> requests, List<VirtualMachineLease> leases) {
    List<LaunchTaskRequest> launchTaskRequests = new ArrayList<>();
    for (TaskAssignmentResult assignmentResult : requests) {
        ScheduleRequest request = (ScheduleRequest) assignmentResult.getRequest();
        WorkerPorts workerPorts = null;
        try {
            workerPorts = new WorkerPorts(assignmentResult.getAssignedPorts());
        } catch (IllegalArgumentException | IllegalStateException e) {
            // WorkerPorts preconditions failed (case 1 in the method javadoc above);
            // count it and fall through to unschedule the worker below.
            logger.error("problem launching tasks for assignment result {}: {}", assignmentResult, e);
            numMissingWorkerPorts.increment();
        }
        if (workerPorts != null) {
            // NOTE(review): every assignment reports the first lease's host/VM id;
            // assumes all leases in this batch belong to the same VM -- confirm
            // against Fenzo's VMAssignmentResult contract.
            boolean success = jobMessageRouter.routeWorkerEvent(new WorkerLaunched(
                request.getWorkerId(),
                request.getStageNum(),
                leases.get(0).hostname(),
                leases.get(0).getVMID(),
                getAttribute(leases.get(0), slaveClusterAttributeName),
                Optional.empty(),
                workerPorts));
            if (success) {
                launchTaskRequests.add(new LaunchTaskRequest(request, workerPorts));
            } else {
                // Could not deliver WorkerLaunched; undo the scheduling decision.
                unscheduleWorker(request.getWorkerId(), Optional.ofNullable(leases.get(0).hostname()));
            }
        } else {
            unscheduleWorker(request.getWorkerId(), Optional.ofNullable(leases.get(0).hostname()));
        }
    }
    if (launchTaskRequests.isEmpty()) {
        // Nothing to launch: return all offers so Mesos can re-offer them.
        for (VirtualMachineLease l : leases)
            virtualMachineService.rejectLease(l);
    }
    Map<ScheduleRequest, LaunchTaskException> launchErrors = virtualMachineService.launchTasks(launchTaskRequests, leases);
    // Report any per-request launch failures back to the owning job actors.
    for (TaskAssignmentResult result : requests) {
        final ScheduleRequest sre = (ScheduleRequest) result.getRequest();
        if (launchErrors.containsKey(sre)) {
            String errorMessage = getWorkerStringPrefix(sre.getStageNum(), sre.getWorkerId()) +
                " failed due to " + launchErrors.get(sre).getMessage();
            boolean success = jobMessageRouter.routeWorkerEvent(new WorkerLaunchFailed(sre.getWorkerId(), sre.getStageNum(), errorMessage));
            if (!success) {
                logger.warn("Failed to route WorkerLaunchFailed for {} (err {})", sre.getWorkerId(), errorMessage);
            }
        }
    }
}
/** Human-readable worker identifier prefix used in launch-failure messages. */
private String getWorkerStringPrefix(int stageNum, final WorkerId workerId) {
    // %s for the index/number operands preserves the original concatenation semantics.
    return String.format("stage %d worker index=%s number=%s",
        stageNum, workerId.getWorkerIndex(), workerId.getWorkerNum());
}
/**
 * Callback invoked by Fenzo after every scheduling iteration: launches assigned
 * workers, routes unassignable workers back to their job actors, refreshes the
 * cached VM-state snapshot, and publishes iteration metrics. Never throws --
 * all exceptions are caught and counted.
 *
 * @param schedulingResult the outcome of one Fenzo scheduling iteration.
 */
private void schedulingResultHandler(SchedulingResult schedulingResult) {
    try {
        // Feeds the "time since last scheduling run" watcher gauge.
        lastSchedulingResultCallback.set(System.currentTimeMillis());
        final List<Exception> exceptions = schedulingResult.getExceptions();
        for (Exception exc : exceptions) {
            logger.error("Scheduling result got exception: {}", exc.getMessage(), exc);
            schedulingResultExceptions.increment();
        }
        int workersLaunched = 0;
        SchedulerCounters.getInstance().incrementResourceAllocationTrials(schedulingResult.getNumAllocations());
        Map<String, VMAssignmentResult> assignmentResultMap = schedulingResult.getResultMap();
        final int assignmentResultSize;
        if (assignmentResultMap != null) {
            assignmentResultSize = assignmentResultMap.size();
            long now = System.currentTimeMillis();
            // Launch assigned tasks per VM and record per-worker scheduling latencies.
            for (Map.Entry<String, VMAssignmentResult> aResult : assignmentResultMap.entrySet()) {
                launchTasks(aResult.getValue().getTasksAssigned(), aResult.getValue().getLeasesUsed());
                for (TaskAssignmentResult r : aResult.getValue().getTasksAssigned()) {
                    final ScheduleRequest request = (ScheduleRequest) r.getRequest();
                    final Optional<Long> acceptedAt = workerRegistry.getAcceptedAt(request.getWorkerId());
                    acceptedAt.ifPresent(acceptedAtTime -> workerAcceptedToLaunchedDistMs.recordValue(now - acceptedAtTime));
                    perWorkerSchedulingTimeMs.increment(now - request.getReadyAt());
                }
                workersLaunched += aResult.getValue().getTasksAssigned().size();
            }
        } else {
            assignmentResultSize = 0;
        }
        // for workers that didn't get scheduled, rate limit them
        for (Map.Entry<TaskRequest, List<TaskAssignmentResult>> entry : schedulingResult.getFailures().entrySet()) {
            final ScheduleRequest req = (ScheduleRequest) entry.getKey();
            boolean success = jobMessageRouter.routeWorkerEvent(new WorkerUnscheduleable(req.getWorkerId(), req.getStageNum()));
            if (!success) {
                logger.warn("Failed to route {} WorkerUnscheduleable event", req.getWorkerId());
                if (logger.isTraceEnabled()) {
                    logger.trace("Unscheduleable worker {} assignmentresults {}", req.getWorkerId(), entry.getValue());
                }
            }
        }
        // Iteration-level counters and gauges.
        numWorkersLaunched.increment(workersLaunched);
        numResourceOffersReceived.increment(schedulingResult.getLeasesAdded());
        numResourceAllocations.increment(schedulingResult.getNumAllocations());
        numResourceOffersRejected.increment(schedulingResult.getLeasesRejected());
        final int requestedWorkers = workersLaunched + schedulingResult.getFailures().size();
        workersToLaunch.set(requestedWorkers);
        pendingWorkers.set(schedulingResult.getFailures().size());
        schedulerRunMillis.set(schedulingResult.getRuntime());
        totalActiveAgents.set(schedulingResult.getTotalVMsCount());
        numAgentsUsed.increment(assignmentResultSize);
        final int idleVMsCount = schedulingResult.getIdleVMsCount();
        idleAgents.set(idleVMsCount);
        SchedulerCounters.getInstance().endIteration(requestedWorkers, workersLaunched, assignmentResultSize,
            schedulingResult.getLeasesRejected());
        // Log a summary only every 10th busy iteration to limit log volume.
        if (requestedWorkers > 0 && SchedulerCounters.getInstance().getCounter().getIterationNumber() % 10 == 0) {
            logger.info("Scheduling iteration result: " + SchedulerCounters.getInstance().toJsonString());
        }
        if (idleVMsCount != idleMachinesCount.get()) {
            logger.info("Idle machines: " + idleVMsCount);
            idleMachinesCount.set(idleVMsCount);
        }
        try {
            // Refresh the cached VM-state snapshot at most once per vmCurrentStatesCheckInterval.
            taskSchedulingService.requestVmCurrentStates(vmCurrentStates -> {
                if (lastVmCurrentStatesCheckDone.get() < (System.currentTimeMillis() - vmCurrentStatesCheckInterval)) {
                    schedulingState.setVMCurrentState(vmCurrentStates);
                    verifyAndReportResUsageMetrics(vmCurrentStates);
                    lastVmCurrentStatesCheckDone.set(System.currentTimeMillis());
                }
            });
        } catch (final TaskQueueException e) {
            logger.warn("got exception requesting VM states from Fenzo", e);
        }
        publishJobManagerAndFenzoWorkerMetrics();
    } catch (final Exception e) {
        // The callback runs on Fenzo's scheduling loop; never let an exception escape.
        logger.error("unexpected exception in scheduling result callback", e);
        schedulingCallbackExceptions.increment();
    }
}
@Override
public void initializeRunningWorker(final ScheduleRequest request, String hostname, String hostID) {
    // hostID is unused here; Fenzo's running-task state is seeded by hostname only.
    taskSchedulingService.initializeRunningTask(request, hostname);
}

@Override
public void scheduleWorker(final ScheduleRequest scheduleRequest) {
    // Enqueue for the next Fenzo scheduling iteration.
    taskQueue.queueTask(scheduleRequest);
}

@Override
public void unscheduleWorker(final WorkerId workerId, final Optional<String> hostname) {
    taskSchedulingService.removeTask(workerId.getId(), DEFAULT_Q_ATTRIBUTES, hostname.orElse(null));
}

@Override
public void unscheduleAndTerminateWorker(final WorkerId workerId, final Optional<String> hostname) {
    // Remove from Fenzo first, then ask Mesos to kill the running task.
    taskSchedulingService.removeTask(workerId.getId(), DEFAULT_Q_ATTRIBUTES, hostname.orElse(null));
    virtualMachineService.killTask(workerId);
}

@Override
public void updateWorkerSchedulingReadyTime(WorkerId workerId, long when) {
    if (logger.isTraceEnabled()) {
        logger.trace("setting task {} ready time to {}", workerId, new DateTime(when));
    }
    // NOTE(review): this passes workerId.toString() while removeTask() above uses
    // workerId.getId(); if the two ever differ this targets the wrong Fenzo task id --
    // confirm WorkerId.toString() equals getId().
    taskSchedulingService.setTaskReadyTime(workerId.toString(), DEFAULT_Q_ATTRIBUTES, when);
}
/**
 * Expires the lease for the given offer id; the sentinel "ALL" expires every
 * outstanding lease.
 *
 * @param offerId the offer/lease id to expire, or "ALL".
 */
@Override
public void rescindOffer(final String offerId) {
    // Constant-first comparison avoids an NPE if a null offer id ever arrives.
    if ("ALL".equals(offerId)) {
        taskScheduler.expireAllLeases();
    } else {
        taskScheduler.expireLease(offerId);
    }
}
@Override
public void addOffers(final List<VirtualMachineLease> offers) {
    // Hand fresh resource offers to Fenzo as leases.
    taskSchedulingService.addLeases(offers);
}

@Override
public void rescindOffers(final String hostname) {
    // Expire every outstanding lease for the given host.
    taskScheduler.expireAllLeases(hostname);
}

@Override
public void disableVM(String hostname, long durationMillis) throws IllegalStateException {
    // Exclude the host from scheduling for the given duration.
    taskScheduler.disableVM(hostname, durationMillis);
}

@Override
public void enableVM(final String hostname) {
    taskScheduler.enableVM(hostname);
}

@Override
public List<VirtualMachineCurrentState> getCurrentVMState() {
    // Snapshot cached by schedulingResultHandler (refreshed at most every
    // vmCurrentStatesCheckInterval ms).
    return schedulingState.getVMCurrentState();
}

@Override
public void setActiveVmGroups(final List<String> activeVmGroups) {
    // A null list is ignored rather than clearing the active groups.
    if (activeVmGroups != null) {
        taskScheduler.setActiveVmGroups(activeVmGroups);
    }
}
/**
 * Registers a gauge reporting milliseconds since the last scheduling-result
 * callback, used to detect a stalled scheduler loop.
 */
private void setupSchedulingServiceWatcherMetric() {
    logger.info("Setting up SchedulingServiceWatcher metrics");
    lastSchedulingResultCallback.set(System.currentTimeMillis());
    final String metricGroup = "SchedulingServiceWatcher";
    final GaugeCallback timeSinceLastSchedulingRunGauge = new GaugeCallback(new MetricId(metricGroup, "timeSinceLastSchedulingRunMs"),
        () -> (double) (System.currentTimeMillis() - lastSchedulingResultCallback.get()),
        SpectatorRegistryFactory.getRegistry());
    final Metrics schedulingServiceWatcherMetrics = new Metrics.Builder()
        .id(metricGroup)
        .addGauge(timeSinceLastSchedulingRunGauge)
        .build();
    MetricsRegistry.getInstance().registerAndGet(schedulingServiceWatcherMetrics);
}
/**
 * Starts the Fenzo scheduling loop, deferred until this node enters active
 * (leader) mode; also registers the scheduler watcher metric and, at debug
 * level, dumps the current task queue contents.
 */
@Override
public void start() {
    super.awaitActiveModeAndStart(() -> {
        logger.info("Scheduling service starting now");
        taskSchedulingService.start();
        setupSchedulingServiceWatcherMetric();
        if (logger.isDebugEnabled()) {
            try {
                taskSchedulingService.requestAllTasks(taskStateCollectionMap -> taskStateCollectionMap.forEach((state, tasks) -> {
                    logger.debug("state {} tasks {}", state, tasks.toString());
                }));
            } catch (TaskQueueException e) {
                logger.error("caught exception", e);
            }
        }
    });
}
/**
 * Publishes Fenzo's LAUNCHED task count and the Job Manager's running-worker
 * count as gauges; when the two disagree, logs an error and (at debug level)
 * the exact set differences between the two bookkeeping systems.
 */
private void publishJobManagerAndFenzoWorkerMetrics() {
    try {
        taskSchedulingService.requestAllTasks(taskStateCollectionMap -> taskStateCollectionMap.forEach((state, tasks) -> {
            final int fenzoTaskSetSize = tasks.size();
            if (state == TaskQueue.TaskState.LAUNCHED) {
                final int numRunningWorkers = workerRegistry.getNumRunningWorkers(null);
                fenzoLaunchedTasks.set(fenzoTaskSetSize);
                jobMgrRunningWorkers.set(numRunningWorkers);
                if (numRunningWorkers != fenzoTaskSetSize) {
                    // Mismatch indicates drift between Fenzo and the Job Manager.
                    logger.error("{} running workers as per Job Manager, {} tasks launched as per Fenzo", numRunningWorkers, fenzoTaskSetSize);
                    if (logger.isDebugEnabled()) {
                        final Set<String> jobMgrWorkers = workerRegistry.getAllRunningWorkers(null)
                            .stream()
                            .map(w -> w.getId())
                            .collect(Collectors.toSet());
                        final Set<String> fenzoWorkers = tasks
                            .stream()
                            .map(t -> t.getId())
                            .collect(Collectors.toSet());
                        final Sets.SetView<String> extraJobMgrWorkers = Sets.difference(jobMgrWorkers, fenzoWorkers);
                        logger.info("Job Manager workers not in Fenzo {}", extraJobMgrWorkers);
                        final Sets.SetView<String> extraFenzoWorkers = Sets.difference(fenzoWorkers, jobMgrWorkers);
                        logger.info("Fenzo workers not in JobManagers {}", extraFenzoWorkers);
                    }
                }
            } else {
                logger.debug("{} {} tasks {}", fenzoTaskSetSize, state, tasks);
            }
        }));
    } catch (Exception e) {
        logger.error("caught exception when publishing worker metrics", e);
    }
}
/**
 * Aggregates CPU/memory/network availability and usage across all VMs, removes
 * Fenzo tasks whose workers the registry no longer considers valid, and
 * publishes utilization gauges including dominant-resource utilization.
 */
private void verifyAndReportResUsageMetrics(List<VirtualMachineCurrentState> vmCurrentStates) {
    double totalCPU = 0.0;
    double usedCPU = 0.0;
    double totalMemory = 0.0;
    double usedMemory = 0.0;
    double totalNwMbps = 0.0;
    double usedNwMbps = 0.0;
    for (VirtualMachineCurrentState state : vmCurrentStates) {
        final VirtualMachineLease currAvailableResources = state.getCurrAvailableResources();
        if (currAvailableResources != null) {
            totalCPU += currAvailableResources.cpuCores();
            totalMemory += currAvailableResources.memoryMB();
            totalNwMbps += currAvailableResources.networkMbps();
        }
        final Collection<TaskRequest> runningTasks = state.getRunningTasks();
        if (runningTasks != null) {
            for (TaskRequest t : runningTasks) {
                Optional<WorkerId> workerId = WorkerId.fromId(t.getId());
                if (!workerId.isPresent() || !workerRegistry.isWorkerValid(workerId.get())) {
                    // Reconciliation: drop Fenzo tasks whose workers are unknown/invalid.
                    taskSchedulingService.removeTask(t.getId(), DEFAULT_Q_ATTRIBUTES, state.getHostname());
                } else {
                    // Totals include allocated resources, so total = available + allocated.
                    usedCPU += t.getCPUs();
                    totalCPU += t.getCPUs();
                    usedMemory += t.getMemory();
                    totalMemory += t.getMemory();
                    usedNwMbps += t.getNetworkMbps();
                    totalNwMbps += t.getNetworkMbps();
                }
            }
        }
    }
    // NOTE(review): when a total is 0 the division yields NaN, and the (long)
    // cast maps NaN to 0, so the utilization gauges silently read 0.
    totalAvailableCPUs.set((long) totalCPU);
    totalAllocatedCPUs.set((long) usedCPU);
    cpuUtilization.set((long) (usedCPU * 100.0 / totalCPU));
    // Dominant-resource utilization: max of cpu/memory/network utilization percentages.
    double DRU = usedCPU * 100.0 / totalCPU;
    totalAvailableMemory.set((long) totalMemory);
    totalAllocatedMemory.set((long) usedMemory);
    memoryUtilization.set((long) (usedMemory * 100.0 / totalMemory));
    DRU = Math.max(DRU, usedMemory * 100.0 / totalMemory);
    totalAvailableNwMbps.set((long) totalNwMbps);
    totalAllocatedNwMbps.set((long) usedNwMbps);
    networkUtilization.set((long) (usedNwMbps * 100.0 / totalNwMbps));
    DRU = Math.max(DRU, usedNwMbps * 100.0 / totalNwMbps);
    dominantResUtilization.set((long) DRU);
}
@Override
public void shutdown() {
if (!taskSchedulingService.isShutdown()) {
logger.info("shutting down Task Scheduling Service");
taskSchedulingService.shutdown();
}
}
}
| 7,952 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/ILeadershipManager.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
import io.mantisrx.server.core.master.MasterDescription;
public interface ILeadershipManager {
    /** Executes all actions required when this node becomes the leader. */
    void becomeLeader();
    /** Executes all actions required when this node loses leadership. */
    void stopBeingLeader();
    /** @return {@code true} if the node executing this method is currently the leader. */
    boolean isLeader();
    /** @return {@code true} if the leader has finished bootstrapping and is ready to serve. */
    boolean isReady();
    /** Marks the leader as bootstrapped and ready to serve requests. */
    void setLeaderReady();
    /** @return the {@link MasterDescription} of the node executing this method. */
    MasterDescription getDescription();
}
| 7,953 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/LeaderRedirectionFilter.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
import akka.http.javadsl.model.StatusCodes;
import akka.http.javadsl.model.Uri;
import akka.http.javadsl.server.AllDirectives;
import akka.http.javadsl.server.Route;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.core.master.MasterMonitor;
import java.net.InetAddress;
import java.net.UnknownHostException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Akka HTTP directive helper that routes requests based on leadership state:
 * requests reaching a non-leader node are either redirected (302) to the current
 * leader or rejected (500); a leader that has not finished bootstrapping answers 503.
 */
public class LeaderRedirectionFilter extends AllDirectives {

    public static final Logger logger = LoggerFactory.getLogger(LeaderRedirectionFilter.class);
    private final MasterMonitor masterMonitor;
    private final ILeadershipManager leadershipManager;
    private final Counter api503MasterNotReady;
    private final Counter apiRedirectsToLeader;

    public LeaderRedirectionFilter(final MasterMonitor masterMonitor, final ILeadershipManager leadershipManager) {
        this.masterMonitor = masterMonitor;
        this.leadershipManager = leadershipManager;
        Metrics m = new Metrics.Builder()
            .id("LeaderRedirectionFilter")
            .addCounter("api503MasterNotReady")
            .addCounter("apiRedirectsToLeader")
            .build();
        this.api503MasterNotReady = m.getCounter("api503MasterNotReady");
        this.apiRedirectsToLeader = m.getCounter("apiRedirectsToLeader");
    }

    /**
     * @return {@code true} if any address the master's hostname resolves to equals this
     *     host's local address; {@code false} otherwise, including when resolution fails.
     */
    private boolean isLocalHost(MasterDescription master) {
        try {
            InetAddress localHost = InetAddress.getLocalHost();
            for (InetAddress addr : InetAddress.getAllByName(master.getHostname())) {
                if (addr.equals(localHost)) {
                    return true;
                }
            }
        } catch (UnknownHostException e) {
            // Treat resolution failure as "not local"; log at debug to avoid noisy warn spam.
            logger.debug("Failed to compare if given master {} is local host", master, e);
            return false;
        }
        return false;
    }

    /**
     * Route used when this node should serve the request but the leader is not yet
     * bootstrapped: answers 503 and bumps the not-ready counter.
     */
    private Route masterNotReadyRoute() {
        return extractUri(uri -> {
            logger.info("leader is not ready, returning 503 for {}", uri);
            api503MasterNotReady.increment();
            return complete(StatusCodes.SERVICE_UNAVAILABLE, "Mantis master awaiting to be ready");
        });
    }

    /**
     * Serves {@code leaderRoute} if this node is (or resolves to) the leader and is ready;
     * answers 503 when the leader is not ready; otherwise 302-redirects to the leader.
     */
    public Route redirectIfNotLeader(final Route leaderRoute) {
        MasterDescription latestMaster = masterMonitor.getLatestMaster();
        if (leadershipManager.isLeader() || isLocalHost(latestMaster)) {
            return leadershipManager.isReady() ? leaderRoute : masterNotReadyRoute();
        }
        String hostname = latestMaster.getHostname();
        int apiPort = latestMaster.getApiPort();
        return extractUri(uri -> {
            Uri redirectUri = uri.host(hostname).port(apiPort);
            apiRedirectsToLeader.increment();
            logger.info("redirecting request {} to leader", redirectUri.toString());
            return redirect(redirectUri, StatusCodes.FOUND);
        });
    }

    /**
     * Serves {@code leaderRoute} if this node is (or resolves to) the leader and is ready;
     * answers 503 when the leader is not ready; otherwise fails with 500 instead of redirecting.
     */
    public Route rejectIfNotLeader(final Route leaderRoute) {
        MasterDescription latestMaster = masterMonitor.getLatestMaster();
        if (!leadershipManager.isLeader() && !isLocalHost(latestMaster)) {
            return extractUri(uri -> {
                logger.info("not leader, returning 500 for {}", uri);
                return complete(StatusCodes.INTERNAL_SERVER_ERROR, "this node is not leader");
            });
        }
        return leadershipManager.isReady() ? leaderRoute : masterNotReadyRoute();
    }
}
| 7,954 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/InvalidJobRequest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
/**
 * Signals that a submitted {@link JobRequest} failed validation or could not be
 * processed. The offending request is retained and exposed via {@link #getRequest()}.
 */
public class InvalidJobRequest extends Exception {

    private static final long serialVersionUID = 1L;

    // The request that triggered this exception.
    private final JobRequest jobRequest;

    public InvalidJobRequest(JobRequest request, String msg) {
        super(msg);
        this.jobRequest = request;
    }

    public InvalidJobRequest(JobRequest request, Throwable e) {
        super(e);
        this.jobRequest = request;
    }

    /** @return the request that triggered this exception. */
    public JobRequest getRequest() {
        return jobRequest;
    }
}
| 7,955 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/ClusterFitnessCalculator.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master;
import com.netflix.fenzo.TaskRequest;
import com.netflix.fenzo.TaskTrackerState;
import com.netflix.fenzo.VMTaskFitnessCalculator;
import com.netflix.fenzo.VirtualMachineCurrentState;
import com.netflix.fenzo.VirtualMachineLease;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.scheduler.ScheduleRequest;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Fenzo fitness calculator that prefers placing a task on a VM belonging to the
 * task's preferred agent cluster (identified by a configurable VM attribute).
 * A matching (or indifferent) placement scores {@link #PERFECT_FITNESS}; a
 * mismatched or unknown cluster scores {@link #CLUSTER_MISMATCH_FITNESS}.
 */
public class ClusterFitnessCalculator implements VMTaskFitnessCalculator {

    private static final Logger logger = LoggerFactory.getLogger(ClusterFitnessCalculator.class);

    /** Score when the target VM's cluster is missing or differs from the preferred cluster. */
    private static final double CLUSTER_MISMATCH_FITNESS = 0.8;
    /** Score when there is no cluster preference or the target VM's cluster matches it. */
    private static final double PERFECT_FITNESS = 1.0;

    private final String clusterAttributeName;

    public ClusterFitnessCalculator() {
        clusterAttributeName = ConfigurationProvider.getConfig().getSlaveClusterAttributeName();
    }

    /**
     * @return the text value of the named attribute on the lease, if the attribute map,
     *     attribute, and its text value are all present; empty otherwise.
     */
    private Optional<String> getAttribute(final VirtualMachineLease lease, final String attributeName) {
        boolean hasValue = lease.getAttributeMap() != null
            && lease.getAttributeMap().get(attributeName) != null
            && lease.getAttributeMap().get(attributeName).getText().hasValue();
        return hasValue ? Optional.of(lease.getAttributeMap().get(attributeName).getText().getValue()) : Optional.empty();
    }

    @Override
    public String getName() {
        return "Mantis Job Cluster Fitness Calculator";
    }

    /**
     * Scores the target VM for the given request based solely on cluster preference;
     * other fitness criteria are combined elsewhere.
     */
    @Override
    public double calculateFitness(TaskRequest taskRequest, VirtualMachineCurrentState targetVM, TaskTrackerState taskTrackerState) {
        final Optional<String> preferredCluster = ((ScheduleRequest) taskRequest)
            .getPreferredCluster();
        if (preferredCluster.isPresent()) {
            // Task has a preferred cluster set; check whether the targetVM belongs to it.
            final Optional<String> targetVMCluster = getAttribute(targetVM.getCurrAvailableResources(),
                clusterAttributeName);
            if (!targetVMCluster.isPresent() ||
                !targetVMCluster.get().equals(preferredCluster.get())) {
                // Target VM cluster missing or different: not an ideal fit for this request.
                if (logger.isDebugEnabled()) {
                    logger.debug("preferred cluster {} targetVM cluster {}", preferredCluster.get(), targetVMCluster.orElse("missing"));
                }
                return CLUSTER_MISMATCH_FITNESS;
            }
        }
        // No cluster preference, or the targetVM's cluster matches the preferred one:
        // perfect fit; defer to other fitness criteria for final selection.
        return PERFECT_FITNESS;
    }
}
| 7,956 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/config/ConfigurationFactory.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.config;
/**
* An implementation of this class should return an instance of {@link io.mantisrx.server.master.config.MasterConfiguration}.
* We create this factory because it's possible that the logic of creating a {@link io.mantisrx.server.master.config.MasterConfiguration}
* can change depending on the user or environment.
*
* @see ConfigurationProvider
*/
public interface ConfigurationFactory {
    /** @return the {@link MasterConfiguration} instance produced by this factory. */
    MasterConfiguration getConfig();
}
| 7,957 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/config/ConfigurationProvider.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.config;
/**
* Provides static and global access to configuration objects. The method {@link ConfigurationProvider#initialize(ConfigurationFactory)}
* must be called before this class can be used.
*
* @see io.mantisrx.server.master.config.ConfigurationFactory
*/
/**
 * Static, global access point for the {@link MasterConfiguration}.
 * {@link #initialize(ConfigurationFactory)} must be called before {@link #getConfig()}.
 */
public class ConfigurationProvider {

    // volatile: initialize() is typically invoked once at startup, while getConfig() may
    // be called from arbitrary threads — this guarantees the published factory is visible.
    private static volatile ConfigurationFactory factory;

    /** Installs the factory used by subsequent {@link #getConfig()} calls. */
    public static void initialize(ConfigurationFactory aFactory) {
        factory = aFactory;
    }

    // For testing only: clears the installed factory and returns the previous one.
    static ConfigurationFactory reset() {
        ConfigurationFactory current = factory;
        factory = null;
        return current;
    }

    /**
     * @return a {@link io.mantisrx.server.master.config.MasterConfiguration} object.
     *
     * @throws IllegalStateException if the method {@link ConfigurationProvider#initialize(ConfigurationFactory)} is not
     * called yet.
     */
    public static MasterConfiguration getConfig() {
        // Read the field once so the null-check and the use see the same reference.
        ConfigurationFactory f = factory;
        if (f == null) {
            throw new IllegalStateException(String.format("%s#initialize() must be called first. ", ConfigurationFactory.class.getName()));
        }
        return f.getConfig();
    }
}
| 7,958 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/config/MasterConfiguration.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.config;
import io.mantisrx.master.jobcluster.job.CostsCalculator;
import io.mantisrx.server.core.CoreConfiguration;
import io.mantisrx.server.master.store.KeyValueStore;
import java.time.Duration;
import org.skife.config.Config;
import org.skife.config.Default;
import org.skife.config.DefaultNull;
/**
 * Master configuration surface, materialized from properties by skife-config
 * ({@code @Config} names the property key, {@code @Default}/{@code @DefaultNull}
 * supply fallbacks). Extends the shared {@link CoreConfiguration}.
 */
public interface MasterConfiguration extends CoreConfiguration {
    // ---- ports, endpoints and providers ----
    @Config("mantis.master.consoleport")
    int getConsolePort();
    @Config("mantis.master.apiport")
    int getApiPort();
    @Config("mantis.master.schedInfoPort")
    int getSchedInfoPort();
    @Config("mantis.master.apiportv2")
    int getApiPortV2();
    @Config("mantis.master.metrics.port")
    int getMasterMetricsPort();
    @Config("mantis.master.api.status.path")
    String getApiStatusUri();
    @Config("mantis.master.storageProvider")
    KeyValueStore getStorageProvider();
    @Config("mantis.master.resourceClusterProvider")
    String getResourceClusterProvider();
    @Config("mantis.master.host")
    @DefaultNull
    String getMasterHost();
    @Config("mantis.master.ip")
    @DefaultNull
    String getMasterIP();
    // ---- Mesos scheduler driver and worker bootstrap ----
    @Config("mesos.scheduler.driver.init.timeout.sec")
    @Default("2")
    int getMesosSchedulerDriverInitTimeoutSec();
    @Config("mesos.scheduler.driver.init.max.attempts")
    @Default("3")
    int getMesosSchedulerDriverInitMaxAttempts();
    @Config("mesos.worker.timeoutSecondsToReportStart")
    @Default("10")
    int getTimeoutSecondsToReportStart();
    @Config("mantis.master.leader.mismatch.retry.count")
    @Default("5")
    int getMasterLeaderMismatchRetryCount();
    @Config("master.shutdown.curator.service.enabled")
    @Default("true")
    boolean getShutdownCuratorServiceEnabled();
    // ---- API route ask timeouts ----
    @Config("mantis.master.api.route.ask.timeout.millis")
    @Default("1000")
    long getMasterApiAskTimeoutMs();
    @Config("mantis.master.api.route.ask.longOperation.timeout.millis")
    @Default("2500")
    long getMasterApiLongOperationAskTimeoutMs();
    @Config("mantis.mesos.enabled")
    @Default("true")
    boolean getMesosEnabled();
    @Config("mesos.master.location")
    @Default("localhost:5050")
    String getMasterLocation();
    @Config("mesos.worker.installDir")
    String getWorkerInstallDir();
    @Config("mesos.worker.executorscript")
    @Default("startup.sh")
    String getWorkerExecutorScript();
    // ---- per-worker machine-definition caps ----
    @Config("mantis.worker.machine.definition.maxCpuCores")
    @Default("8")
    int getWorkerMachineDefinitionMaxCpuCores();
    @Config("mantis.worker.machine.definition.maxMemoryMB")
    @Default("28000")
    int getWorkerMachineDefinitionMaxMemoryMB();
    @Config("mantis.worker.machine.definition.maxNetworkMbps")
    @Default("1024")
    int getWorkerMachineDefinitionMaxNetworkMbps();
    @Config("mantis.master.max.workers.per.stage")
    @Default("1500")
    int getMaxWorkersPerStage();
    @Config("mantis.master.worker.jvm.memory.scale.back.percent")
    @Default("10")
    int getWorkerJvmMemoryScaleBackPercentage();
    // ---- slave/agent attribute names and filtering ----
    @Config("mesos.useSlaveFiltering")
    @Default("false")
    boolean getUseSlaveFiltering();
    @Config("mesos.slaveFilter.attributeName")
    @Default("EC2_AMI_ID")
    String getSlaveFilterAttributeName();
    @Config("mantis.master.active.slave.attribute.name")
    @Default("NETFLIX_AUTO_SCALE_GROUP")
    String getActiveSlaveAttributeName();
    @Config("mantis.master.slave.cluster.attribute.name")
    @Default("CLUSTER_NAME")
    String getSlaveClusterAttributeName();
    // ---- fitness calculator weights (should sum sensibly with the threshold below) ----
    @Config("mantis.master.agent.fitness.cluster.weight")
    @Default("0.2")
    double getPreferredClusterFitnessWeight();
    @Config("mantis.master.agent.fitness.durationtype.weight")
    @Default("0.5")
    double getDurationTypeFitnessWeight();
    @Config("mantis.master.agent.fitness.binpacking.weight")
    @Default("0.3")
    double getBinPackingFitnessWeight();
    // Threshold value compared should make sense with the 3 fitness weights above that aggregates the weighted results from
    // individual fitness calculators.
    @Config("mantis.master.agent.fitness.goodenough.threshold")
    @Default("0.63")
    double getFitnessGoodEnoughThreshold();
    @Config("mantis.master.framework.name")
    @Default("MantisFramework")
    String getMantisFrameworkName();
    @Config("mantis.master.framework.user")
    @Default("")
    String getMantisFrameworkUserName();
    @Config("mantis.worker.executor.name")
    @Default("Mantis Worker Executor")
    String getWorkerExecutorName();
    @Config("mantis.master.mesos.failover.timeout.secs")
    @Default("604800.0")
    // 604800 secs = 1 week
    double getMesosFailoverTimeOutSecs();
    // Sleep interval between consecutive scheduler iterations
    @Config("mantis.master.scheduler.iteration.interval.millis")
    @Default("50")
    long getSchedulerIterationIntervalMillis();
    @Config("mantis.master.scheduler.disable.slave.duration.secs")
    @Default("60")
    long getDisableSlaveDurationSecs();
    // Sleep interval between consecutive scheduler retries
    @Config("mantis.master.scheduler.retry-interval.millis")
    @Default("60000") // 1 minute
    int getSchedulerIntervalBetweenRetriesInMs();
    default Duration getSchedulerIntervalBetweenRetries() {
        return Duration.ofMillis(getSchedulerIntervalBetweenRetriesInMs());
    }
    @Config("mantis.master.scheduler.max-retries")
    @Default("10")
    int getSchedulerMaxRetries();
    @Config("mantis.zookeeper.leader.election.path")
    String getLeaderElectionPath();
    // ---- worker heartbeat handling ----
    @Config("mantis.worker.heartbeat.intervalv2.secs")
    @Default("20")
    long getDefaultWorkerHeartbeatIntervalSecs();
    //todo: fix the property name, ideally to mantis.worker.timeout.secs
    @Config("mantis.worker.heartbeat.interval.secs")
    @Default("60")
    long getDefaultWorkerTimeoutSecs();
    @Config("mantis.worker.heartbeat.interval.init.secs")
    @Default("180")
    long getWorkerInitTimeoutSecs();
    @Config("mantis.worker.heartbeat.receipts.min.threshold.percent")
    @Default("55")
    double getHeartbeatReceiptsMinThresholdPercentage();
    @Config("mantis.master.stage.assignment.refresh.interval.ms")
    @Default("1000")
    long getStageAssignmentRefreshIntervalMs();
    @Config("mantis.worker.heartbeat.termination.enabled")
    @Default("true")
    boolean isHeartbeatTerminationEnabled();
    @Config("mantis.worker.heartbeat.processing.enabled")
    @Default("true")
    boolean isHeartbeatProcessingEnabled();
    @Config("mantis.interval.move.workers.disabled.vms.millis")
    @Default("60000")
    long getIntervalMoveWorkersOnDisabledVMsMillis();
    @Config("mesos.task.reconciliation.interval.secs")
    @Default("300")
    long getMesosTaskReconciliationIntervalSecs();
    @Config("mesos.lease.offer.expiry.secs")
    @Default("300")
    long getMesosLeaseOfferExpirySecs();
    // ---- job/worker lifecycle limits and resubmission policy ----
    @Config("mantis.jobs.max.jars.per.named.job")
    @Default("10")
    int getMaximumNumberOfJarsPerJobName();
    @Config("mantis.worker.resubmissions.maximum")
    @Default("100")
    int getMaximumResubmissionsPerWorker();
    @Config("mantis.worker.resubmission.interval.secs")
    @Default("5:10:20")
    String getWorkerResubmitIntervalSecs();
    @Config("mantis.worker.expire.resubmit.delay.secs")
    @Default("300")
    long getExpireWorkerResubmitDelaySecs();
    @Config("mantis.worker.expire.resubmit.execution.interval.secs")
    @Default("120")
    long getExpireResubmitDelayExecutionIntervalSecs();
    @Config("mantis.master.purge.frequency.secs")
    @Default("1200")
    long getCompletedJobPurgeFrequencySeqs();
    @Config("mantis.master.purge.size")
    @Default("50")
    int getMaxJobsToPurge();
    @Config("mantis.worker.state.launched.timeout.millis")
    @Default("7000")
    long getWorkerInLaunchedStateTimeoutMillis();
    @Config("mantis.master.store.worker.writes.batch.size")
    @Default("100")
    int getWorkerWriteBatchSize();
    @Config("mantis.master.ephemeral.job.unsubscribed.timeout.secs")
    @Default("300")
    long getEphemeralJobUnsubscribedTimeoutSecs();
    @Config("mantis.master.init.timeout.secs")
    @Default("240")
    long getMasterInitTimeoutSecs();
    @Config("mantis.master.terminated.job.to.delete.delay.hours")
    @Default("360")
    // 15 days * 24 hours
    long getTerminatedJobToDeleteDelayHours();
    @Config("mantis.master.max.archived.jobs.to.cache")
    @Default("1000")
    int getMaxArchivedJobsToCache();
    // ---- agent-cluster autoscaling ----
    @Config("mesos.slave.attribute.zone.name")
    @Default("AWSZone")
    String getHostZoneAttributeName();
    @Config("mantis.agent.cluster.autoscale.by.attribute.name")
    @Default("CLUSTER_NAME")
    String getAutoscaleByAttributeName();
    @Config("mantis.agent.cluster.autoscaler.map.hostname.attribute.name")
    @Default("EC2_INSTANCE_ID")
    String getAutoScalerMapHostnameAttributeName();
    @Config("mantis.agent.cluster.autoscaler.shortfall.evaluation.disabled")
    @Default("false")
    boolean getDisableShortfallEvaluation();
    @Config("mantis.scheduling.info.observable.heartbeat.interval.secs")
    @Default("120")
    long getSchedulingInfoObservableHeartbeatIntervalSecs();
    // ---- job-master (autoscaler stage) resource sizing ----
    @Config("mantis.job.master.scheduling.info.cores")
    @Default("2.0")
    double getJobMasterCores();
    @Config("mantis.job.master.scheduling.info.memoryMB")
    @Default("4096.0")
    double getJobMasterMemoryMB();
    @Config("mantis.job.master.scheduling.info.networkMbps")
    @Default("128.0")
    double getJobMasterNetworkMbps();
    @Config("mantis.job.master.scheduling.info.diskMB")
    @Default("100.0")
    double getJobMasterDiskMB();
    // ---- API response cache ----
    @Config("mantis.master.api.cache.ttl.milliseconds")
    @Default("250")
    int getApiCacheTtlMilliseconds();
    @Config("mantis.master.api.cache.size.max")
    @Default("50")
    int getApiCacheMaxSize();
    @Config("mantis.master.api.cache.size.min")
    @Default("5")
    int getApiCacheMinSize();
    @Config("mantis.agent.heartbeat.interval.ms")
    @Default("300000") // 5 minutes
    int getHeartbeatIntervalInMs();
    /**
     * Config value for each {@link io.mantisrx.master.resourcecluster.ResourceClusterScalerActor}'s timer to trigger
     * check on current cluster usage.
     */
    @Config("mantis.job.master.resource.cluster.scaler.interval.secs")
    @Default("60")
    int getScalerTriggerThresholdInSecs();
    /**
     * Config value for each {@link io.mantisrx.master.resourcecluster.ResourceClusterScalerActor}'s timer to refresh
     * its cached scale rules.
     */
    @Config("mantis.job.master.resource.cluster.scaler.ruleset.refresh.secs")
    @Default("180")
    int getScalerRuleSetRefreshThresholdInSecs();
    @Config("mantis.agent.assignment.interval.ms")
    @Default("60000") // 1 minute
    int getAssignmentIntervalInMs();
    @Config("mantis.job.costsCalculator.class")
    @Default("io.mantisrx.master.jobcluster.job.NoopCostsCalculator")
    CostsCalculator getJobCostsCalculator();
    // ---- job artifact caching ----
    @Config("mantis.job.worker.max.artifacts.to.cache")
    @Default("5")
    int getMaxJobArtifactsToCache();
    @Config("mantis.artifactCaching.jobClusters")
    @Default("")
    String getJobClustersWithArtifactCachingEnabled();
    @Config("mantis.artifactCaching.enabled")
    @Default("true")
    boolean isJobArtifactCachingEnabled();
    // rate limit actions on resource cluster actor to control backlog.
    @Config("mantis.master.resource.cluster.actions.permitsPerSecond")
    @Default("2000")
    int getResourceClusterActionsPermitsPerSecond();
    /** Heartbeat interval as a {@link Duration} convenience view. */
    default Duration getHeartbeatInterval() {
        return Duration.ofMillis(getHeartbeatIntervalInMs());
    }
    /** Assignment interval as a {@link Duration} convenience view. */
    default Duration getMaxAssignmentThreshold() {
        return Duration.ofMillis(getAssignmentIntervalInMs());
    }
}
| 7,959 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/config/StaticPropertiesConfigurationFactory.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.config;
import io.mantisrx.server.core.MetricsCoercer;
import java.util.Properties;
import org.skife.config.ConfigurationObjectFactory;
/**
 * {@link ConfigurationFactory} that builds a {@link MasterConfiguration} once, eagerly,
 * from a fixed set of {@link Properties} via skife-config.
 */
public class StaticPropertiesConfigurationFactory implements ConfigurationFactory {

    private final ConfigurationObjectFactory delegate;
    private final MasterConfiguration config;
    private final Properties properties;

    /**
     * @param props the property set backing the configuration; builds the
     *     {@link MasterConfiguration} immediately (fail-fast on invalid values)
     */
    public StaticPropertiesConfigurationFactory(Properties props) {
        this.properties = props;
        delegate = new ConfigurationObjectFactory(props);
        delegate.addCoercible(new MetricsCoercer(props));
        // Generic coercer: treats a property value as a fully-qualified class name and
        // instantiates it via the no-arg constructor, provided the class is assignable
        // to the requested config type. Returning null lets other coercers try.
        delegate.addCoercible(clazz -> {
            return className -> {
                try {
                    // Resolve once instead of calling Class.forName twice.
                    Class<?> implClass = Class.forName(className);
                    if (!clazz.isAssignableFrom(implClass)) {
                        return null;
                    }
                    try {
                        // getDeclaredConstructor().newInstance() replaces the deprecated
                        // Class.newInstance(), which propagated checked ctor exceptions.
                        return implClass.getDeclaredConstructor().newInstance();
                    } catch (Exception e) {
                        throw new IllegalArgumentException(
                            String.format(
                                "The value %s is not a valid class name for %s implementation. ",
                                className,
                                clazz.getName()),
                            e); // preserve the root cause instead of swallowing it
                    }
                } catch (ClassNotFoundException e) {
                    return null;
                }
            };
        });
        config = delegate.build(MasterConfiguration.class);
    }

    @Override
    public MasterConfiguration getConfig() {
        return this.config;
    }

    @Override
    public String toString() {
        return "StaticPropertiesConfigurationFactory{" +
            "delegate=" + delegate +
            ", config=" + config +
            '}';
    }
}
| 7,960 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/agentdeploy/MigrationStrategyFactory.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.agentdeploy;
import io.mantisrx.runtime.MigrationStrategy;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.server.master.utils.MantisSystemClock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Creates the {@link MigrationStrategy} configured for a job. Unknown strategy types
 * fall back to a default percentage strategy (25% of workers every minute).
 */
public class MigrationStrategyFactory {

    private static final Logger logger = LoggerFactory.getLogger(MigrationStrategyFactory.class);

    public static MigrationStrategy getStrategy(final String jobId, final WorkerMigrationConfig config) {
        if (config.getStrategy() == WorkerMigrationConfig.MigrationStrategyEnum.PERCENTAGE) {
            return new PercentageMigrationStrategy(MantisSystemClock.INSTANCE, jobId, config);
        }
        if (config.getStrategy() == WorkerMigrationConfig.MigrationStrategyEnum.ONE_WORKER) {
            return new OneWorkerPerTickMigrationStrategy(MantisSystemClock.INSTANCE, jobId, config);
        }
        // Unrecognized strategy: log and fall back to the default percentage policy.
        logger.error("unknown strategy type {} in config {}, using default strategy to migrate 25 percent every 1 min", config.getStrategy(), config);
        final WorkerMigrationConfig fallback = new WorkerMigrationConfig(
            WorkerMigrationConfig.MigrationStrategyEnum.PERCENTAGE,
            "{\"percentToMove\":25,\"intervalMs\":60000}");
        return new PercentageMigrationStrategy(MantisSystemClock.INSTANCE, jobId, fallback);
    }
}
| 7,961 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/agentdeploy/PercentageMigrationStrategy.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.agentdeploy;
import io.mantisrx.runtime.MigrationStrategy;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.utils.MantisClock;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentSkipListSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class PercentageMigrationStrategy extends MigrationStrategy {
private static final Logger logger = LoggerFactory.getLogger(PercentageMigrationStrategy.class);
private static final int DEFAULT_PERCENT_WORKERS = 10;
private static final ObjectMapper objectMapper = new ObjectMapper();
static {
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
}
private final MantisClock clock;
private final String jobId;
private final Configuration configuration;
    /**
     * Builds a percentage-based migration strategy for the given job.
     *
     * @param clock clock used to rate-limit migration ticks
     * @param jobId id of the job whose workers are migrated (used in log messages)
     * @param config migration config; its configString is parsed as JSON
     *     ({@code percentToMove}, {@code intervalMs})
     */
    public PercentageMigrationStrategy(final MantisClock clock,
                                       final String jobId,
                                       final WorkerMigrationConfig config) {
        super(config);
        this.clock = clock;
        this.jobId = jobId;
        long defaultMigrationIntervalMs;
        try {
            defaultMigrationIntervalMs = ConfigurationProvider.getConfig().getIntervalMoveWorkersOnDisabledVMsMillis();
        } catch (IllegalStateException ise) {
            // ConfigurationProvider not initialized (e.g. in unit tests): fall back to 1 minute.
            logger.warn("Error reading intervalMoveWorkersOnDisabledVMsMillis from config Provider, will default to 1 minute");
            defaultMigrationIntervalMs = 60_000L;
        }
        configuration = parseConfig(config.getConfigString(), defaultMigrationIntervalMs);
    }
Configuration parseConfig(final String configuration, final long defaultMigrationIntervalMs) {
try {
return objectMapper.readValue(configuration, Configuration.class);
} catch (IOException e) {
logger.error("failed to parse config '{}' for job {}, default to {} percent workers migrated every {} millis", configuration, jobId, DEFAULT_PERCENT_WORKERS, defaultMigrationIntervalMs);
return new Configuration(DEFAULT_PERCENT_WORKERS, defaultMigrationIntervalMs);
}
}
@Override
public List<Integer> execute(final ConcurrentSkipListSet<Integer> workersOnDisabledVms,
final int numRunningWorkers,
final int totalNumWorkers,
final long lastWorkerMigrationTimestamp) {
if (lastWorkerMigrationTimestamp > (clock.now() - configuration.getIntervalMs())) {
return Collections.emptyList();
}
if (workersOnDisabledVms.isEmpty()) {
return Collections.emptyList();
}
final int numWorkersOnDisabledVM = workersOnDisabledVms.size();
final int numInactiveWorkers = totalNumWorkers - numRunningWorkers;
int numWorkersToMigrate = Math.min(numWorkersOnDisabledVM, Math.max(1, (int) Math.ceil(totalNumWorkers * configuration.getPercentToMove() / 100.0)));
// If we already have inactive workers for the job, don't migrate more workers as we could end up with all workers in not running state for a job
if (numInactiveWorkers >= numWorkersToMigrate) {
logger.debug("[{}] num inactive workers {} > num workers to migrate {}, suppressing percent migrate", jobId, numInactiveWorkers, numWorkersToMigrate);
return Collections.emptyList();
} else {
// ensure no more than percentToMove workers for the job are in inactive state
numWorkersToMigrate = numWorkersToMigrate - numInactiveWorkers;
}
final List<Integer> workersToMigrate = new ArrayList<>(numWorkersToMigrate);
for (int i = numWorkersToMigrate; i > 0; i--) {
final Integer workerToMigrate = workersOnDisabledVms.pollFirst();
if (workerToMigrate != null) {
workersToMigrate.add(workerToMigrate);
}
}
if (workersToMigrate.size() > 0) {
logger.debug("migrating jobId {} workers {}", jobId, workersToMigrate);
}
return workersToMigrate;
}
public Configuration getConfiguration() {
return configuration;
}
static class Configuration {
private final int percentToMove;
private final long intervalMs;
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public Configuration(@JsonProperty("percentToMove") final int percentToMove,
@JsonProperty("intervalMs") final long intervalMs) {
this.percentToMove = percentToMove;
this.intervalMs = intervalMs;
}
public int getPercentToMove() {
return percentToMove;
}
public long getIntervalMs() {
return intervalMs;
}
}
}
| 7,962 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/agentdeploy/OneWorkerPerTickMigrationStrategy.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.agentdeploy;
import io.mantisrx.runtime.MigrationStrategy;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.utils.MantisClock;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentSkipListSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Migration strategy that moves at most one worker off a disabled VM per tick, rate limited by
 * the configured {@code intervalMoveWorkersOnDisabledVMsMillis}.
 */
public class OneWorkerPerTickMigrationStrategy extends MigrationStrategy {

    private static final Logger logger = LoggerFactory.getLogger(OneWorkerPerTickMigrationStrategy.class);

    private final String jobId;
    private final MantisClock clock;
    // Minimum time between successive migrations; immutable after construction.
    private final long intervalMoveWorkersOnDisabledVMsMillis;

    /**
     * @param clock  clock used to rate limit successive migrations
     * @param jobId  id of the job whose workers are migrated (used only for logging)
     * @param config worker migration config passed to the base strategy
     */
    public OneWorkerPerTickMigrationStrategy(final MantisClock clock,
                                             final String jobId,
                                             final WorkerMigrationConfig config) {
        super(config);
        this.clock = clock;
        this.jobId = jobId;
        // Resolve the interval via a local so the field can stay final.
        long intervalMillis;
        try {
            intervalMillis = ConfigurationProvider.getConfig().getIntervalMoveWorkersOnDisabledVMsMillis();
        } catch (IllegalStateException ise) {
            logger.warn("[{}] Error reading intervalMoveWorkersOnDisabledVMsMillis from config Provider, will default to 1 minute", jobId);
            intervalMillis = 60_000L;
        }
        this.intervalMoveWorkersOnDisabledVMsMillis = intervalMillis;
    }

    /**
     * Returns at most one worker (the smallest worker number) to migrate, removing it from the
     * given set; returns an empty list when rate limited or when no workers are on disabled VMs.
     */
    @Override
    public List<Integer> execute(final ConcurrentSkipListSet<Integer> workersOnDisabledVms,
                                 final int numRunningWorkers,
                                 final int totalNumWorkers,
                                 final long lastMovedWorkerOnDisabledVM) {
        if (lastMovedWorkerOnDisabledVM > (clock.now() - intervalMoveWorkersOnDisabledVMsMillis)) {
            return Collections.emptyList();
        }
        final Integer workerNumber = workersOnDisabledVms.pollFirst();
        if (workerNumber != null) {
            return Collections.singletonList(workerNumber);
        } else {
            return Collections.emptyList();
        }
    }
}
| 7,963 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/scheduler/ScheduleRequest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.scheduler;
import com.netflix.fenzo.ConstraintEvaluator;
import com.netflix.fenzo.VMTaskFitnessCalculator;
import com.netflix.fenzo.queues.QAttributes;
import com.netflix.fenzo.queues.QueuableTask;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.server.core.domain.JobMetadata;
import io.mantisrx.server.core.domain.WorkerId;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
/**
 * Fenzo queueable task describing a single Mantis worker to be placed: which worker, which stage,
 * the resources it requires, its scheduling constraints, and the earliest time it may be
 * scheduled. Identity (equals/hashCode) is based solely on the worker id.
 */
public class ScheduleRequest implements QueuableTask {

    /** All schedule requests share one default queue bucket at tier 0. */
    public static final QAttributes DEFAULT_Q_ATTRIBUTES = new QAttributes() {
        @Override
        public String getBucketName() {
            return "default";
        }

        @Override
        public int getTierNumber() {
            return 0;
        }
    };

    private static final String TASK_GROUP_NAME = "defaultGrp";

    private final WorkerId workerId;
    private final int stageNum;
    private final int numPortsRequested;
    private final JobMetadata jobMetadata;
    private final MantisJobDurationType durationType;
    private final MachineDefinition machineDefinition;
    private final List<ConstraintEvaluator> hardConstraints;
    private final List<VMTaskFitnessCalculator> softConstraints;
    private final Optional<String> preferredCluster;
    // Earliest scheduling time; may be pushed out by the rate limiter, hence volatile.
    private volatile long readyAt;

    public ScheduleRequest(final WorkerId workerId,
                           final int stageNum,
                           final int numPortsRequested,
                           final JobMetadata jobMetadata,
                           final MantisJobDurationType durationType,
                           final MachineDefinition machineDefinition,
                           final List<ConstraintEvaluator> hardConstraints,
                           final List<VMTaskFitnessCalculator> softConstraints,
                           final long readyAt,
                           final Optional<String> preferredCluster) {
        this.workerId = workerId;
        this.stageNum = stageNum;
        this.numPortsRequested = numPortsRequested;
        this.jobMetadata = jobMetadata;
        this.durationType = durationType;
        this.machineDefinition = machineDefinition;
        this.hardConstraints = hardConstraints;
        this.softConstraints = softConstraints;
        this.readyAt = readyAt;
        this.preferredCluster = preferredCluster;
    }

    /** Two requests are equal iff they reference the same worker id. */
    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        final ScheduleRequest other = (ScheduleRequest) o;
        if (workerId == null) {
            return other.workerId == null;
        }
        return workerId.equals(other.workerId);
    }

    @Override
    public int hashCode() {
        return (workerId == null) ? 0 : workerId.hashCode();
    }

    @Override
    public String getId() {
        return workerId.getId();
    }

    public WorkerId getWorkerId() {
        return workerId;
    }

    @Override
    public String taskGroupName() {
        return TASK_GROUP_NAME;
    }

    @Override
    public double getCPUs() {
        return machineDefinition.getCpuCores();
    }

    @Override
    public double getMemory() {
        return machineDefinition.getMemoryMB();
    }

    @Override
    public double getNetworkMbps() {
        return machineDefinition.getNetworkMbps();
    }

    @Override
    public double getDisk() {
        return machineDefinition.getDiskMB();
    }

    @Override
    public int getPorts() {
        return numPortsRequested;
    }

    public JobMetadata getJobMetadata() {
        return jobMetadata;
    }

    public MachineDefinition getMachineDefinition() {
        return machineDefinition;
    }

    // No scalar or custom named resource requests beyond the machine definition.
    @Override
    public Map<String, Double> getScalarRequests() {
        return Collections.emptyMap();
    }

    @Override
    public Map<String, NamedResourceSetRequest> getCustomNamedResources() {
        return Collections.emptyMap();
    }

    @Override
    public List<ConstraintEvaluator> getHardConstraints() {
        return hardConstraints;
    }

    @Override
    public List<VMTaskFitnessCalculator> getSoftConstraints() {
        return softConstraints;
    }

    @Override
    public AssignedResources getAssignedResources() {
        // not used by Mantis
        return null;
    }

    @Override
    public void setAssignedResources(AssignedResources assignedResources) {
        // no-op Not using them at this time
    }

    public MantisJobDurationType getDurationType() {
        return durationType;
    }

    public int getStageNum() {
        return stageNum;
    }

    @Override
    public QAttributes getQAttributes() {
        return DEFAULT_Q_ATTRIBUTES;
    }

    @Override
    public long getReadyAt() {
        return readyAt;
    }

    @Override
    public void safeSetReadyAt(long when) {
        readyAt = when;
    }

    public Optional<String> getPreferredCluster() {
        return preferredCluster;
    }

    @Override
    public String toString() {
        return new StringBuilder("ScheduleRequest{")
            .append("workerId=").append(workerId)
            .append(", readyAt=").append(readyAt)
            .append('}')
            .toString();
    }
}
| 7,964 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/scheduler/ResourceClusterAwareScheduler.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.scheduler;
import akka.actor.ActorRef;
import com.netflix.fenzo.VirtualMachineCurrentState;
import com.netflix.fenzo.VirtualMachineLease;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.resourcecluster.TaskExecutorID;
import io.mantisrx.server.master.scheduler.ResourceClusterAwareSchedulerActor.CancelRequestEvent;
import io.mantisrx.server.master.scheduler.ResourceClusterAwareSchedulerActor.InitializeRunningWorkerRequestEvent;
import io.mantisrx.server.master.scheduler.ResourceClusterAwareSchedulerActor.ScheduleRequestEvent;
import io.mantisrx.shaded.com.google.common.base.Throwables;
import java.util.List;
import java.util.Optional;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
/**
 * {@link MantisScheduler} implementation that delegates all scheduling work to a
 * {@code ResourceClusterAwareSchedulerActor}. Only the worker-oriented operations are supported;
 * the Fenzo/VM-oriented operations of the interface throw {@link UnsupportedOperationException}.
 */
@Slf4j
@RequiredArgsConstructor
public class ResourceClusterAwareScheduler implements MantisScheduler {

    private final ActorRef schedulerActor;

    @Override
    public void scheduleWorker(ScheduleRequest scheduleRequest) {
        // Fire-and-forget: the actor owns retries and failure handling.
        // Use ActorRef.noSender() rather than a raw null sender (Akka convention).
        schedulerActor.tell(ScheduleRequestEvent.of(scheduleRequest), ActorRef.noSender());
    }

    @Override
    public void unscheduleWorker(WorkerId workerId, Optional<String> hostname) {
        throw new UnsupportedOperationException(
            "This seems to be used only within the SchedulingService which is a MantisScheduler implementation itself; so it's not clear if this is needed or not");
    }

    @Override
    public void unscheduleAndTerminateWorker(WorkerId workerId,
                                             Optional<String> hostname) {
        schedulerActor.tell(CancelRequestEvent.of(workerId), ActorRef.noSender());
    }

    @Override
    public void updateWorkerSchedulingReadyTime(WorkerId workerId, long when) {
        throw new UnsupportedOperationException();
    }

    @Override
    public void initializeRunningWorker(ScheduleRequest scheduleRequest, String hostname, String hostID) {
        log.info("initializeRunningWorker called for {} and {}", scheduleRequest, hostname);
        schedulerActor.tell(
            new InitializeRunningWorkerRequestEvent(scheduleRequest, TaskExecutorID.of(hostID)),
            ActorRef.noSender());
    }

    @Override
    public void rescindOffer(String offerId) {
        throw new UnsupportedOperationException();
    }

    @Override
    public void rescindOffers(String hostname) {
        throw new UnsupportedOperationException();
    }

    @Override
    public void addOffers(List<VirtualMachineLease> offers) {
        throw new UnsupportedOperationException();
    }

    @Override
    public void disableVM(String hostname, long durationMillis)
        throws IllegalStateException {
        throw new UnsupportedOperationException();
    }

    @Override
    public void enableVM(String hostname) {
        throw new UnsupportedOperationException();
    }

    @Override
    public List<VirtualMachineCurrentState> getCurrentVMState() {
        throw new UnsupportedOperationException();
    }

    @Override
    public void setActiveVmGroups(List<String> activeVmGroups) {
        // Logged with a stack trace to identify unexpected callers of this legacy path.
        log.info("Active VM Groups is {} as per this stack-trace {}", activeVmGroups,
            Throwables.getStackTraceAsString(new Throwable()));
    }
}
| 7,965 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/scheduler/WorkerLaunched.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.scheduler;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import java.util.Objects;
import java.util.Optional;
/**
 * Worker event emitted when a worker has been launched on a task executor; carries the placement
 * details (host, VM, cluster, resource cluster, assigned ports) needed by the job actor.
 */
public class WorkerLaunched implements WorkerEvent {

    private final WorkerId workerId;
    private final int stageNum;
    private final String hostname;
    private final String vmId;
    private final Optional<String> clusterName;
    private final Optional<ClusterID> resourceCluster;
    private final WorkerPorts ports;
    // Captured at construction time; part of the event's identity.
    private final long eventTimeMs = System.currentTimeMillis();

    public WorkerLaunched(final WorkerId workerId,
                          final int stageNum,
                          final String hostname,
                          final String vmId,
                          final Optional<String> clusterName,
                          final Optional<ClusterID> resourceCluster,
                          final WorkerPorts ports) {
        this.workerId = workerId;
        this.stageNum = stageNum;
        this.hostname = hostname;
        this.vmId = vmId;
        this.clusterName = clusterName;
        this.resourceCluster = resourceCluster;
        this.ports = ports;
    }

    @Override
    public WorkerId getWorkerId() {
        return workerId;
    }

    public int getStageNum() {
        return stageNum;
    }

    public String getHostname() {
        return hostname;
    }

    public String getVmId() {
        return vmId;
    }

    public Optional<String> getClusterName() {
        return clusterName;
    }

    public Optional<ClusterID> getResourceCluster() {
        return resourceCluster;
    }

    public WorkerPorts getPorts() {
        return ports;
    }

    @Override
    public long getEventTimeMs() {
        return eventTimeMs;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        WorkerLaunched that = (WorkerLaunched) o;
        // Include every field (resourceCluster was previously omitted, making events that
        // differ only in resource cluster compare equal).
        return stageNum == that.stageNum &&
            eventTimeMs == that.eventTimeMs &&
            Objects.equals(workerId, that.workerId) &&
            Objects.equals(hostname, that.hostname) &&
            Objects.equals(vmId, that.vmId) &&
            Objects.equals(clusterName, that.clusterName) &&
            Objects.equals(resourceCluster, that.resourceCluster) &&
            Objects.equals(ports, that.ports);
    }

    @Override
    public int hashCode() {
        return Objects.hash(workerId, stageNum, hostname, vmId, clusterName, resourceCluster, ports, eventTimeMs);
    }

    @Override
    public String toString() {
        return "WorkerLaunched{" +
            "workerId=" + workerId +
            ", stageNum=" + stageNum +
            ", hostname='" + hostname + '\'' +
            ", vmId='" + vmId + '\'' +
            ", clusterName=" + clusterName +
            ", resourceCluster=" + resourceCluster +
            ", ports=" + ports +
            ", eventTimeMs=" + eventTimeMs +
            '}';
    }
}
| 7,966 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/scheduler/ResourceClusterAwareSchedulerActor.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.scheduler;
import static akka.pattern.Patterns.pipe;
import akka.actor.AbstractActorWithTimers;
import akka.actor.Props;
import akka.actor.Status.Failure;
import akka.japi.pf.ReceiveBuilder;
import com.netflix.spectator.api.Tag;
import io.mantisrx.common.Ack;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.metrics.Timer;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.ExecuteStageRequestFactory;
import io.mantisrx.server.master.resourcecluster.ResourceCluster;
import io.mantisrx.server.master.resourcecluster.TaskExecutorAllocationRequest;
import io.mantisrx.server.master.resourcecluster.TaskExecutorID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorRegistration;
import io.mantisrx.server.worker.TaskExecutorGateway;
import io.mantisrx.server.worker.TaskExecutorGateway.TaskNotFoundException;
import io.mantisrx.shaded.com.google.common.base.Throwables;
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nullable;
import lombok.Value;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.util.ExceptionUtils;
/**
 * Actor that drives worker scheduling against a {@link ResourceCluster}: for each schedule
 * request it asks the cluster for a task executor, submits the stage to that executor's gateway,
 * and retries (with a delay) on failure up to configured limits. Cancellation follows a similar
 * retry pattern. All asynchronous results are piped back to this actor as messages, so the
 * actor's own state is only touched from its message handlers.
 */
@Slf4j
class ResourceClusterAwareSchedulerActor extends AbstractActorWithTimers {

    private final ResourceCluster resourceCluster;
    // Builds the ExecuteStageRequest payload sent to a task executor on submission.
    private final ExecuteStageRequestFactory executeStageRequestFactory;
    // Routes worker lifecycle events (launched / launch-failed) back to the owning job.
    private final JobMessageRouter jobMessageRouter;
    // Maximum attempts before a schedule request is dropped with an error.
    private final int maxScheduleRetries;
    // Maximum attempts before a cancel request is abandoned.
    private final int maxCancelRetries;
    // Base delay between schedule retries (may be extended by the request's readyAt).
    private final Duration intervalBetweenRetries;
    // Time from schedule-request creation to successful submission.
    private final Timer schedulingLatency;
    private final Counter schedulingFailures;
    // NOTE(review): registered but never incremented in this class — confirm whether gateway
    // connection errors are meant to bump it.
    private final Counter connectionFailures;

    public static Props props(
        int maxScheduleRetries,
        int maxCancelRetries,
        Duration intervalBetweenRetries,
        final ResourceCluster resourceCluster,
        final ExecuteStageRequestFactory executeStageRequestFactory,
        final JobMessageRouter jobMessageRouter,
        final MetricsRegistry metricsRegistry) {
        return Props.create(ResourceClusterAwareSchedulerActor.class, maxScheduleRetries, maxCancelRetries, intervalBetweenRetries, resourceCluster, executeStageRequestFactory,
            jobMessageRouter, metricsRegistry);
    }

    public ResourceClusterAwareSchedulerActor(
        int maxScheduleRetries,
        int maxCancelRetries,
        Duration intervalBetweenRetries,
        ResourceCluster resourceCluster,
        ExecuteStageRequestFactory executeStageRequestFactory,
        JobMessageRouter jobMessageRouter,
        MetricsRegistry metricsRegistry) {
        this.resourceCluster = resourceCluster;
        this.executeStageRequestFactory = executeStageRequestFactory;
        this.jobMessageRouter = jobMessageRouter;
        this.maxScheduleRetries = maxScheduleRetries;
        this.intervalBetweenRetries = intervalBetweenRetries;
        this.maxCancelRetries = maxCancelRetries;

        // Metrics are tagged with the resource cluster so multiple actors can coexist.
        final String metricsGroup = "ResourceClusterAwareSchedulerActor";
        final Metrics metrics =
            new Metrics.Builder()
                .id(metricsGroup, Tag.of("resourceCluster", resourceCluster.getName()))
                .addTimer("schedulingLatency")
                .addCounter("schedulingFailures")
                .addCounter("connectionFailures")
                .build();
        metricsRegistry.registerAndGet(metrics);
        this.schedulingLatency = metrics.getTimer("schedulingLatency");
        this.schedulingFailures = metrics.getCounter("schedulingFailures");
        this.connectionFailures = metrics.getCounter("connectionFailures");
    }

    @Override
    public Receive createReceive() {
        return ReceiveBuilder.create()
            .match(ScheduleRequestEvent.class, this::onScheduleRequestEvent)
            .match(InitializeRunningWorkerRequestEvent.class, this::onInitializeRunningWorkerRequest)
            .match(CancelRequestEvent.class, this::onCancelRequestEvent)
            .match(AssignedScheduleRequestEvent.class, this::onAssignedScheduleRequestEvent)
            .match(FailedToScheduleRequestEvent.class, this::onFailedScheduleRequestEvent)
            .match(SubmittedScheduleRequestEvent.class, this::onSubmittedScheduleRequestEvent)
            .match(FailedToSubmitScheduleRequestEvent.class, this::onFailedToSubmitScheduleRequestEvent)
            .match(RetryCancelRequestEvent.class, this::onRetryCancelRequestEvent)
            .match(Noop.class, this::onNoop)
            .match(Ack.class, ack -> log.debug("Received ack from {}", sender()))
            .match(Failure.class, failure -> log.error("Received failure from {}: {}", sender(), failure))
            .build();
    }

    /**
     * Step 1 of scheduling: ask the resource cluster for a task executor. The async result is
     * converted to either an AssignedScheduleRequestEvent or a FailedToScheduleRequestEvent and
     * piped back to self.
     */
    private void onScheduleRequestEvent(ScheduleRequestEvent event) {
        if (event.isRetry()) {
            log.info("Retrying Schedule Request {}, attempt {}", event.getRequest(),
                event.getAttempt());
        }

        CompletableFuture<Object> assignedFuture =
            resourceCluster
                .getTaskExecutorFor(
                    TaskExecutorAllocationRequest.of(
                        event.getRequest().getWorkerId(), event.getRequest().getMachineDefinition(), event.getRequest().getJobMetadata(), event.getRequest().getStageNum()))
                .<Object>thenApply(event::onAssignment)
                .exceptionally(event::onFailure);

        pipe(assignedFuture, getContext().getDispatcher()).to(self());
    }

    /**
     * Records an already-running worker (e.g. after master failover) against its task executor
     * without going through the submit path.
     */
    private void onInitializeRunningWorkerRequest(InitializeRunningWorkerRequestEvent request) {
        resourceCluster.initializeTaskExecutor(
            request.getTaskExecutorID(),
            request.getScheduleRequest().getWorkerId());
    }

    /**
     * Step 2 of scheduling: a task executor has been assigned; submit the stage via the
     * executor's gateway. Success/failure is piped back to self; any synchronous failure to
     * obtain the gateway re-enters the retry loop.
     */
    private void onAssignedScheduleRequestEvent(AssignedScheduleRequestEvent event) {
        try {
            CompletableFuture<TaskExecutorGateway> gatewayFut = resourceCluster.getTaskExecutorGateway(event.getTaskExecutorID());
            TaskExecutorRegistration info = resourceCluster.getTaskExecutorInfo(event.getTaskExecutorID()).join();

            if (gatewayFut != null && info != null) {
                CompletionStage<Object> ackFuture =
                    gatewayFut
                        .thenComposeAsync(gateway ->
                            gateway
                                .submitTask(
                                    executeStageRequestFactory.of(
                                        event.getScheduleRequestEvent().getRequest(),
                                        info))
                                .<Object>thenApply(
                                    dontCare -> new SubmittedScheduleRequestEvent(
                                        event.getScheduleRequestEvent(),
                                        event.getTaskExecutorID()))
                                .exceptionally(
                                    throwable ->
                                        new FailedToSubmitScheduleRequestEvent(
                                            event.getScheduleRequestEvent(),
                                            event.getTaskExecutorID(),
                                            ExceptionUtils.stripCompletionException(throwable))
                                )
                                .whenCompleteAsync((res, err) ->
                                {
                                    if (err == null) {
                                        log.debug("[Submit Task] finish with {}", res);
                                    }
                                    else {
                                        log.error("[Submit Task] fail: {}", event.getTaskExecutorID(), err);
                                    }
                                })
                        )
                        .exceptionally(
                            // Note: throwable is the wrapped completable error (inside is akka rpc actor selection
                            // error).
                            // On this error, we want to:
                            // 1) trigger rpc service reconnection (to fix the missing action).
                            // 2) re-schedule worker node with delay (to avoid a fast loop to exhaust idle TE pool).
                            throwable ->
                                event.getScheduleRequestEvent().onFailure(throwable)
                        );
                pipe(ackFuture, getContext().getDispatcher()).to(self());
            }
        } catch (Exception e) {
            // we are not able to get the gateway, which either means the node is not great or some transient network issue
            // we will retry the request
            log.warn(
                "Failed to submit task with the task executor {}; Resubmitting the request",
                event.getTaskExecutorID(), e);
            self().tell(event.getScheduleRequestEvent().onFailure(e), self());
        }
    }

    /**
     * Handles a failed scheduling attempt: gives up after maxScheduleRetries, otherwise starts a
     * single timer to re-enqueue the request after max(readyAt delay, retry interval).
     */
    private void onFailedScheduleRequestEvent(FailedToScheduleRequestEvent event) {
        schedulingFailures.increment();
        if (event.getAttempt() >= this.maxScheduleRetries) {
            log.error("Failed to submit the request {} because of ", event.getScheduleRequestEvent(), event.getThrowable());
        } else {
            // honor the readyAt attribute from schedule request's rate limiter.
            Duration timeout = Duration.ofMillis(
                Math.max(
                    event.getScheduleRequestEvent().getRequest().getReadyAt() - Instant.now().toEpochMilli(),
                    intervalBetweenRetries.toMillis()));
            log.error("Failed to submit the request {}; Retrying in {} because of ",
                event.getScheduleRequestEvent(), timeout, event.getThrowable());
            getTimers().startSingleTimer(
                getSchedulingQueueKeyFor(event.getScheduleRequestEvent().getRequest().getWorkerId()),
                event.onRetry(),
                timeout);
        }
    }

    /**
     * Step 3 of scheduling: the submit succeeded; publish a WorkerLaunched event to the job and
     * record the end-to-end scheduling latency.
     */
    private void onSubmittedScheduleRequestEvent(SubmittedScheduleRequestEvent event) {
        log.debug("[Submit Task]: receive SubmittedScheduleRequestEvent: {}", event);
        final TaskExecutorID taskExecutorID = event.getTaskExecutorID();
        try {
            final TaskExecutorRegistration info = resourceCluster.getTaskExecutorInfo(taskExecutorID)
                .join();
            boolean success =
                jobMessageRouter.routeWorkerEvent(new WorkerLaunched(
                    event.getEvent().getRequest().getWorkerId(),
                    event.getEvent().getRequest().getStageNum(),
                    info.getHostname(),
                    taskExecutorID.getResourceId(),
                    Optional.ofNullable(info.getClusterID().getResourceID()),
                    Optional.of(info.getClusterID()),
                    info.getWorkerPorts()));
            final Duration latency =
                Duration.between(event.getEvent().getEventTime(), Clock.systemDefaultZone().instant());
            schedulingLatency.record(latency.toNanos(), TimeUnit.NANOSECONDS);

            if (!success) {
                log.error(
                    "Routing message to jobMessageRouter was never expected to fail but it has failed to event {}",
                    event);
            }
        } catch (Exception ex) {
            log.warn("Failed to route message due to error in getting TaskExecutor info: {}", taskExecutorID, ex);
        }
    }

    /** The submit itself failed; notify the job via a WorkerLaunchFailed event. */
    private void onFailedToSubmitScheduleRequestEvent(FailedToSubmitScheduleRequestEvent event) {
        log.error("Failed to submit schedule request event {}", event, event.getThrowable());
        jobMessageRouter.routeWorkerEvent(new WorkerLaunchFailed(
            event.getScheduleRequestEvent().getRequest().getWorkerId(),
            event.getScheduleRequestEvent().getRequest().getStageNum(),
            Throwables.getStackTraceAsString(event.throwable)));
    }

    /**
     * Cancels a worker: stops any pending schedule-retry timer for it, then asks the assigned
     * task executor to cancel the task. A TaskNotFoundException means the executor no longer
     * knows the task, which is treated as success; other failures trigger the cancel retry loop.
     */
    private void onCancelRequestEvent(CancelRequestEvent event) {
        try {
            log.info("onCancelRequestEvent {}", event);
            getTimers().cancel(getSchedulingQueueKeyFor(event.getWorkerId()));
            final TaskExecutorID taskExecutorID =
                resourceCluster.getTaskExecutorAssignedFor(event.getWorkerId()).join();

            CompletableFuture<Object> cancelFuture =
                resourceCluster.getTaskExecutorGateway(taskExecutorID)
                    .thenComposeAsync(gateway ->
                        gateway
                            .cancelTask(event.getWorkerId())
                            .<Object>thenApply(dontCare -> Noop.getInstance())
                            .exceptionally(exception -> {
                                Throwable actual =
                                    ExceptionUtils.stripCompletionException(
                                        ExceptionUtils.stripExecutionException(exception));
                                // no need to retry if the TaskExecutor does not know about the task anymore.
                                if (actual instanceof TaskNotFoundException) {
                                    return Noop.getInstance();
                                } else {
                                    return event.onFailure(actual);
                                }
                            }));

            pipe(cancelFuture, context().dispatcher()).to(self());
        } catch (Exception e) {
            Throwable throwable =
                ExceptionUtils.stripCompletionException(ExceptionUtils.stripExecutionException(e));
            if (!(throwable instanceof TaskNotFoundException)) {
                // something failed and its not TaskNotFoundException
                // which implies this is still a valid request
                self().tell(event.onFailure(throwable), self());
            } else {
                log.info("Failed to cancel task {} as no matching executor could be found", event.getWorkerId());
            }
        }
    }

    /** Retries a failed cancel after a fixed 1-minute delay, up to maxCancelRetries attempts. */
    private void onRetryCancelRequestEvent(RetryCancelRequestEvent event) {
        if (event.getActualEvent().getAttempt() < maxCancelRetries) {
            context().system()
                .scheduler()
                .scheduleOnce(
                    Duration.ofMinutes(1),
                    self(), // received
                    event.onRetry(), // event
                    getContext().getDispatcher(), // executor
                    self()); // sender
        } else {
            log.error("Exhausted number of retries for cancel request {}", event.getActualEvent(),
                event.getCurrentFailure());
        }
    }

    // Terminal no-op message used to absorb successful cancel results.
    private void onNoop(Noop event) {
    }

    /** A request to schedule a worker, carrying its attempt count and creation time. */
    @Value
    static class ScheduleRequestEvent {
        ScheduleRequest request;
        int attempt;
        @Nullable
        Throwable previousFailure;
        Instant eventTime;

        boolean isRetry() {
            return attempt > 1;
        }

        static ScheduleRequestEvent of(ScheduleRequest request) {
            return new ScheduleRequestEvent(request, 1, null, Clock.systemDefaultZone().instant());
        }

        FailedToScheduleRequestEvent onFailure(Throwable throwable) {
            return new FailedToScheduleRequestEvent(
                this, this.attempt, ExceptionUtils.stripCompletionException(throwable));
        }

        AssignedScheduleRequestEvent onAssignment(TaskExecutorID taskExecutorID) {
            return new AssignedScheduleRequestEvent(this, taskExecutorID);
        }
    }

    /** Registers an already-running worker against its task executor (no submission). */
    @Value
    static class InitializeRunningWorkerRequestEvent {
        ScheduleRequest scheduleRequest;
        TaskExecutorID taskExecutorID;
    }

    /** Scheduling attempt failed; onRetry() re-creates the request with attempt+1. */
    @Value
    private static class FailedToScheduleRequestEvent {
        ScheduleRequestEvent scheduleRequestEvent;
        int attempt;
        Throwable throwable;

        private ScheduleRequestEvent onRetry() {
            return new ScheduleRequestEvent(
                scheduleRequestEvent.getRequest(),
                attempt + 1,
                this.throwable,
                scheduleRequestEvent.getEventTime());
        }
    }

    /** A task executor has been assigned for the schedule request. */
    @Value
    private static class AssignedScheduleRequestEvent {
        ScheduleRequestEvent scheduleRequestEvent;
        TaskExecutorID taskExecutorID;
    }

    /** The stage was successfully submitted to the task executor. */
    @Value
    private static class SubmittedScheduleRequestEvent {
        ScheduleRequestEvent event;
        TaskExecutorID taskExecutorID;
    }

    /** Submission to the assigned task executor failed. */
    @Value
    private static class FailedToSubmitScheduleRequestEvent {
        ScheduleRequestEvent scheduleRequestEvent;
        TaskExecutorID taskExecutorID;
        Throwable throwable;
    }

    /** A request to cancel a worker's task, carrying its attempt count. */
    @Value
    static class CancelRequestEvent {
        WorkerId workerId;
        int attempt;
        Throwable previousFailure;

        static CancelRequestEvent of(WorkerId workerId) {
            return new CancelRequestEvent(workerId, 1, null);
        }

        RetryCancelRequestEvent onFailure(Throwable throwable) {
            return new RetryCancelRequestEvent(this, throwable);
        }
    }

    /** Wraps a failed cancel request; onRetry() bumps the attempt count. */
    @Value
    private static class RetryCancelRequestEvent {
        CancelRequestEvent actualEvent;
        Throwable currentFailure;

        CancelRequestEvent onRetry() {
            return new CancelRequestEvent(actualEvent.getWorkerId(),
                actualEvent.getAttempt() + 1, currentFailure);
        }
    }

    @Value(staticConstructor = "getInstance")
    private static class Noop {
    }

    // Timer key used to coalesce/cancel pending schedule retries for a given worker.
    private String getSchedulingQueueKeyFor(WorkerId workerId) {
        return "Retry-Schedule-Request-For" + workerId.toString();
    }
}
| 7,967 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/scheduler/JobMessageRouter.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.scheduler;
public interface JobMessageRouter {
    /**
     * Routes a worker lifecycle event from the scheduler to its handler.
     *
     * @param workerEvent worker event from Scheduler
     *
     * @return true if workerEvent was handled successfully, otherwise false
     */
    boolean routeWorkerEvent(final WorkerEvent workerEvent);
}
| 7,968 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/scheduler/WorkerRegistry.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.scheduler;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import javax.annotation.Nullable;
/**
 * Read-only view over the set of known workers and their running state.
 * "Running" throughout this interface means LAUNCHED, START_INITIATED or STARTED.
 */
public interface WorkerRegistry {
    /**
     * Gets the number of workers that are running in a given cluster.
     *
     * @param clusterID optional parameter to filter only workers for a given cluster.
     *                  If null, then workers without a clusterID are filtered.
     * @return number of workers in LAUNCHED, START_INITIATED and STARTED state
     */
    int getNumRunningWorkers(@Nullable ClusterID clusterID);
    /**
     * Returns the set of all workers in LAUNCHED, START_INITIATED and STARTED state.
     *
     * @param clusterID optional cluster filter; if null, workers without a clusterID are filtered
     */
    Set<WorkerId> getAllRunningWorkers(@Nullable ClusterID clusterID);
    /**
     * Returns the map of all workers to SlaveId in LAUNCHED, START_INITIATED and STARTED state.
     *
     * @param clusterID optional cluster filter; if null, workers without a clusterID are filtered
     */
    Map<WorkerId, String> getAllRunningWorkerSlaveIdMappings(@Nullable ClusterID clusterID);
    /**
     * @param workerId id to check
     *
     * @return false if job/worker is in a terminal state, otherwise true
     */
    boolean isWorkerValid(final WorkerId workerId);
    /**
     * Gets the time at which the worker was Accepted.
     *
     * @param workerId Worker ID
     *
     * @return time when worker was Accepted, or empty if unknown
     */
    Optional<Long> getAcceptedAt(final WorkerId workerId);
}
| 7,969 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/scheduler/MantisScheduler.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.scheduler;
import com.netflix.fenzo.VirtualMachineCurrentState;
import com.netflix.fenzo.VirtualMachineLease;
import io.mantisrx.server.core.domain.WorkerId;
import java.util.List;
import java.util.Optional;
/**
 * Scheduling facade used by the master to queue workers for resource assignment,
 * remove them, and manage the pool of VM resource offers.
 */
public interface MantisScheduler {
    /**
     * Add a worker to the Scheduler queue.
     *
     * @param scheduleRequest worker to schedule
     */
    void scheduleWorker(final ScheduleRequest scheduleRequest);
    /**
     * Mark the worker to be removed from the Scheduling queue. This is expected to be called for all tasks that were added to the Scheduler, whether or
     * not the worker is already running. If the worker is running, the <code>hostname</code> parameter must be set, otherwise,
     * it can be <code>Optional.empty()</code>. The actual remove operation is performed before the start of the next scheduling
     * iteration.
     *
     * @param workerId The Id of the worker to be removed.
     * @param hostname The name of the VM where the worker was assigned resources from, or, <code>Optional.empty()</code> if it was
     *                 not assigned any resources.
     */
    void unscheduleWorker(final WorkerId workerId, final Optional<String> hostname);
    /**
     * Mark the worker to be removed from the Scheduling queue and terminate the running container. This is expected to be called for all tasks that were added to the Scheduler, whether or
     * not the worker is already running. If the worker is running, the <code>hostname</code> parameter must be set, otherwise,
     * it can be <code>Optional.empty()</code>. The actual remove operation is performed before the start of the next scheduling
     * iteration.
     *
     * @param workerId The Id of the worker to be removed.
     * @param hostname The name of the VM where the worker was assigned resources from, or, <code>Optional.empty()</code> if it was
     *                 not assigned any resources.
     */
    void unscheduleAndTerminateWorker(final WorkerId workerId, final Optional<String> hostname);
    /**
     * Set the wall clock time when this worker is ready for consideration for resource allocation.
     *
     * @param workerId The Id of the task.
     * @param when     The wall clock time in millis when the task is ready for consideration for assignment.
     */
    void updateWorkerSchedulingReadyTime(final WorkerId workerId, final long when);
    /**
     * Mark the given workers as running. This is expected to be called for all workers that were already running from before
     * {@link com.netflix.fenzo.TaskSchedulingService} started running. For example, when the scheduling service
     * is being started after a restart of the system and there were some workers launched in the previous run of
     * the system. Any workers assigned resources during scheduling invoked by this service will be automatically marked
     * as running.
     * <p>
     *
     * @param scheduleRequest The scheduleRequest(worker) to mark as running
     * @param hostname        The name of the VM that the task is running on.
     * @param hostID          The id of the host the task is running on.
     */
    void initializeRunningWorker(final ScheduleRequest scheduleRequest, final String hostname, final String hostID);
    /**
     * Informs the scheduler that the offer has been revoked. Typically called by the Resource Manager
     *
     * @param offerId ID of the offer being revoked
     */
    void rescindOffer(final String offerId);
    /**
     * Informs the scheduler to reject all offers for this hostname.
     *
     * @param hostname host
     */
    void rescindOffers(final String hostname);
    /**
     * Informs the scheduler of new offers received from the Resource Manager
     *
     * @param offers new offers from Resource Manager
     */
    void addOffers(final List<VirtualMachineLease> offers);
    /**
     * Reject offers from this host for durationMillis
     *
     * @param hostname       host to disable
     * @param durationMillis duration in milliseconds
     *
     * @throws IllegalStateException if the host cannot be disabled in the scheduler's current state
     */
    void disableVM(final String hostname, final long durationMillis) throws IllegalStateException;
    /**
     * Enable a host to allow using its resource offers for task assignment, only required if the host was explicitly disabled
     *
     * @param hostname host to enable
     */
    void enableVM(final String hostname);
    /**
     * Get the current states of all known VMs.
     */
    List<VirtualMachineCurrentState> getCurrentVMState();
    /**
     * Set the list of VM group names that are active. VMs (hosts) that belong to groups that you do not include
     * in this list are considered disabled. The scheduler does not use the resources of disabled hosts when it
     * allocates tasks. If you pass in a null list, this indicates that the scheduler should consider all groups
     * to be enabled.
     *
     * @param activeVmGroups a list of VM group names that the scheduler is to consider to be enabled, or {@code null}
     *                       if the scheduler is to consider every group to be enabled
     */
    void setActiveVmGroups(final List<String> activeVmGroups);
}
| 7,970 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/scheduler/SchedulingStateManager.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.scheduler;
import com.netflix.fenzo.VirtualMachineCurrentState;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
/**
 * Thread-safe holder for the most recently observed VM state snapshot.
 * Writers publish a new snapshot via {@link #setVMCurrentState}; readers see it
 * through {@link #getVMCurrentState}. Returns {@code null} until first published.
 */
public class SchedulingStateManager {

    // Single atomic slot holding the latest snapshot; starts out empty (null).
    private final AtomicReference<List<VirtualMachineCurrentState>> currentStateRef =
        new AtomicReference<>(null);

    /** @return the last published VM state snapshot, or {@code null} if none yet. */
    public List<VirtualMachineCurrentState> getVMCurrentState() {
        return currentStateRef.get();
    }

    /** Atomically replaces the published VM state snapshot. */
    public void setVMCurrentState(final List<VirtualMachineCurrentState> latestState) {
        currentStateRef.set(latestState);
    }
}
| 7,971 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/scheduler/WorkerLaunchFailed.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.scheduler;
import io.mantisrx.server.core.domain.WorkerId;
import java.util.Objects;
/**
 * Worker event emitted when launching a worker fails; carries the stage number
 * and an error message describing the failure.
 */
public class WorkerLaunchFailed implements WorkerEvent {

    private final WorkerId workerId;
    private final int stageNum;
    private final String errorMessage;
    // Captured once at construction; participates in equals/hashCode.
    private final long eventTimeMs = System.currentTimeMillis();

    /**
     * @param workerId     id of the worker whose launch failed
     * @param stageNum     stage the worker belongs to
     * @param errorMessage description of the launch failure
     */
    public WorkerLaunchFailed(final WorkerId workerId,
                              final int stageNum,
                              final String errorMessage) {
        this.workerId = workerId;
        this.stageNum = stageNum;
        this.errorMessage = errorMessage;
    }

    @Override
    public WorkerId getWorkerId() {
        return workerId;
    }

    public int getStageNum() {
        return stageNum;
    }

    public String getErrorMessage() {
        return errorMessage;
    }

    @Override
    public long getEventTimeMs() {
        return eventTimeMs;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        final WorkerLaunchFailed other = (WorkerLaunchFailed) o;
        return stageNum == other.stageNum
            && eventTimeMs == other.eventTimeMs
            && Objects.equals(workerId, other.workerId)
            && Objects.equals(errorMessage, other.errorMessage);
    }

    @Override
    public int hashCode() {
        return Objects.hash(workerId, stageNum, errorMessage, eventTimeMs);
    }

    @Override
    public String toString() {
        final StringBuilder sb = new StringBuilder("WorkerLaunchFailed{");
        sb.append("workerId=").append(workerId);
        sb.append(", stageNum=").append(stageNum);
        sb.append(", errorMessage='").append(errorMessage).append('\'');
        sb.append(", eventTimeMs=").append(eventTimeMs);
        return sb.append('}').toString();
    }
}
| 7,972 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/scheduler/WorkerResourceStatus.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.scheduler;
import io.mantisrx.server.core.domain.WorkerId;
import java.util.Objects;
/**
 * Worker event carrying a resource-manager status update for a worker,
 * expressed as a {@link VMResourceState} plus a free-form message.
 */
public class WorkerResourceStatus implements WorkerEvent {

    private final WorkerId workerId;
    private final String message;
    private final VMResourceState state;
    // Captured once at construction; participates in equals/hashCode.
    private final long eventTimeMs = System.currentTimeMillis();

    /**
     * @param workerId id of the worker this status refers to
     * @param message  free-form status message from the resource manager
     * @param state    resource state reported for the worker
     */
    public WorkerResourceStatus(final WorkerId workerId,
                                final String message,
                                final VMResourceState state) {
        this.workerId = workerId;
        this.message = message;
        this.state = state;
    }

    @Override
    public WorkerId getWorkerId() {
        return workerId;
    }

    public String getMessage() {
        return message;
    }

    public VMResourceState getState() {
        return state;
    }

    @Override
    public long getEventTimeMs() {
        return eventTimeMs;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        final WorkerResourceStatus other = (WorkerResourceStatus) o;
        return eventTimeMs == other.eventTimeMs
            && Objects.equals(workerId, other.workerId)
            && Objects.equals(message, other.message)
            && state == other.state;
    }

    @Override
    public int hashCode() {
        return Objects.hash(workerId, message, state, eventTimeMs);
    }

    @Override
    public String toString() {
        final StringBuilder sb = new StringBuilder("WorkerResourceStatus{");
        sb.append("workerId=").append(workerId);
        sb.append(", message='").append(message).append('\'');
        sb.append(", state=").append(state);
        sb.append(", eventTimeMs=").append(eventTimeMs);
        return sb.append('}').toString();
    }

    /** Lifecycle states a worker's VM resources can be reported in. */
    public enum VMResourceState {
        STARTED,
        START_INITIATED,
        COMPLETED,
        FAILED
    }
}
| 7,973 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/scheduler/LaunchTaskRequest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.scheduler;
import io.mantisrx.common.WorkerPorts;
/**
 * Immutable pairing of a schedule request with the worker ports allocated for it,
 * handed to the VM master service when launching a task.
 */
public class LaunchTaskRequest {

    private final ScheduleRequest scheduleRequest;
    private final WorkerPorts ports;

    /**
     * @param scheduleRequest the worker scheduling request to launch
     * @param ports           ports assigned to the worker
     */
    public LaunchTaskRequest(ScheduleRequest scheduleRequest, WorkerPorts ports) {
        this.scheduleRequest = scheduleRequest;
        this.ports = ports;
    }

    public ScheduleRequest getScheduleRequest() {
        return scheduleRequest;
    }

    public WorkerPorts getPorts() {
        return ports;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        final LaunchTaskRequest other = (LaunchTaskRequest) o;
        if (scheduleRequest == null) {
            if (other.scheduleRequest != null) {
                return false;
            }
        } else if (!scheduleRequest.equals(other.scheduleRequest)) {
            return false;
        }
        return ports == null ? other.ports == null : ports.equals(other.ports);
    }

    @Override
    public int hashCode() {
        // Same 31-based combination as the original implementation.
        int result = scheduleRequest == null ? 0 : scheduleRequest.hashCode();
        result = 31 * result + (ports == null ? 0 : ports.hashCode());
        return result;
    }

    @Override
    public String toString() {
        final StringBuilder sb = new StringBuilder("LaunchTaskRequest{");
        sb.append("scheduleRequest=").append(scheduleRequest);
        sb.append(", ports=").append(ports);
        return sb.append('}').toString();
    }
}
| 7,974 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/scheduler/WorkerEvent.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.scheduler;
import io.mantisrx.server.core.domain.WorkerId;
/** Base contract for events about a specific worker. */
public interface WorkerEvent {
    /** @return the id of the worker this event refers to. */
    WorkerId getWorkerId();
    /** @return the time (epoch millis) at which this event was created. */
    long getEventTimeMs();
}
| 7,975 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/scheduler/WorkerUnscheduleable.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.scheduler;
import io.mantisrx.server.core.domain.WorkerId;
import java.util.Objects;
/**
 * Worker event signalling that a worker could not be assigned resources by the
 * scheduler (no matching offer), identified by worker id and stage number.
 */
public class WorkerUnscheduleable implements WorkerEvent {

    private final WorkerId workerId;
    private final int stageNum;
    // Captured once at construction; participates in equals/hashCode.
    private final long eventTimeMs = System.currentTimeMillis();

    /**
     * @param workerId id of the unschedulable worker
     * @param stageNum stage the worker belongs to
     */
    public WorkerUnscheduleable(final WorkerId workerId,
                                final int stageNum) {
        this.workerId = workerId;
        this.stageNum = stageNum;
    }

    @Override
    public WorkerId getWorkerId() {
        return workerId;
    }

    public int getStageNum() {
        return stageNum;
    }

    @Override
    public long getEventTimeMs() {
        return eventTimeMs;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        final WorkerUnscheduleable other = (WorkerUnscheduleable) o;
        return stageNum == other.stageNum
            && eventTimeMs == other.eventTimeMs
            && Objects.equals(workerId, other.workerId);
    }

    @Override
    public int hashCode() {
        return Objects.hash(workerId, stageNum, eventTimeMs);
    }

    @Override
    public String toString() {
        final StringBuilder sb = new StringBuilder("WorkerUnscheduleable{");
        sb.append("workerId=").append(workerId);
        sb.append(", stageNum=").append(stageNum);
        sb.append(", eventTimeMs=").append(eventTimeMs);
        return sb.append('}').toString();
    }
}
| 7,976 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/scheduler/MantisSchedulerFactoryImpl.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.scheduler;
import akka.actor.ActorSystem;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.server.master.ExecuteStageRequestFactory;
import io.mantisrx.server.master.SchedulingService;
import io.mantisrx.server.master.config.MasterConfiguration;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.ResourceClusters;
import io.mantisrx.shaded.com.google.common.base.Strings;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
/**
 * MantisSchedulerFactory that returns a resource-cluster-aware scheduler (backed by
 * a dedicated actor, created lazily and cached per ClusterID) when a cluster id is
 * given, and falls back to the Mesos SchedulingService when it is null.
 */
@RequiredArgsConstructor
@Slf4j
public class MantisSchedulerFactoryImpl implements MantisSchedulerFactory {
    private final ActorSystem actorSystem;
    private final ResourceClusters resourceClusters;
    private final ExecuteStageRequestFactory executeStageRequestFactory;
    private final JobMessageRouter jobMessageRouter;
    // Fallback scheduler used when no resource cluster id is supplied.
    private final SchedulingService mesosSchedulingService;
    private final MasterConfiguration masterConfiguration;
    private final MetricsRegistry metricsRegistry;
    // Cache of one scheduler (actor) per cluster id; concurrent because forClusterID
    // may be called from multiple threads.
    private final Map<ClusterID, MantisScheduler> actorRefMap = new ConcurrentHashMap<>();
    @Override
    public MantisScheduler forClusterID(@Nullable ClusterID clusterID) {
        Optional<ClusterID> clusterIDOptional = Optional.ofNullable(clusterID);
        if (clusterIDOptional.isPresent()) {
            // Reject cluster ids whose resource id is missing — the actor name below
            // is derived from it.
            if (Strings.isNullOrEmpty(clusterIDOptional.get().getResourceID())) {
                log.error("Received empty resource id: {}", clusterIDOptional.get());
                throw new RuntimeException("Empty resourceID in clusterID for MantisScheduler");
            }
            return
                actorRefMap.computeIfAbsent(
                    clusterID,
                    (cid) -> {
                        log.info("Created scheduler actor for cluster: {}",
                            clusterIDOptional.get().getResourceID());
                        // NOTE(review): getSchedulerMaxRetries() is passed for both of the
                        // first two props arguments — confirm whether the second should be
                        // a distinct (e.g. cancel-retry) configuration value.
                        return new ResourceClusterAwareScheduler(actorSystem.actorOf(
                            ResourceClusterAwareSchedulerActor.props(
                                masterConfiguration.getSchedulerMaxRetries(),
                                masterConfiguration.getSchedulerMaxRetries(),
                                masterConfiguration.getSchedulerIntervalBetweenRetries(),
                                resourceClusters.getClusterFor(cid),
                                executeStageRequestFactory,
                                jobMessageRouter,
                                metricsRegistry),
                            "scheduler-for-" + cid.getResourceID()));
                    });
        } else {
            // No cluster id: use the legacy Mesos scheduling service.
            return mesosSchedulingService;
        }
    }
}
| 7,977 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/scheduler/WorkerOnDisabledVM.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.scheduler;
import io.mantisrx.server.core.domain.WorkerId;
import java.util.Objects;
/**
 * Worker event signalling that the worker is located on a VM that has been disabled.
 */
public class WorkerOnDisabledVM implements WorkerEvent {

    private final WorkerId workerId;
    // Captured once at construction; participates in equals/hashCode.
    private final long eventTimeMs = System.currentTimeMillis();

    /** @param workerId id of the worker on the disabled VM */
    public WorkerOnDisabledVM(final WorkerId workerId) {
        this.workerId = workerId;
    }

    @Override
    public WorkerId getWorkerId() {
        return workerId;
    }

    @Override
    public long getEventTimeMs() {
        return eventTimeMs;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        final WorkerOnDisabledVM other = (WorkerOnDisabledVM) o;
        return eventTimeMs == other.eventTimeMs
            && Objects.equals(workerId, other.workerId);
    }

    @Override
    public int hashCode() {
        return Objects.hash(workerId, eventTimeMs);
    }

    @Override
    public String toString() {
        final StringBuilder sb = new StringBuilder("WorkerOnDisabledVM{");
        sb.append("workerId=").append(workerId);
        sb.append(", eventTimeMs=").append(eventTimeMs);
        return sb.append('}').toString();
    }
}
| 7,978 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/scheduler/MantisSchedulerFactory.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.scheduler;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import java.util.Optional;
import javax.annotation.Nullable;
/**
 * Factory for obtaining the appropriate {@link MantisScheduler}, keyed by the
 * resource cluster a job targets (or its JobDefinition).
 */
@FunctionalInterface
public interface MantisSchedulerFactory {
    /**
     * Resolves the scheduler for a job by delegating to {@link #forClusterID} with the
     * job's resource cluster id (or {@code null} when the job declares none).
     */
    default MantisScheduler forJob(JobDefinition jobDefinition) {
        Optional<ClusterID> clusterIDOptional = jobDefinition.getResourceCluster();
        return forClusterID(clusterIDOptional.orElse(null));
    }
    /**
     * Returns the MantisScheduler based on the ClusterID.
     *
     * @param clusterID cluster ID for which the MantisScheduler is requested; may be null.
     * @return MantisScheduler corresponding to the ClusterID.
     */
    MantisScheduler forClusterID(@Nullable ClusterID clusterID);
}
| 7,979 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/utils/MantisSystemClock.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.utils;
/** MantisClock backed by the system wall clock. */
public class MantisSystemClock implements MantisClock {
    // Stateless, so a single shared instance suffices.
    public static final MantisSystemClock INSTANCE = new MantisSystemClock();
    /** @return current wall-clock time via {@link System#currentTimeMillis()}. */
    @Override
    public long now() {
        return System.currentTimeMillis();
    }
}
| 7,980 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/utils/MantisClock.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.utils;
/** Abstraction over the current time, allowing tests to substitute a manual clock. */
public interface MantisClock {
    /**
     * @return the difference, measured in milliseconds, between
     * the current time and midnight, January 1, 1970 UTC.
     */
    long now();
}
| 7,981 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/utils/MantisUserClock.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.utils;
/**
 * Manually-controlled MantisClock: time only moves when {@link #setNow} or
 * {@link #advanceTime} is called. Intended for tests/simulation.
 */
public class MantisUserClock implements MantisClock {
    // Uppercase 'L' literal — the original '0l' reads dangerously like '01'.
    // volatile gives visibility across threads, but advanceTime's read-modify-write
    // below is NOT atomic; concurrent advancers may lose updates.
    private volatile long currentTime = 0L;

    /** @return the manually set "current" time in epoch millis (0 until first set). */
    @Override
    public long now() {
        return currentTime;
    }

    /** Sets the clock to an absolute timestamp in millis. */
    public void setNow(final long timestamp) {
        currentTime = timestamp;
    }

    /** Moves the clock by {@code delta} millis (negative moves it backwards). */
    public void advanceTime(final long delta) {
        currentTime += delta;
    }
}
| 7,982 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/mesos/VirtualMachineMasterServiceMesosImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.mesos;
import com.google.protobuf.ByteString;
import com.netflix.fenzo.VirtualMachineLease;
import io.mantisrx.common.JsonSerializer;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.server.core.BaseService;
import io.mantisrx.server.core.ExecuteStageRequest;
import io.mantisrx.server.core.WorkerTopologyInfo;
import io.mantisrx.server.core.domain.JobMetadata;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.LaunchTaskException;
import io.mantisrx.server.master.VirtualMachineMasterService;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.config.MasterConfiguration;
import io.mantisrx.server.master.scheduler.LaunchTaskRequest;
import io.mantisrx.server.master.scheduler.ScheduleRequest;
import java.io.IOException;
import java.net.URL;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.mesos.Protos;
import org.apache.mesos.Protos.CommandInfo;
import org.apache.mesos.Protos.ExecutorID;
import org.apache.mesos.Protos.ExecutorInfo;
import org.apache.mesos.Protos.Offer;
import org.apache.mesos.Protos.Resource;
import org.apache.mesos.Protos.TaskID;
import org.apache.mesos.Protos.TaskInfo;
import org.apache.mesos.Protos.Value;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.functions.Action0;
public class VirtualMachineMasterServiceMesosImpl extends BaseService implements VirtualMachineMasterService {
private static final Logger logger = LoggerFactory.getLogger(VirtualMachineMasterServiceMesosImpl.class);
private final String masterDescriptionJson;
private final MesosDriverSupplier mesosDriver;
private final AtomicBoolean initializationDone = new AtomicBoolean(false);
private volatile int workerJvmMemoryScaleBackPct;
private MasterConfiguration masterConfig;
private ExecutorService executor;
private final JsonSerializer jsonSerializer = new JsonSerializer();
/**
 * Builds the Mesos-backed VM master service.
 *
 * @param masterConfig                 master configuration source
 * @param masterDescriptionJson        serialized description of this master (kept for later use)
 * @param mesosSchedulerDriverSupplier lazy supplier of the Mesos scheduler driver
 */
public VirtualMachineMasterServiceMesosImpl(
        final MasterConfiguration masterConfig,
        final String masterDescriptionJson,
        final MesosDriverSupplier mesosSchedulerDriverSupplier) {
    super(true);
    this.masterConfig = masterConfig;
    this.masterDescriptionJson = masterDescriptionJson;
    this.mesosDriver = mesosSchedulerDriverSupplier;
    // Single daemon thread dedicated to Mesos scheduler work; daemon so it does
    // not block JVM shutdown.
    executor = Executors.newSingleThreadExecutor(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r, "vm_master_mesos_scheduler_thread");
            t.setDaemon(true);
            return t;
        }
    });
    // Scale-back percentage is capped at 99 so worker JVM memory is never reduced to zero.
    workerJvmMemoryScaleBackPct = Math.min(99, ConfigurationProvider.getConfig().getWorkerJvmMemoryScaleBackPercentage());
}
// NOTE: All leases are for the same agent.
@Override
public Map<ScheduleRequest, LaunchTaskException> launchTasks(List<LaunchTaskRequest> requests, List<VirtualMachineLease> leases) {
if (!super.getIsInited()) {
logger.error("Not in leader mode, not launching tasks");
return new HashMap<>();
}
Protos.SlaveID slaveID = leases.get(0).getOffer().getSlaveId();
List<Protos.OfferID> offerIDs = new ArrayList<>();
for (VirtualMachineLease vml : leases)
offerIDs.add(vml.getOffer().getId());
Map<ScheduleRequest, LaunchTaskException> errorResults = new HashMap<>();
List<TaskInfo> taskInfos = new ArrayList<>();
for (LaunchTaskRequest request : requests) {
try {
taskInfos.addAll(createTaskInfo(slaveID, request));
} catch (LaunchTaskException e) {
errorResults.put(request.getScheduleRequest(), e);
}
}
if (!taskInfos.isEmpty())
mesosDriver.get().launchTasks(offerIDs, taskInfos);
else { // reject offers to prevent offer leak, but shouldn't happen
for (VirtualMachineLease l : leases) {
mesosDriver.get().declineOffer(l.getOffer().getId());
}
}
return errorResults;
}
@Override
public void rejectLease(VirtualMachineLease lease) {
if (!super.getIsInited()) {
logger.error("Not in leader mode, not rejecting lease");
return;
}
VirtualMachineLeaseMesosImpl mesosLease = (VirtualMachineLeaseMesosImpl) lease;
Offer offer = mesosLease.getOffer();
mesosDriver.get().declineOffer(offer.getId());
}
private Collection<TaskInfo> createTaskInfo(Protos.SlaveID slaveID, final LaunchTaskRequest launchTaskRequest) throws LaunchTaskException {
final ScheduleRequest scheduleRequest = launchTaskRequest.getScheduleRequest();
String name = scheduleRequest.getWorkerId().getJobCluster() + " (stage: "
+ scheduleRequest.getStageNum() + " of " + scheduleRequest.getJobMetadata().getTotalStages() + ")";
TaskID taskId = TaskID.newBuilder()
.setValue(scheduleRequest.getWorkerId().getId())
.build();
MachineDefinition machineDefinition = scheduleRequest.getMachineDefinition();
// grab ports within range
List<Integer> ports = launchTaskRequest.getPorts().getAllPorts();
TaskInfo taskInfo = null;
try {
TaskInfo.Builder taskInfoBuilder = TaskInfo.newBuilder();
ExecuteStageRequest executeStageRequest = new ExecuteStageRequest(
scheduleRequest.getWorkerId().getJobCluster(),
scheduleRequest.getWorkerId().getJobId(),
scheduleRequest.getWorkerId().getWorkerIndex(),
scheduleRequest.getWorkerId().getWorkerNum(),
scheduleRequest.getJobMetadata().getJobJarUrl(),
scheduleRequest.getStageNum(),
scheduleRequest.getJobMetadata().getTotalStages(),
ports,
getTimeoutSecsToReportStart(),
launchTaskRequest.getPorts().getMetricsPort(),
scheduleRequest.getJobMetadata().getParameters(),
scheduleRequest.getJobMetadata().getSchedulingInfo(),
scheduleRequest.getDurationType(),
scheduleRequest.getJobMetadata().getHeartbeatIntervalSecs(),
scheduleRequest.getJobMetadata().getSubscriptionTimeoutSecs(),
scheduleRequest.getJobMetadata().getMinRuntimeSecs() - (System.currentTimeMillis() - scheduleRequest.getJobMetadata().getMinRuntimeSecs()),
launchTaskRequest.getPorts(),
Optional.empty(),
"user");
taskInfoBuilder
.setName(name)
.setTaskId(taskId)
.setSlaveId(slaveID)
.addResources(
Resource.newBuilder()
.setName("cpus")
.setType(Value.Type.SCALAR)
.setScalar(
Value.Scalar.newBuilder()
.setValue(machineDefinition.getCpuCores())))
.addResources(
Resource.newBuilder()
.setName("mem")
.setType(Value.Type.SCALAR)
.setScalar(
Value.Scalar.newBuilder()
.setValue(machineDefinition.getMemoryMB())))
.addResources(
Resource.newBuilder()
.setName("disk")
.setType(Value.Type.SCALAR)
.setScalar(
Value.Scalar.newBuilder()
.setValue(machineDefinition.getDiskMB())))
.addResources(
Resource.newBuilder()
.setName("network")
.setType(Value.Type.SCALAR)
.setScalar(
Value.Scalar.newBuilder()
.setValue(machineDefinition.getNetworkMbps())
)
)
.setExecutor(
createMantisWorkerExecutor(executeStageRequest,
launchTaskRequest, machineDefinition.getMemoryMB(), machineDefinition.getCpuCores()))
.setData(
ByteString.copyFrom(
jsonSerializer.toJsonBytes(
executeStageRequest)));
if (!ports.isEmpty()) {
for (Integer port : ports) {
// add ports
taskInfoBuilder.addResources(
Resource
.newBuilder()
.setName("ports")
.setType(Value.Type.RANGES)
.setRanges(
Value.Ranges
.newBuilder()
.addRange(Value.Range.newBuilder()
.setBegin(port)
.setEnd(port))));
}
}
taskInfo = taskInfoBuilder.build();
} catch (IOException e) {
throw new LaunchTaskException("Failed to build a TaskInfo instance: " + e.getMessage(), e);
}
List<TaskInfo> tasks = new ArrayList<>(1);
tasks.add(taskInfo);
return tasks;
}
private int getMemSize(int original) {
// If job asked for >999MB but <4000MB, subtract out 500 MB for JVM, meta_space, code_cache, etc.
// leaving rest for the heap, Xmx.
if (original < 4000)
return original > 999 ? original - 500 : original;
// If job asked for >4000, subtract out based on scale back percentage, but at least 500 MB
return original - Math.max((int) (original * workerJvmMemoryScaleBackPct / 100.0), 500);
}
private ExecutorInfo createMantisWorkerExecutor(final ExecuteStageRequest executeStageRequest,
final LaunchTaskRequest launchTaskRequest,
final double memoryMB,
final double cpuCores) {
final int memSize = getMemSize((int) memoryMB);
final int numCpu = (int) Math.ceil(cpuCores);
final WorkerId workerId = launchTaskRequest.getScheduleRequest().getWorkerId();
String executorName = workerId.getId();
JobMetadata jobMetadata = launchTaskRequest.getScheduleRequest().getJobMetadata();
URL jobJarUrl = jobMetadata.getJobJarUrl();
Protos.Environment.Builder envBuilder = Protos.Environment.newBuilder()
.addVariables(
Protos.Environment.Variable.newBuilder()
.setName("JOB_URL")
.setValue(jobJarUrl.toString()))
.addVariables(
Protos.Environment.Variable.newBuilder()
.setName("JOB_NAME")
.setValue(executorName))
.addVariables(
Protos.Environment.Variable.newBuilder()
.setName("WORKER_LIB_DIR")
.setValue(getWorkerLibDir()))
.addVariables(
Protos.Environment.Variable.newBuilder()
.setName("JVM_MEMORY_MB")
.setValue("" + (memSize))
)
.addVariables(
Protos.Environment.Variable.newBuilder()
.setName("JVM_META_SPACE_MB")
.setValue("100")
)
.addVariables(
Protos.Environment.Variable.newBuilder()
.setName("JVM_CODE_CACHE_SIZE_MB")
.setValue("200")
)
.addVariables(
Protos.Environment.Variable.newBuilder()
.setName("JVM_COMP_CLASS_SIZE_MB")
.setValue("100")
)
.addVariables(
Protos.Environment.Variable.newBuilder()
.setName("WORKER_INDEX")
.setValue("" + (workerId.getWorkerIndex()))
)
.addVariables(
Protos.Environment.Variable.newBuilder()
.setName("WORKER_NUMBER")
.setValue("" + (workerId.getWorkerNum()))
)
.addVariables(
Protos.Environment.Variable.newBuilder()
.setName("JOB_ID")
.setValue(workerId.getJobId())
)
.addVariables(
Protos.Environment.Variable.newBuilder()
.setName("MANTIS_WORKER_DEBUG_PORT")
.setValue("" + launchTaskRequest.getPorts().getDebugPort())
)
.addVariables(
Protos.Environment.Variable.newBuilder()
.setName("MANTIS_WORKER_CONSOLE_PORT")
.setValue("" + launchTaskRequest.getPorts().getConsolePort())
)
.addVariables(
Protos.Environment.Variable.newBuilder()
.setName("MANTIS_USER")
.setValue("" + jobMetadata.getUser())
)
.addVariables(
Protos.Environment.Variable.newBuilder()
.setName("STAGE_NUMBER")
.setValue("" + launchTaskRequest.getScheduleRequest().getStageNum())
)
.addVariables(
Protos.Environment.Variable.newBuilder()
.setName("NUM_CPU")
.setValue("" + numCpu)
);
// add worker info
Map<String, String> envVars = new WorkerTopologyInfo.Writer(executeStageRequest).getEnvVars();
for (Map.Entry<String, String> entry : envVars.entrySet()) {
envBuilder = envBuilder
.addVariables(
Protos.Environment.Variable.newBuilder()
.setName(entry.getKey())
.setValue(entry.getValue())
);
}
// add job parameters
for (Parameter parameter : executeStageRequest.getParameters()) {
if (parameter.getName() != null && parameter.getValue() != null) {
envBuilder = envBuilder.addVariables(
Protos.Environment.Variable.newBuilder()
.setName(String.format("JOB_PARAM_" + parameter.getName()))
.setValue(parameter.getValue())
);
}
}
// add ZooKeeper properties
Protos.Environment env = envBuilder
.addVariables(
Protos.Environment.Variable.newBuilder()
.setName("mantis.zookeeper.connectString")
.setValue(masterConfig.getZkConnectionString())
)
.addVariables(
Protos.Environment.Variable.newBuilder()
.setName("mantis.zookeeper.root")
.setValue(masterConfig.getZkRoot())
)
.addVariables(
Protos.Environment.Variable.newBuilder()
.setName("mantis.zookeeper.leader.announcement.path")
.setValue(masterConfig.getLeaderAnnouncementPath())
)
.addVariables(
Protos.Environment.Variable.newBuilder()
.setName("MASTER_DESCRIPTION")
.setValue(masterDescriptionJson))
.build();
return ExecutorInfo.newBuilder()
.setExecutorId(ExecutorID.newBuilder().setValue(executorName))
.setCommand(
CommandInfo.newBuilder()
.setValue(getWorkerExecutorStartupScriptFullPath())
.setEnvironment(env))
.setName(getWorkerExecutorName())
.setSource(workerId.getJobId())
.build();
}
@Override
public void killTask(final WorkerId workerId) {
if (!super.getIsInited()) {
logger.error("Not in leader mode, not killing task");
return;
}
final String taskIdString = workerId.getId();
logger.info("Calling mesos to kill " + taskIdString);
try {
Protos.Status status = mesosDriver.get().killTask(
TaskID.newBuilder()
.setValue(taskIdString)
.build());
logger.info("Kill status = " + status);
switch (status) {
case DRIVER_ABORTED:
case DRIVER_STOPPED:
logger.error("Unexpected to see Mesos driver status of " + status + " from kill task request. Committing suicide!");
System.exit(2);
}
} catch (RuntimeException e) {
// IllegalStateException from no mesosDriver's addVMLeaseAction or NPE from mesosDriver.get() being null.
if (mesosDriver.get() == null) {
logger.warn("mesosDriver supplier returned null mesosDriver");
}
logger.error("Unexpected to see Mesos driver not initialized", e);
System.exit(2);
}
}
@Override
public void start() {
super.awaitActiveModeAndStart(new Action0() {
@Override
public void call() {
logger.info("Registering Mantis Framework with Mesos");
if (!initializationDone.compareAndSet(false, true))
throw new IllegalStateException("Duplicate start() call");
executor.execute(() -> {
try {
logger.info("invoking the Mesos driver run");
Protos.Status status = mesosDriver.get().run();
logger.info("MesosSchedulerDriver run status {}", status);
} catch (Exception e) {
logger.error("Failed to register Mantis Framework with Mesos", e);
System.exit(2);
}
});
}
});
}
@Override
public void shutdown() {
logger.info("Unregistering Mantis Framework with Mesos");
mesosDriver.shutdown();
executor.shutdown();
}
public String getMesosMasterHostAndPort() {
return masterConfig.getMasterLocation();
}
public String getWorkerInstallDir() {
return masterConfig.getWorkerInstallDir();
}
public String getWorkerLibDir() {
return Paths.get(getWorkerInstallDir(), "libs").toString();
}
private String getWorkerExecutorScript() {
return masterConfig.getWorkerExecutorScript();
}
private boolean getUseSlaveFiltering() {
return masterConfig.getUseSlaveFiltering();
}
private String getSlaveFilterAttributeName() {
return masterConfig.getSlaveFilterAttributeName();
}
public String getWorkerBinDir() {
return Paths.get(getWorkerInstallDir(), "bin").toString();
}
private String getWorkerExecutorStartupScriptFullPath() {
return Paths.get(getWorkerBinDir(), getWorkerExecutorScript()).toString();
}
public String getMantisFrameworkName() {
return masterConfig.getMantisFrameworkName();
}
public String getWorkerExecutorName() {
return masterConfig.getWorkerExecutorName();
}
public long getTimeoutSecsToReportStart() {
return masterConfig.getTimeoutSecondsToReportStart();
}
private double getMesosFailoverTimeoutSecs() {
return masterConfig.getMesosFailoverTimeOutSecs();
}
}
| 7,983 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/mesos/MesosDriverSupplier.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.mesos;
import com.netflix.fenzo.VirtualMachineLease;
import io.mantisrx.server.master.config.MasterConfiguration;
import io.mantisrx.server.master.scheduler.JobMessageRouter;
import io.mantisrx.server.master.scheduler.WorkerRegistry;
import io.mantisrx.shaded.com.google.common.base.Preconditions;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;
import org.apache.mesos.MesosSchedulerDriver;
import org.apache.mesos.Protos;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observer;
import rx.functions.Action1;
/**
 * Lazily creates and caches the singleton {@link MesosSchedulerDriver}.
 *
 * <p>The first caller of {@link #get()} wins the CAS on {@code isInitialized} and performs the
 * (time-bounded) driver construction; concurrent callers block in a sleep loop until the driver
 * reference becomes non-null. Construction failures are retried up to
 * {@code getMesosSchedulerDriverInitMaxAttempts()} times, after which the process exits so a
 * standby master can take over.
 *
 * <p>{@link #setAddVMLeaseAction(Action1)} MUST be invoked before the first {@link #get()} call.
 */
public class MesosDriverSupplier implements Supplier<MesosSchedulerDriver> {
    private static final Logger logger = LoggerFactory.getLogger(MesosDriverSupplier.class);
    private final MasterConfiguration masterConfig;
    private final Observer<String> vmLeaseRescindedObserver;
    private final JobMessageRouter jobMessageRouter;
    private final WorkerRegistry workerRegistry;
    // Holds the singleton driver once successfully constructed; null until then.
    private final AtomicReference<MesosSchedulerDriver> mesosDriverRef = new AtomicReference<>(null);
    // True while one thread owns the initialization attempt; reset to false on failure to allow retry.
    private final AtomicBoolean isInitialized = new AtomicBoolean(false);
    // Callback that feeds incoming Mesos offers (leases) to the scheduler; must be set before get().
    private volatile Action1<List<VirtualMachineLease>> addVMLeaseAction = null;
    private final AtomicInteger numAttemptsToInit = new AtomicInteger(0);

    public MesosDriverSupplier(final MasterConfiguration masterConfig,
                               final Observer<String> vmLeaseRescindedObserver,
                               final JobMessageRouter jobMessageRouter,
                               final WorkerRegistry workerRegistry) {
        this.masterConfig = masterConfig;
        this.vmLeaseRescindedObserver = vmLeaseRescindedObserver;
        this.jobMessageRouter = jobMessageRouter;
        this.workerRegistry = workerRegistry;
    }

    /**
     * Constructs a MesosSchedulerDriver on a throwaway single-thread executor, waiting at most
     * {@code getMesosSchedulerDriverInitTimeoutSec()} seconds (native driver construction can hang).
     *
     * @return the driver, or {@link Optional#empty()} on timeout/failure
     */
    Optional<MesosSchedulerDriver> initMesosSchedulerDriverWithTimeout(MesosSchedulerCallbackHandler mesosSchedulerCallbackHandler,
                                                                       Protos.FrameworkInfo framework) {
        ExecutorService executorService = Executors.newSingleThreadExecutor();
        int mesosSchedulerDriverInitTimeoutSec = masterConfig.getMesosSchedulerDriverInitTimeoutSec();
        logger.info("initializing mesos scheduler driver with timeout of {} sec", mesosSchedulerDriverInitTimeoutSec);
        Optional<MesosSchedulerDriver> mesosSchedulerDriverO = Optional.empty();
        try {
            Future<MesosSchedulerDriver> driverF = executorService.submit(() -> new MesosSchedulerDriver(mesosSchedulerCallbackHandler, framework, masterConfig.getMasterLocation()));
            MesosSchedulerDriver mesosSchedulerDriver = driverF.get(mesosSchedulerDriverInitTimeoutSec, TimeUnit.SECONDS);
            mesosSchedulerDriverO = Optional.ofNullable(mesosSchedulerDriver);
        } catch (Exception e) {
            logger.info("failed to initialize MesosSchedulerDriver", e);
        } finally {
            executorService.shutdown();
        }
        return mesosSchedulerDriverO;
    }

    /**
     * Returns the singleton driver, creating it on first call. The winner of the CAS builds the
     * driver (retrying recursively on failure, exiting the process after the max attempt count);
     * losers poll {@code mesosDriverRef} until it is populated or a bounded wait elapses.
     *
     * @throws IllegalStateException if {@link #setAddVMLeaseAction(Action1)} was not called first
     */
    @Override
    public MesosSchedulerDriver get() {
        if (addVMLeaseAction == null) {
            logger.warn("addVMLeaseAction is null, attempt to get Mesos Driver before MesosDriverSupplier init");
            throw new IllegalStateException("addVMLeaseAction must be set before creating MesosSchedulerDriver");
        }
        if (isInitialized.compareAndSet(false, true)) {
            if (numAttemptsToInit.incrementAndGet() > masterConfig.getMesosSchedulerDriverInitMaxAttempts()) {
                logger.error("Failed to initialize Mesos scheduler driver after {} attempts, will terminate master",
                        numAttemptsToInit.get() - 1);
                System.exit(2);
            }
            logger.info("initializing mesos scheduler callback handler");
            final MesosSchedulerCallbackHandler mesosSchedulerCallbackHandler =
                    new MesosSchedulerCallbackHandler(addVMLeaseAction, vmLeaseRescindedObserver, jobMessageRouter,
                            workerRegistry);
            final Protos.FrameworkInfo framework = Protos.FrameworkInfo.newBuilder()
                    .setUser(masterConfig.getMantisFrameworkUserName())
                    .setName(masterConfig.getMantisFrameworkName())
                    .setFailoverTimeout(masterConfig.getMesosFailoverTimeOutSecs())
                    .setId(Protos.FrameworkID.newBuilder().setValue(masterConfig.getMantisFrameworkName()))
                    .setCheckpoint(true)
                    .build();
            logger.info("initializing mesos scheduler driver");
            // On failure, reset the init flag and recurse; the recursive call either succeeds,
            // blocks in the waiter branch, or trips the max-attempts exit above.
            MesosSchedulerDriver mesosDriver = initMesosSchedulerDriverWithTimeout(mesosSchedulerCallbackHandler, framework).orElseGet(() -> {
                logger.info("initialize MesosSchedulerDriver failed, will retry");
                isInitialized.compareAndSet(true, false);
                return this.get();
            });
            // result is false when a recursive retry already published the driver; the value is the same.
            boolean result = mesosDriverRef.compareAndSet(null, mesosDriver);
            logger.info("initialized mesos scheduler driver {}", result);
        } else {
            int sleepIntervalMillis = 1000;
            int maxTimeToWaitMillis =
                    masterConfig.getMesosSchedulerDriverInitMaxAttempts() * masterConfig.getMesosSchedulerDriverInitTimeoutSec() * 1000;
            // block maxTimeToWaitMillis till mesosDriver is not null
            while (mesosDriverRef.get() == null) {
                if (maxTimeToWaitMillis <= 0) {
                    logger.error("mesos driver init taking too long, exiting");
                    System.exit(2);
                }
                try {
                    logger.info("mesos scheduler driver null, sleep for 1 sec awaiting init");
                    Thread.sleep(sleepIntervalMillis);
                    maxTimeToWaitMillis -= sleepIntervalMillis;
                } catch (InterruptedException e) {
                    logger.warn("thread interrupted during sleep", e);
                    Thread.currentThread().interrupt();
                }
            }
        }
        return mesosDriverRef.get();
    }

    /** Sets the (required) callback that receives incoming Mesos offers; must precede get(). */
    public void setAddVMLeaseAction(final Action1<List<VirtualMachineLease>> addVMLeaseAction) {
        Preconditions.checkNotNull(addVMLeaseAction);
        this.addVMLeaseAction = addVMLeaseAction;
    }

    /** Stops the driver (with failover enabled) if it was ever created. */
    public void shutdown() {
        MesosSchedulerDriver mesosSchedulerDriver = mesosDriverRef.get();
        if (mesosSchedulerDriver != null) {
            mesosSchedulerDriver.stop(true);
        } else {
            logger.info("mesos driver null, continue shutdown");
        }
    }
}
| 7,984 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/mesos/VirtualMachineLeaseMesosImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.mesos;
import com.netflix.fenzo.VirtualMachineLease;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.mesos.Protos;
import org.apache.mesos.Protos.Offer;
import org.apache.mesos.Protos.Resource;
import org.apache.mesos.Protos.Value;
/**
 * Fenzo {@link VirtualMachineLease} backed by a Mesos resource {@link Offer}. Scalar resources
 * (cpus/mem/disk/network), port ranges and agent attributes are extracted once at construction.
 * Network bandwidth is expected as a consumable scalar resource named "network".
 */
public class VirtualMachineLeaseMesosImpl implements VirtualMachineLease {
    private Offer offer;
    private double cpuCores;
    private double memoryMB;
    private double networkMbps = 0.0;
    private double diskMB;
    private String hostname;
    private String vmID;
    // Remains null when the offer carries no "ports" resource, matching historical behavior.
    private List<Range> portRanges;
    private Map<String, Protos.Attribute> attributeMap;
    private long offeredTime;

    public VirtualMachineLeaseMesosImpl(Offer offer) {
        this.offer = offer;
        this.hostname = offer.getHostname();
        this.vmID = offer.getSlaveId().getValue();
        this.offeredTime = System.currentTimeMillis();
        // Pull the resources we care about out of the offer; anything else is ignored.
        for (Resource res : offer.getResourcesList()) {
            switch (res.getName()) {
                case "cpus":
                    cpuCores = res.getScalar().getValue();
                    break;
                case "mem":
                    memoryMB = res.getScalar().getValue();
                    break;
                case "disk":
                    diskMB = res.getScalar().getValue();
                    break;
                case "network":
                    networkMbps = res.getScalar().getValue();
                    break;
                case "ports":
                    portRanges = new ArrayList<>();
                    for (Value.Range r : res.getRanges().getRangeList()) {
                        portRanges.add(new Range((int) r.getBegin(), (int) r.getEnd()));
                    }
                    break;
                default:
                    break;
            }
        }
        attributeMap = new HashMap<>();
        if (offer.getAttributesCount() > 0) {
            for (Protos.Attribute attr : offer.getAttributesList()) {
                attributeMap.put(attr.getName(), attr);
            }
        }
    }

    @Override
    public String hostname() {
        return hostname;
    }

    @Override
    public String getVMID() {
        return vmID;
    }

    @Override
    public double cpuCores() {
        return cpuCores;
    }

    @Override
    public double memoryMB() {
        return memoryMB;
    }

    @Override
    public double networkMbps() {
        return networkMbps;
    }

    @Override
    public double diskMB() {
        return diskMB;
    }

    /** The raw Mesos offer backing this lease. */
    public Offer getOffer() {
        return offer;
    }

    @Override
    public String getId() {
        return offer.getId().getValue();
    }

    @Override
    public long getOfferedTime() {
        return offeredTime;
    }

    @Override
    public List<Range> portRanges() {
        return portRanges;
    }

    @Override
    public Map<String, Protos.Attribute> getAttributeMap() {
        return attributeMap;
    }

    // Custom scalar resources are not tracked by this implementation.
    @Override
    public Double getScalarValue(String name) {
        return null;
    }

    @Override
    public Map<String, Double> getScalarValues() {
        return Collections.emptyMap();
    }
}
| 7,985 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/mesos/MesosSchedulerCallbackHandler.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.mesos;
import com.netflix.fenzo.VirtualMachineLease;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.scheduler.JobMessageRouter;
import io.mantisrx.server.master.scheduler.WorkerRegistry;
import io.mantisrx.server.master.scheduler.WorkerResourceStatus;
import io.mantisrx.server.master.scheduler.WorkerResourceStatus.VMResourceState;
import io.mantisrx.shaded.com.google.common.base.Preconditions;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.mesos.Protos;
import org.apache.mesos.Protos.ExecutorID;
import org.apache.mesos.Protos.FrameworkID;
import org.apache.mesos.Protos.MasterInfo;
import org.apache.mesos.Protos.Offer;
import org.apache.mesos.Protos.OfferID;
import org.apache.mesos.Protos.SlaveID;
import org.apache.mesos.Protos.TaskStatus;
import org.apache.mesos.Scheduler;
import org.apache.mesos.SchedulerDriver;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Observer;
import rx.functions.Action1;
public class MesosSchedulerCallbackHandler implements Scheduler {
private static final Logger logger = LoggerFactory.getLogger(MesosSchedulerCallbackHandler.class);
private final Action1<List<VirtualMachineLease>> addVMLeaseAction;
private final WorkerRegistry workerRegistry;
private final Gauge lastOfferReceivedMillis;
private final Gauge lastValidOfferReceiveMillis;
private final Counter numMesosRegistered;
private final Counter numMesosDisconnects;
private final Counter numOfferRescinded;
private final Counter numReconcileTasks;
private final Counter numInvalidOffers;
private final Counter numOfferTooSmall;
private Observer<String> vmLeaseRescindedObserver;
private JobMessageRouter jobMessageRouter;
private volatile ScheduledFuture reconcilerFuture = null;
private AtomicLong lastOfferReceivedAt = new AtomicLong(System.currentTimeMillis());
private AtomicLong lastValidOfferReceivedAt = new AtomicLong(System.currentTimeMillis());
private long reconciliationTrial = 0;
/**
 * Registers offer-handling metrics and starts a 10-second ticker that publishes the age of the
 * last (valid) offer as gauges.
 *
 * @param addVMLeaseAction required callback that forwards usable offers to the scheduler
 * @param vmLeaseRescindedObserver notified with an offer id (or "ALL") when leases are rescinded
 * @param jobMessageRouter routes worker status events back to job actors
 * @param workerRegistry source of running worker-to-agent mappings used for reconciliation
 */
public MesosSchedulerCallbackHandler(
        final Action1<List<VirtualMachineLease>> addVMLeaseAction,
        final Observer<String> vmLeaseRescindedObserver,
        final JobMessageRouter jobMessageRouter,
        final WorkerRegistry workerRegistry) {
    this.addVMLeaseAction = Preconditions.checkNotNull(addVMLeaseAction);
    this.vmLeaseRescindedObserver = vmLeaseRescindedObserver;
    this.jobMessageRouter = jobMessageRouter;
    this.workerRegistry = workerRegistry;
    Metrics m = new Metrics.Builder()
            .name(MesosSchedulerCallbackHandler.class.getCanonicalName())
            .addCounter("numMesosRegistered")
            .addCounter("numMesosDisconnects")
            .addCounter("numOfferRescinded")
            .addCounter("numReconcileTasks")
            .addGauge("lastOfferReceivedMillis")
            .addGauge("lastValidOfferReceiveMillis")
            .addCounter("numInvalidOffers")
            .addCounter("numOfferTooSmall")
            .build();
    m = MetricsRegistry.getInstance().registerAndGet(m);
    numMesosRegistered = m.getCounter("numMesosRegistered");
    numMesosDisconnects = m.getCounter("numMesosDisconnects");
    numOfferRescinded = m.getCounter("numOfferRescinded");
    numReconcileTasks = m.getCounter("numReconcileTasks");
    lastOfferReceivedMillis = m.getGauge("lastOfferReceivedMillis");
    lastValidOfferReceiveMillis = m.getGauge("lastValidOfferReceiveMillis");
    numInvalidOffers = m.getCounter("numInvalidOffers");
    numOfferTooSmall = m.getCounter("numOfferTooSmall");
    // Periodically export how stale the last (valid) offer is; subscription lives for the
    // handler's lifetime.
    Observable
            .interval(10, 10, TimeUnit.SECONDS)
            .doOnNext(aLong -> {
                lastOfferReceivedMillis.set(System.currentTimeMillis() - lastOfferReceivedAt.get());
                lastValidOfferReceiveMillis.set(System.currentTimeMillis() - lastValidOfferReceivedAt.get());
            })
            .subscribe();
}
/**
 * Simple offer resource validator: rejects offers advertising less than 0.1 CPU or
 * less than 1 MB of memory.
 *
 * @param offer the Mesos resource offer to sanity-check
 * @return true when the offer meets the minimum resource thresholds
 */
private boolean validateOfferResources(Offer offer) {
    for (Protos.Resource resource : offer.getResourcesList()) {
        switch (resource.getName()) {
            case "cpus": {
                double cpus = resource.getScalar().getValue();
                if (cpus < 0.1) {
                    logger.warn("Declining offer due to too few CPUs in offer from " + offer.getHostname() +
                            ": " + cpus);
                    return false;
                }
                break;
            }
            case "mem": {
                double memoryMB = resource.getScalar().getValue();
                if (memoryMB < 1) {
                    logger.warn("Declining offer due to too few memory in offer from " + offer.getHostname() +
                            ": " + memoryMB);
                    return false;
                }
                break;
            }
            default:
                break;
        }
    }
    return true;
}
/**
 * Mesos callback for incoming resource offers. Each offer is either declined (failed agent
 * attribute filter or too few resources) or wrapped as a {@link VirtualMachineLeaseMesosImpl}
 * and handed to the scheduler via {@code addVMLeaseAction}.
 */
@Override
public void resourceOffers(SchedulerDriver driver, List<Offer> offers) {
    lastOfferReceivedAt.set(System.currentTimeMillis());
    // Refuse duration (seconds) applied when declining offers that fail the slave filter.
    double refuseSecs = 10000;
    final List<VirtualMachineLease> leases = new ArrayList<>();
    for (Offer offer : offers) {
        // if(!filterActiveVMs(offer)) {
        // // decline offer from inactive VMs
        // logger.info("Declining offer from host that is not active: " + offer.getHostname());
        // driver.declineOffer(offer.getId(), (Protos.Filters.getDefaultInstance().toBuilder()).setRefuseSeconds(60).build());
        // numInvalidOffers.increment();
        // continue;
        // }
        // Optional agent filtering: only accept offers whose configured attribute matches this
        // master's own environment value for the same attribute name.
        if (ConfigurationProvider.getConfig().getUseSlaveFiltering()) {
            String attrName = ConfigurationProvider.getConfig().getSlaveFilterAttributeName();
            String attrValue = null;
            if (offer.getAttributesCount() > 0) {
                for (Protos.Attribute attribute : offer.getAttributesList()) {
                    if (attrName.equals(attribute.getName())) {
                        attrValue = attribute.getText().getValue();
                        break;
                    }
                }
            }
            if (attrValue == null || !attrValue.equals(System.getenv(attrName))) {
                driver.declineOffer(offer.getId(), (Protos.Filters.getDefaultInstance().toBuilder()).setRefuseSeconds(refuseSecs).build());
                logger.warn("Declining offer from host " + offer.getHostname() + " due to missing attribute value for " + attrName + " - expecting [" +
                        System.getenv(attrName) + "] got [" + attrValue + "]");
                numInvalidOffers.increment();
                continue;
            }
        }
        if (!validateOfferResources(offer)) {
            // decline for a minute
            driver.declineOffer(offer.getId(), (Protos.Filters.getDefaultInstance().toBuilder()).setRefuseSeconds(60).build());
            numOfferTooSmall.increment();
            continue;
        }
        leases.add(new VirtualMachineLeaseMesosImpl(offer));
        lastValidOfferReceivedAt.set(System.currentTimeMillis());
    }
    addVMLeaseAction.call(leases);
}
// private boolean filterActiveVMs(Offer offer) {
// if(activeSlaveAttributeName==null || activeSlaveAttributeName.isEmpty())
// return true; // not filtering
// final List<String> list = activeSlaveAttributeValuesGetter.call();
// if(list==null || list.isEmpty())
// return true; // all are active
// if(offer.getAttributesCount()>0) {
// for(Protos.Attribute attribute: offer.getAttributesList()) {
// if(activeSlaveAttributeName.equals(attribute.getName())) {
// if(isIn(attribute.getText().getValue(), list))
// return true;
// }
// }
// }
// else
// logger.info("Filtering slave with no attributes: " + offer.getHostname());
// return false;
// }
/** Returns true iff {@code value} is non-empty and present in a non-empty {@code list}. */
private boolean isIn(String value, List<String> list) {
    if (value == null || value.isEmpty() || list == null || list.isEmpty()) {
        return false;
    }
    return list.contains(value);
}
/**
 * Mesos callback: the driver lost its connection to the master (e.g. master failover).
 * Only records the event; reconnection is handled by the driver itself.
 */
@Override
public void disconnected(SchedulerDriver driver) {
    // Parameterized logging avoids eager string concatenation.
    logger.warn("Mesos driver disconnected: {}", driver);
    numMesosDisconnects.increment();
}
@Override
public void error(SchedulerDriver arg0, String msg) {
logger.error("Error from Mesos: " + msg);
}
/** Mesos callback: an executor terminated on an agent; logged with its exit status. */
@Override
public void executorLost(SchedulerDriver driver, ExecutorID executorId,
                         SlaveID slaveId, int status) {
    // Parameterized logging avoids eager string concatenation.
    logger.warn("Lost executor {} on slave {} with status={}",
            executorId.getValue(), slaveId.getValue(), status);
}
/**
 * Mesos callback: a custom message arrived from an executor. Mantis does not use framework
 * messages, so this is only logged.
 */
@Override
public void frameworkMessage(SchedulerDriver driver, ExecutorID executorId,
                             SlaveID slaveId, byte[] data) {
    // Bug fix: concatenating a byte[] logged its identity hash (e.g. "[B@1a2b3c"), not the
    // payload. Arrays.toString renders the actual bytes.
    logger.warn("Unexpected framework message: executorId={}, slaveID={}, message={}",
            executorId.getValue(), slaveId.getValue(), Arrays.toString(data));
}
/**
 * Mesos callback: a previously received offer is no longer valid; the rescinded offer id is
 * forwarded so the scheduler can drop the corresponding lease.
 */
@Override
public void offerRescinded(SchedulerDriver driver, OfferID offerId) {
    // Parameterized logging avoids eager string concatenation.
    logger.warn("Offer rescinded: offerID={}", offerId.getValue());
    vmLeaseRescindedObserver.onNext(offerId.getValue());
    numOfferRescinded.increment();
}
/**
 * Mesos callback: the framework registered with a (new) master. Expires outstanding leases and
 * (re)starts periodic task reconciliation via {@link #initializeNewDriver(SchedulerDriver)}.
 */
@Override
public void registered(SchedulerDriver driver, FrameworkID frameworkID,
                       MasterInfo masterInfo) {
    // Parameterized logging avoids eager string concatenation.
    logger.info("Mesos registered: {}, ID={}, masterInfo={}", driver, frameworkID.getValue(), masterInfo.getId());
    initializeNewDriver(driver);
    numMesosRegistered.increment();
}
/**
 * Mesos callback: the framework re-registered after a master failover; same handling as
 * {@link #registered(SchedulerDriver, FrameworkID, MasterInfo)}.
 */
@Override
public void reregistered(SchedulerDriver driver, MasterInfo masterInfo) {
    // Parameterized logging avoids eager string concatenation.
    logger.info("Mesos re-registered: {}, masterInfo={}", driver, masterInfo.getId());
    initializeNewDriver(driver);
    numMesosRegistered.increment();
}
/**
 * Handles a (re)registration: rescinds ALL outstanding leases (stale after failover) and
 * restarts the periodic task-reconciliation job for the new driver.
 *
 * NOTE(review): each call creates a fresh ScheduledThreadPoolExecutor that is never shut down;
 * only the previous future is cancelled, so repeated re-registrations leak executor threads —
 * consider a single long-lived executor field.
 */
private synchronized void initializeNewDriver(final SchedulerDriver driver) {
    // "ALL" is a sentinel understood by the rescind observer to expire every cached lease.
    vmLeaseRescindedObserver.onNext("ALL");
    if (reconcilerFuture != null)
        reconcilerFuture.cancel(true);
    reconcilerFuture = new ScheduledThreadPoolExecutor(1).scheduleWithFixedDelay(new Runnable() {
        @Override
        public void run() {
            reconcileTasks(driver);
        }
    }, 30, ConfigurationProvider.getConfig().getMesosTaskReconciliationIntervalSecs(), TimeUnit.SECONDS);
}
private void reconcileTasks(final SchedulerDriver driver) {
try {
if (reconciliationTrial++ % 2 == 0)
reconcileTasksKnownToUs(driver);
else
reconcileAllMesosTasks(driver);
} catch (Exception e) {
// we don't want to throw errors lest periodically scheduled reconciliation be cancelled
logger.error("Unexpected error (continuing): " + e.getMessage(), e);
}
}
    /**
     * Explicit reconciliation: asks Mesos for the authoritative status of every worker
     * this master currently believes is running (worker id -> slave id mappings from
     * the worker registry). Exits the process if the driver is aborted/stopped.
     */
    private void reconcileTasksKnownToUs(SchedulerDriver driver) {
        final List<TaskStatus> tasksToInitialize = new ArrayList<>();
        for (Map.Entry<WorkerId, String> workerIdSlaveId : workerRegistry.getAllRunningWorkerSlaveIdMappings(null).entrySet()) {
            final WorkerId workerId = workerIdSlaveId.getKey();
            final String slaveId = workerIdSlaveId.getValue();
            if (logger.isDebugEnabled()) {
                logger.debug("reconcile running worker mapping {} -> {}", workerId.getId(), slaveId);
            }
            // TASK_RUNNING here is our *assumed* state; Mesos replies via statusUpdate
            // with the authoritative state for each task id.
            tasksToInitialize.add(TaskStatus.newBuilder()
                    .setTaskId(
                            Protos.TaskID.newBuilder()
                                    .setValue(workerId.getId())
                                    .build())
                    .setState(Protos.TaskState.TASK_RUNNING)
                    .setSlaveId(SlaveID.newBuilder().setValue(slaveId).build())
                    .build()
            );
        }
        if (!tasksToInitialize.isEmpty()) {
            Protos.Status status = driver.reconcileTasks(tasksToInitialize);
            numReconcileTasks.increment();
            logger.info("Sent request to reconcile " + tasksToInitialize.size() + " tasks, status=" + status);
            logger.info("Last offer received " + (System.currentTimeMillis() - lastOfferReceivedAt.get()) / 1000 + " secs ago");
            logger.info("Last valid offer received " + (System.currentTimeMillis() - lastValidOfferReceivedAt.get()) / 1000 + " secs ago");
            switch (status) {
                case DRIVER_ABORTED:
                case DRIVER_STOPPED:
                    // A dead driver cannot be recovered in-process; exit so the master
                    // can be restarted (and possibly fail over).
                    logger.error("Unexpected to see Mesos driver status of " + status + " from reconcile request. Committing suicide!");
                    System.exit(2);
            }
        }
    }
    /**
     * Implicit reconciliation: an empty task list asks Mesos to report the status of
     * every task it knows for this framework (catches tasks we may have forgotten).
     * Exits the process if the driver is aborted/stopped.
     */
    private void reconcileAllMesosTasks(SchedulerDriver driver) {
        Protos.Status status = driver.reconcileTasks(Collections.emptyList());
        numReconcileTasks.increment();
        logger.info("Sent request to reconcile all tasks known to Mesos");
        logger.info("Last offer received " + (System.currentTimeMillis() - lastOfferReceivedAt.get()) / 1000 + " secs ago");
        logger.info("Last valid offer received " + (System.currentTimeMillis() - lastValidOfferReceivedAt.get()) / 1000 + " secs ago");
        switch (status) {
            case DRIVER_ABORTED:
            case DRIVER_STOPPED:
                // A dead driver cannot be recovered in-process; exit so the master restarts.
                logger.error("Unexpected to see Mesos driver status of " + status + " from reconcile request (all tasks). Committing suicide!");
                System.exit(2);
        }
    }
@Override
public void slaveLost(SchedulerDriver arg0, SlaveID arg1) {
logger.warn("Lost slave " + arg1.getValue());
}
    /**
     * Mesos driver callback: translates a Mesos task status update into a Mantis
     * {@link WorkerResourceStatus} and routes it to the job message router. Updates
     * whose task id does not parse as a Mantis WorkerId are logged and dropped.
     */
    @Override
    public void statusUpdate(final SchedulerDriver arg0, TaskStatus arg1) {
        Optional<WorkerId> workerIdO = WorkerId.fromId(arg1.getTaskId().getValue());
        logger.debug("Task status update: ({}) state: {}({}) - {}",
                arg1.getTaskId().getValue(),
                arg1.getState(),
                arg1.getState().getNumber(),
                arg1.getMessage());
        if (workerIdO.isPresent()) {
            WorkerId workerId = workerIdO.get();
            VMResourceState state;
            String mesg = "Mesos task " + arg1.getState() + "-" + arg1.getMessage();
            // Map Mesos task states onto Mantis VM resource states; unmapped states
            // (e.g. TASK_KILLED/TASK_ERROR, depending on Mesos version) are ignored.
            switch (arg1.getState()) {
                case TASK_FAILED:
                case TASK_LOST:
                    state = VMResourceState.FAILED;
                    break;
                case TASK_FINISHED:
                    state = VMResourceState.COMPLETED;
                    break;
                case TASK_RUNNING:
                    state = VMResourceState.STARTED;
                    break;
                case TASK_STAGING:
                case TASK_STARTING:
                    state = VMResourceState.START_INITIATED;
                    break;
                default:
                    logger.warn("Unexpected Mesos task state " + arg1.getState());
                    return;
            }
            jobMessageRouter.routeWorkerEvent(new WorkerResourceStatus(workerId, mesg, state));
        } else {
            logger.error("Failed to parse workerId from Mesos task update {}", arg1.getTaskId().getValue());
        }
    }
}
| 7,986 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/http | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/http/api/JobClusterInfo.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.http.api;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.server.master.store.NamedJob;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
/**
 * API view of a job cluster: its SLA, owner, enabled/cron state, uploaded job jars
 * (with the latest version derived from upload timestamps), parameters and labels.
 */
// Moved here from the constructor: Jackson only honors ignoreUnknown when
// @JsonIgnoreProperties is placed on the type (or a property), not on a creator.
@JsonIgnoreProperties(ignoreUnknown = true)
public class JobClusterInfo {
    private final String name;
    private final String latestVersion;
    private final NamedJob.SLA sla;
    private final JobOwner owner;
    private final boolean disabled;
    private final boolean cronActive;
    private final List<JarInfo> jars;
    private final List<Parameter> parameters;
    private final List<Label> labels;

    @JsonCreator
    public JobClusterInfo(
            @JsonProperty("name") String name,
            @JsonProperty("sla") NamedJob.SLA sla,
            @JsonProperty("owner") JobOwner owner,
            @JsonProperty("disabled") boolean disabled,
            @JsonProperty("cronActive") boolean cronActive,
            @JsonProperty("jars") List<JarInfo> jars,
            @JsonProperty("parameters") List<Parameter> parameters,
            @JsonProperty("labels") List<Label> labels
    ) {
        this.name = name;
        this.sla = sla;
        this.owner = owner;
        this.disabled = disabled;
        this.cronActive = cronActive;
        this.jars = jars;
        this.labels = labels;
        // latestVersion is derived, not deserialized: pick the jar with the greatest
        // uploadedAt timestamp; empty string when there are no jars.
        if (jars == null || jars.isEmpty())
            latestVersion = "";
        else {
            JarInfo latest = null;
            for (JarInfo ji : jars) {
                if (latest == null || ji.uploadedAt > latest.uploadedAt) {
                    latest = ji;
                }
            }
            latestVersion = latest == null ? "" : latest.version;
        }
        this.parameters = parameters;
    }

    public String getName() {
        return name;
    }

    public String getLatestVersion() {
        return latestVersion;
    }

    public NamedJob.SLA getSla() {
        return sla;
    }

    public JobOwner getOwner() {
        return owner;
    }

    public boolean isDisabled() {
        return disabled;
    }

    public boolean isCronActive() {
        return cronActive;
    }

    public List<JarInfo> getJars() {
        return jars;
    }

    public List<Parameter> getParameters() {
        return parameters;
    }

    public List<Label> getLabels() {
        return this.labels;
    }

    /** A single uploaded job jar: version label, upload timestamp (epoch millis) and URL. */
    @JsonIgnoreProperties(ignoreUnknown = true)
    public static class JarInfo {

        private final String version;
        private final long uploadedAt;
        private final String url;

        @JsonCreator
        public JarInfo(@JsonProperty("version") String version, @JsonProperty("uploadedAt") long uploadedAt,
                       @JsonProperty("url") String url) {
            this.version = version;
            this.uploadedAt = uploadedAt;
            this.url = url;
        }

        public String getVersion() {
            return version;
        }

        public long getUploadedAt() {
            return uploadedAt;
        }

        public String getUrl() {
            return url;
        }
    }
}
| 7,987 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/http | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/http/api/CompactJobInfo.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.http.api;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
import java.util.Map;
/**
 * Compact, read-only summary of a Mantis job for list-style API responses:
 * identity, timestamps, state/type, stage/worker counts, aggregate resources,
 * a per-state worker count summary, and labels.
 */
// Moved here from the constructor: Jackson only honors ignoreUnknown when
// @JsonIgnoreProperties is placed on the type (or a property), not on a creator.
@JsonIgnoreProperties(ignoreUnknown = true)
public class CompactJobInfo {
    private final String jobId;
    private final long submittedAt;
    private final long terminatedAt;
    private final String user;
    private final String jarUrl;
    private final MantisJobState state;
    private final MantisJobDurationType type;
    private final int numStages;
    private final int numWorkers;
    private final double totCPUs;
    private final double totMemory;
    private final Map<String, Integer> statesSummary;
    private final List<Label> labels;

    @JsonCreator
    public CompactJobInfo(
            @JsonProperty("jobID") String jobId,
            @JsonProperty("jarUrl") String jarUrl,
            @JsonProperty("submittedAt") long submittedAt,
            @JsonProperty("terminatedAt") long terminatedAt,
            @JsonProperty("user") String user,
            @JsonProperty("state") MantisJobState state,
            @JsonProperty("type") MantisJobDurationType type,
            @JsonProperty("numStages") int numStages,
            @JsonProperty("numWorkers") int numWorkers,
            @JsonProperty("totCPUs") double totCPUs,
            @JsonProperty("totMemory") double totMemory,
            @JsonProperty("statesSummary") Map<String, Integer> statesSummary,
            @JsonProperty("labels") List<Label> labels
    ) {
        this.jobId = jobId;
        this.jarUrl = jarUrl;
        this.submittedAt = submittedAt;
        this.terminatedAt = terminatedAt;
        this.user = user;
        this.state = state;
        this.type = type;
        this.numStages = numStages;
        this.numWorkers = numWorkers;
        this.totCPUs = totCPUs;
        this.totMemory = totMemory;
        this.statesSummary = statesSummary;
        this.labels = labels;
    }

    public String getJobId() {
        return jobId;
    }

    public long getSubmittedAt() {
        return submittedAt;
    }

    public long getTerminatedAt() {
        return terminatedAt;
    }

    public String getUser() {
        return user;
    }

    public MantisJobState getState() {
        return state;
    }

    public MantisJobDurationType getType() {
        return type;
    }

    public int getNumStages() {
        return numStages;
    }

    public int getNumWorkers() {
        return numWorkers;
    }

    public double getTotCPUs() {
        return totCPUs;
    }

    public double getTotMemory() {
        return totMemory;
    }

    public String getJarUrl() { return jarUrl; }

    public Map<String, Integer> getStatesSummary() {
        return statesSummary;
    }

    public List<Label> getLabels() {
        return this.labels;
    }
}
| 7,988 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/persistence/IMantisPersistenceProvider.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.persistence;
import io.mantisrx.master.jobcluster.IJobClusterMetadata;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.master.jobcluster.job.IMantisStageMetadata;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.master.resourcecluster.DisableTaskExecutorsRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleSpec;
import io.mantisrx.master.resourcecluster.writable.RegisteredResourceClustersWritable;
import io.mantisrx.master.resourcecluster.writable.ResourceClusterScaleRulesWritable;
import io.mantisrx.master.resourcecluster.writable.ResourceClusterSpecWritable;
import io.mantisrx.server.core.domain.ArtifactID;
import io.mantisrx.server.core.domain.JobArtifact;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl.CompletedJob;
import io.mantisrx.server.master.persistence.exceptions.InvalidJobException;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorRegistration;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import javax.annotation.Nullable;
import rx.Observable;
/**
* A way to persist mantis master related metadata to a durable storage.
* See {@link KeyValueBasedPersistenceProvider} for how mantis job cluster,
* mantis job info is persisted to a key-value based storage (like cassandra)
*/
public interface IMantisPersistenceProvider {

    /** Persists the metadata of a newly submitted job. */
    void storeNewJob(IMantisJobMetadata jobMetadata) throws Exception;

    /** Overwrites the persisted metadata of an existing job. */
    void updateJob(final IMantisJobMetadata jobMetadata) throws Exception;

    /**
     * Marks the job as not active and moves it to an inactive archived collection of jobs.
     *
     * @param jobId the Job Id of the job to archive
     * @throws IOException upon errors with storage invocation
     */
    void archiveJob(final String jobId) throws IOException;

    /** Removes the job (including any archived copy) from storage. */
    void deleteJob(String jobId) throws Exception;

    void storeMantisStage(final IMantisStageMetadata msmd) throws IOException;

    void updateMantisStage(final IMantisStageMetadata msmd) throws IOException;

    /**
     * Store a new worker for the given job and stage number. This will be called only once for a given
     * worker. However, it is possible that concurrent calls can be made on a <code>jobId</code>, each with a
     * different worker.
     *
     * @param workerMetadata The worker metadata to store.
     *
     * @throws IOException
     */
    default void storeWorker(final IMantisWorkerMetadata workerMetadata) throws IOException {
        storeWorkers(Collections.singletonList(workerMetadata));
    }

    /**
     * Store multiple new workers for the give job. This is called only once for a given worker. This method enables
     * optimization by calling storage once for multiple workers.
     *
     * @param jobId The Job ID.
     * @param workers The list of workers to store.
     *
     * @throws IOException if there were errors storing the workers.
     */
    default void storeWorkers(final String jobId, final List<IMantisWorkerMetadata> workers) throws IOException {
        storeWorkers(workers);
    }

    void storeWorkers(final List<IMantisWorkerMetadata> workers) throws IOException;

    /**
     * Store a new worker and update existing worker of a job atomically. Either both are stored or none is.
     *
     * @param existingWorker Existing worker to update.
     * @param newWorker New worker to store.
     *
     * @throws IOException
     * @throws InvalidJobException If workers don't have the same JobId.
     * @throws Exception
     */
    void storeAndUpdateWorkers(final IMantisWorkerMetadata existingWorker, final IMantisWorkerMetadata newWorker)
            throws InvalidJobException, IOException, Exception;

    void updateWorker(IMantisWorkerMetadata worker) throws IOException;

    /** Loads all active (non-archived) jobs from storage. */
    List<IMantisJobMetadata> loadAllJobs() throws IOException;

    Observable<IMantisJobMetadata> loadAllArchivedJobs();

    /**
     * Initialize and return all existing job clusters from persistence.
     *
     * @return list of {@link IJobClusterMetadata} objects
     * @throws IOException upon error connecting to or reading from persistence
     */
    List<IJobClusterMetadata> loadAllJobClusters() throws IOException;

    List<CompletedJob> loadAllCompletedJobs() throws IOException;

    void archiveWorker(IMantisWorkerMetadata mwmd) throws IOException;

    List<IMantisWorkerMetadata> getArchivedWorkers(String jobId) throws IOException;

    void createJobCluster(IJobClusterMetadata jobCluster) throws Exception;

    void updateJobCluster(IJobClusterMetadata jobCluster) throws Exception;

    void deleteJobCluster(String name) throws Exception;

    void storeCompletedJobForCluster(String name, CompletedJob job) throws IOException;

    void removeCompletedJobForCluster(String name, String jobId) throws IOException;

    Optional<IMantisJobMetadata> loadArchivedJob(String jobId) throws IOException;

    // ----- active VM (agent ASG) attribute values -----

    List<String> initActiveVmAttributeValuesList() throws IOException;

    void setActiveVmAttributeValuesList(final List<String> vmAttributesList) throws IOException;

    // ----- task executor registrations and disable requests -----

    TaskExecutorRegistration getTaskExecutorFor(TaskExecutorID taskExecutorID) throws IOException;

    void storeNewTaskExecutor(TaskExecutorRegistration registration) throws IOException;

    void storeNewDisableTaskExecutorRequest(DisableTaskExecutorsRequest request) throws IOException;

    void deleteExpiredDisableTaskExecutorRequest(DisableTaskExecutorsRequest request) throws IOException;

    List<DisableTaskExecutorsRequest> loadAllDisableTaskExecutorsRequests(ClusterID clusterID) throws IOException;

    // ----- job artifacts -----

    boolean isArtifactExists(String resourceId) throws IOException;

    JobArtifact getArtifactById(String resourceId) throws IOException;

    List<JobArtifact> listJobArtifacts(String name, String version) throws IOException;

    void addNewJobArtifactsToCache(ClusterID clusterID, List<ArtifactID> artifacts) throws IOException;

    void removeJobArtifactsToCache(ClusterID clusterID, List<ArtifactID> artifacts) throws IOException;

    List<String> listJobArtifactsToCache(ClusterID clusterID) throws IOException;

    List<String> listJobArtifactsByName(String prefix, String contains) throws IOException;

    void addNewJobArtifact(JobArtifact jobArtifact) throws IOException;

    // ----- resource clusters -----

    /**
     * Register and save the given cluster spec. Once the returned CompletionStage
     * finishes successfully the given cluster should be available in list cluster response.
     */
    ResourceClusterSpecWritable registerAndUpdateClusterSpec(ResourceClusterSpecWritable spec) throws IOException;

    RegisteredResourceClustersWritable deregisterCluster(ClusterID clusterId) throws IOException;

    RegisteredResourceClustersWritable getRegisteredResourceClustersWritable() throws IOException;

    @Nullable
    ResourceClusterSpecWritable getResourceClusterSpecWritable(ClusterID id) throws IOException;

    ResourceClusterScaleRulesWritable getResourceClusterScaleRules(ClusterID clusterId) throws IOException;

    ResourceClusterScaleRulesWritable registerResourceClusterScaleRule(
        ResourceClusterScaleRulesWritable ruleSpec) throws IOException;

    ResourceClusterScaleRulesWritable registerResourceClusterScaleRule(ResourceClusterScaleSpec rule) throws IOException;
}
| 7,989 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/persistence/KeyValueBasedPersistenceProvider.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.persistence;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.jobcluster.IJobClusterMetadata;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.master.jobcluster.job.IMantisStageMetadata;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.master.jobcluster.job.worker.JobWorker;
import io.mantisrx.master.resourcecluster.DisableTaskExecutorsRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleSpec;
import io.mantisrx.master.resourcecluster.writable.RegisteredResourceClustersWritable;
import io.mantisrx.master.resourcecluster.writable.ResourceClusterScaleRulesWritable;
import io.mantisrx.master.resourcecluster.writable.ResourceClusterSpecWritable;
import io.mantisrx.server.core.domain.ArtifactID;
import io.mantisrx.server.core.domain.JobArtifact;
import io.mantisrx.server.master.domain.DataFormatAdapter;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl.CompletedJob;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorRegistration;
import io.mantisrx.server.master.store.KeyValueStore;
import io.mantisrx.server.master.store.MantisJobMetadataWritable;
import io.mantisrx.server.master.store.MantisStageMetadata;
import io.mantisrx.server.master.store.MantisStageMetadataWritable;
import io.mantisrx.server.master.store.MantisWorkerMetadataWritable;
import io.mantisrx.server.master.store.NamedJob;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.com.fasterxml.jackson.datatype.jdk8.Jdk8Module;
import io.mantisrx.shaded.com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
import io.mantisrx.shaded.com.google.common.base.Preconditions;
import io.mantisrx.shaded.com.google.common.collect.ImmutableList;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.io.IOException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
/**
* Provide a key-value aware implementation of IMantisStorageProvider.
* This has all the table-models and mantis specific logic for how
* mantis master should store job, cluster and related metadata
* assuming the underlying storage provides key based lookups.
* <p>
* The instance of this class needs an actual key-value storage
* implementation (implements KeyValueStorageProvider) to function.
* <p>
* Look at {@code io.mantisrx.server.master.store.KeyValueStorageProvider}
* to see the features needed from the storage.
* Effectively, an apache-cassandra like storage with primary key made
* up of partition key and composite key and a way to iterate over all
* partition keys in the table.
*/
public class KeyValueBasedPersistenceProvider implements IMantisPersistenceProvider {
    private static final Logger logger = LoggerFactory.getLogger(KeyValueBasedPersistenceProvider.class);
    private static final ObjectMapper mapper = new ObjectMapper();
    // Namespaces (tables/partitions) used in the underlying key-value store.
    private static final String JOB_STAGEDATA_NS = "MantisJobStageData";
    private static final String ARCHIVED_JOB_STAGEDATA_NS = "MantisArchivedJobStageData";
    private static final String WORKERS_NS = "MantisWorkers";
    private static final String ARCHIVED_WORKERS_NS = "MantisArchivedWorkers";
    private static final String NAMED_JOBS_NS = "MantisNamedJobs";
    private static final String NAMED_COMPLETEDJOBS_NS = "MantisNamedJobCompletedJobs";
    private static final String ACTIVE_ASGS_NS = "MantisActiveASGs";
    private static final String TASK_EXECUTOR_REGISTRATION = "TaskExecutorRegistration";
    private static final String DISABLE_TASK_EXECUTOR_REQUESTS = "MantisDisableTaskExecutorRequests";
    private static final String CONTROLPLANE_NS = "mantis_controlplane";
    private static final String JOB_ARTIFACTS_NS = "mantis_global_job_artifacts";
    private static final String JOB_ARTIFACTS_TO_CACHE_PER_CLUSTER_ID_NS = "mantis_global_cached_artifacts";
    // Secondary (column) key names within a job's row.
    private static final String JOB_METADATA_SECONDARY_KEY = "jobMetadata";
    private static final String JOB_STAGE_METADATA_SECONDARY_KEY_PREFIX = "stageMetadata";
    private static final String NAMED_JOB_SECONDARY_KEY = "jobNameInfo";
    private static final String JOB_ARTIFACTS_BY_NAME_PARTITION_KEY = "JobArtifactsByName";
    // Workers are bucketed into partitions of this size by worker number.
    private static final int WORKER_BATCH_SIZE = 1000;
    // Fallback upper bound on worker partition keys when the job metadata is unavailable.
    private static final int WORKER_MAX_INDEX = 30000;
    // TTL applied to archived job/worker data.
    private static final long TTL_IN_MS = TimeUnit.DAYS.toMillis(7);

    static {
        // Tolerate unknown fields and support Optional/java.time types on (de)serialization.
        mapper
            .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
            .registerModule(new Jdk8Module())
            .registerModule(new JavaTimeModule());
    }

    private final KeyValueStore kvStore;
    private final LifecycleEventPublisher eventPublisher;
    private final Counter noWorkersFoundCounter;
    private final Counter workersFoundCounter;

    /**
     * @param kvStore        the backing key-value storage implementation
     * @param eventPublisher publisher for lifecycle events
     */
    public KeyValueBasedPersistenceProvider(KeyValueStore kvStore, LifecycleEventPublisher eventPublisher) {
        this.kvStore = kvStore;
        this.eventPublisher = eventPublisher;
        Metrics m = new Metrics.Builder()
            .id("storage")
            .addCounter("noWorkersFound")
            .addCounter("workersFound")
            .build();
        m = MetricsRegistry.getInstance().registerAndGet(m);
        this.noWorkersFoundCounter = m.getCounter("noWorkersFound");
        this.workersFoundCounter = m.getCounter("workersFound");
    }
    /** Column name holding the serialized job metadata; overridable by subclasses. */
    protected String getJobMetadataFieldName() {
        return JOB_METADATA_SECONDARY_KEY;
    }

    /** Column-name prefix for per-stage metadata columns; overridable by subclasses. */
    protected String getStageMetadataFieldPrefix() {
        return JOB_STAGE_METADATA_SECONDARY_KEY_PREFIX;
    }

    /** Partition key under which job artifacts are indexed by name. */
    protected String getJobArtifactsByNamePartitionKey() {
        return JOB_ARTIFACTS_BY_NAME_PARTITION_KEY;
    }

    /** TTL applied to archived job and worker data. */
    protected Duration getArchiveDataTtlInMs() {
        return Duration.ofMillis(TTL_IN_MS);
    }

    /** Column name for a given stage number, e.g. "stageMetadata-2". */
    protected String getJobStageFieldName(int stageNum) {
        return String.format("%s-%d", getStageMetadataFieldPrefix(), stageNum);
    }

    /** Column name holding the serialized job cluster info. */
    protected String getJobClusterFieldName() {
        return NAMED_JOB_SECONDARY_KEY;
    }
private boolean jobIsValid(MantisJobMetadataWritable job) {
final int numStages = job.getNumStages();
final Collection<? extends MantisStageMetadata> stageMetadata = job.getStageMetadata();
if (stageMetadata == null) {
logger.error("Could not find stage metadata for jobId {}", job.getJobId());
return false;
}
if (stageMetadata.size() != numStages) {
logger.error("Invalid stage metadata for job {}: stage count mismatch expected {} vs found {}",
job.getJobId(), numStages, stageMetadata.size());
return false;
}
return true;
}
    /**
     * Loads a job's full row from the given namespace and assembles it into job
     * metadata with all stages attached. Delegates to the map-based overload.
     */
    private MantisJobMetadataWritable readJobStageData(final String namespace, final String jobId)
        throws IOException {
        return readJobStageData(jobId, kvStore.getAll(namespace, jobId));
    }
    /**
     * Assembles a job's metadata from its raw key-value row: the job-metadata column
     * becomes the {@link MantisJobMetadataWritable}, and every column whose name starts
     * with the stage-metadata prefix is deserialized and attached as a stage.
     * Columns that fail to deserialize are logged and skipped.
     *
     * @throws IOException if the job-metadata column or all stage columns are missing,
     *                     or the assembled job fails validation
     */
    private MantisJobMetadataWritable readJobStageData(final String jobId, final Map<String, String> items) throws IOException {
        String jobMetadataColumnName = getJobMetadataFieldName();
        // AtomicReference used only as a mutable holder inside the lambda, not for concurrency.
        final AtomicReference<MantisJobMetadataWritable> wrapper = new AtomicReference<>();
        final List<MantisStageMetadataWritable> stages = new LinkedList<>();
        items.forEach(
            (k, v) -> {
                try {
                    if (k != null && v != null) {
                        if (jobMetadataColumnName.equals(k)) {
                            wrapper.set(mapper.readValue(v, MantisJobMetadataWritable.class));
                        } else if (k.startsWith(getStageMetadataFieldPrefix())) {
                            stages.add(mapper.readValue(v, MantisStageMetadataWritable.class));
                        }
                    }
                } catch (JsonProcessingException e) {
                    logger.warn(
                        "failed to deserialize job metadata for jobId {}, column name {}", jobId, k, e);
                }
            });
        final MantisJobMetadataWritable job = wrapper.get();
        if (job == null) {
            throw new IOException("No " + jobMetadataColumnName + " column found for key jobId=" + jobId);
        }
        if (stages.isEmpty()) {
            throw new IOException(
                "No stage metadata columns with prefix "
                    + getStageMetadataFieldPrefix()
                    + " found for jobId="
                    + jobId);
        }
        for (MantisStageMetadataWritable msmd : stages) {
            job.addJobStageIfAbsent(msmd);
        }
        if (jobIsValid(job)) {
            return job;
        }
        throw new IOException(String.format("Invalid job for jobId %s", jobId));
    }
@Override
public void storeNewJob(IMantisJobMetadata jobMetadata) throws Exception {
MantisJobMetadataWritable mjmw = DataFormatAdapter.convertMantisJobMetadataToMantisJobMetadataWriteable(jobMetadata);
try {
kvStore.upsert(JOB_STAGEDATA_NS, jobMetadata.getJobId().toString(), getJobMetadataFieldName(), mapper.writeValueAsString(mjmw));
} catch (IOException e) {
throw new Exception(e);
}
}
    /** Overwrites the persisted job-metadata column for an existing job. */
    @Override
    public void updateJob(IMantisJobMetadata jobMetadata) throws Exception {
        MantisJobMetadataWritable mjmw = DataFormatAdapter.convertMantisJobMetadataToMantisJobMetadataWriteable(jobMetadata);
        kvStore.upsert(JOB_STAGEDATA_NS, jobMetadata.getJobId().toString(), getJobMetadataFieldName(), mapper.writeValueAsString(mjmw));
    }
    /**
     * Moves a job's metadata and all of its bucketed worker rows from the active
     * namespaces to the archived namespaces (with the archive TTL applied), then
     * deletes the active copies.
     */
    @Override
    public void archiveJob(String jobId) throws IOException {
        Map<String, String> all = kvStore.getAll(JOB_STAGEDATA_NS, jobId);
        // Upper bound on worker partition buckets, derived from the job's next worker number.
        int workerMaxPartitionKey = workerMaxPartitionKey(readJobStageData(jobId, all));
        kvStore.upsertAll(ARCHIVED_JOB_STAGEDATA_NS, jobId, all, getArchiveDataTtlInMs());
        kvStore.deleteAll(JOB_STAGEDATA_NS, jobId);
        // Copy+delete each worker bucket ("<jobId>-<bucket>" partitions).
        for (int i = 0; i < workerMaxPartitionKey; i += WORKER_BATCH_SIZE) {
            String pkey = makeBucketizedPartitionKey(jobId, i);
            Map<String, String> workersData = kvStore.getAll(WORKERS_NS, pkey);
            kvStore.upsertAll(ARCHIVED_WORKERS_NS, pkey, workersData, getArchiveDataTtlInMs());
            kvStore.deleteAll(WORKERS_NS, pkey);
        }
    }
    /**
     * Permanently deletes a job: its metadata and bucketed worker rows from both the
     * active and the archived namespaces. Individual worker-bucket delete failures
     * are logged and skipped so the remaining buckets are still cleaned up.
     */
    @Override
    public void deleteJob(String jobId) throws Exception {
        MantisJobMetadataWritable jobMeta = readJobStageData(JOB_STAGEDATA_NS, jobId);
        int workerMaxPartitionKey = workerMaxPartitionKey(jobMeta);
        kvStore.deleteAll(JOB_STAGEDATA_NS, jobId);
        rangeOperation(workerMaxPartitionKey, idx -> {
            try {
                kvStore.deleteAll(WORKERS_NS, makeBucketizedPartitionKey(jobId, idx));
            } catch (IOException e) {
                logger.warn("failed to delete worker for jobId {} with index {}", jobId, idx, e);
            }
        });
        // delete from archive as well
        kvStore.deleteAll(ARCHIVED_JOB_STAGEDATA_NS, jobId);
        rangeOperation(workerMaxPartitionKey, idx -> {
            try {
                kvStore.deleteAll(ARCHIVED_WORKERS_NS, makeBucketizedPartitionKey(jobId, idx));
            } catch (IOException e) {
                logger.warn("failed to delete worker for jobId {} with index {}", jobId, idx, e);
            }
        });
    }
    /** Writes one stage's metadata into its column within the job's row. */
    @Override
    public void storeMantisStage(IMantisStageMetadata msmd) throws IOException {
        MantisStageMetadataWritable msmw = DataFormatAdapter.convertMantisStageMetadataToMantisStageMetadataWriteable(msmd);
        kvStore.upsert(JOB_STAGEDATA_NS, msmd.getJobId().toString(), getJobStageFieldName(msmd.getStageNum()), mapper.writeValueAsString(msmw));
    }

    /** Same as store: an upsert overwrites the stage column in place. */
    @Override
    public void updateMantisStage(IMantisStageMetadata msmd) throws IOException {
        storeMantisStage(msmd);
    }
    /**
     * Upper bound on the worker partition-key range for a job, based on the highest
     * worker number ever assigned. Falls back to WORKER_MAX_INDEX when unavailable.
     */
    private int workerMaxPartitionKey(MantisJobMetadataWritable jobMetadata) {
        try {
            return jobMetadata.getNextWorkerNumberToUse();
        } catch (Exception ignored) {
            // Deliberate best-effort: fall through to the conservative bound below.
        }
        // big number in case we don't find the job
        return WORKER_MAX_INDEX;
    }
private int bucketizePartitionKey(int num) {
num = Math.max(1, num);
return (int) (WORKER_BATCH_SIZE * Math.ceil(1.0 * num / WORKER_BATCH_SIZE));
}
    /** Partition key "<pkeyPart>-<bucket>" where bucket is the rounded-up batch boundary. */
    private String makeBucketizedPartitionKey(String pkeyPart, int suffix) {
        int bucketized = bucketizePartitionKey(suffix);
        return String.format("%s-%d", pkeyPart, bucketized);
    }

    /** Secondary key "<stage>-<workerIdx>-<workerNum>" identifying a worker within its bucket. */
    private String makeBucketizedSecondaryKey(int stageNum, int workerIdx, int workerNum) {
        return String.format("%d-%d-%d", stageNum, workerIdx, workerNum);
    }

    /**
     * Applies {@code fn} to each bucket boundary (WORKER_BATCH_SIZE, 2*WORKER_BATCH_SIZE, ...)
     * up to and including the bucket covering {@code nextJobNumber}.
     */
    private void rangeOperation(int nextJobNumber, Consumer<Integer> fn) {
        int maxIndex = bucketizePartitionKey(nextJobNumber);
        for (int i = WORKER_BATCH_SIZE; i <= maxIndex; i += WORKER_BATCH_SIZE) {
            fn.accept(i);
        }
    }
@Override
public void storeWorker(IMantisWorkerMetadata workerMetadata) throws IOException {
    // Delegate to the bulk (jobId, list) path with a one-element list.
    storeWorkers(workerMetadata.getJobId(), Collections.singletonList(workerMetadata));
}

@Override
public void storeWorkers(List<IMantisWorkerMetadata> workers) throws IOException {
    // Each worker is written under a bucketized partition key derived from its
    // worker number, with a composite secondary key.
    for (IMantisWorkerMetadata worker : workers) {
        final MantisWorkerMetadataWritable writable =
            DataFormatAdapter.convertMantisWorkerMetadataToMantisWorkerMetadataWritable(worker);
        final String partitionKey =
            makeBucketizedPartitionKey(writable.getJobId(), writable.getWorkerNumber());
        final String secondaryKey =
            makeBucketizedSecondaryKey(
                writable.getStageNum(), writable.getWorkerIndex(), writable.getWorkerNumber());
        kvStore.upsert(WORKERS_NS, partitionKey, secondaryKey, mapper.writeValueAsString(writable));
    }
}

@Override
public void storeAndUpdateWorkers(IMantisWorkerMetadata existingWorker, IMantisWorkerMetadata newWorker) throws IOException {
    // Both records are plain upserts, so storing them together is sufficient.
    storeWorkers(ImmutableList.of(existingWorker, newWorker));
}

@Override
public void updateWorker(IMantisWorkerMetadata worker) throws IOException {
    // Updates share the upsert semantics of stores.
    storeWorker(worker);
}
/**
 * Loads every worker row in the given namespace and groups the successfully
 * deserialized workers by their job id. Rows that fail to parse are logged and
 * dropped.
 *
 * @param namespace KV namespace to scan (active or archived workers)
 * @return map from jobId to the workers read for that job
 * @throws IOException if the underlying scan fails
 */
private Map<String, List<MantisWorkerMetadataWritable>> getAllWorkersByJobId(final String namespace) throws IOException {
    Map<String, List<MantisWorkerMetadataWritable>> workersByJobId = new HashMap<>();
    for (Map.Entry<String, Map<String, String>> worker : kvStore.getAllRows(namespace).entrySet()) {
        if (worker.getValue().values().isEmpty()) {
            continue;
        }
        List<MantisWorkerMetadataWritable> workers = worker.getValue().values().stream()
            .map(data -> {
                try {
                    return mapper.readValue(data, MantisWorkerMetadataWritable.class);
                } catch (JsonProcessingException e) {
                    logger.warn("failed to parse worker against pkey {} json {}", worker.getKey(), data, e);
                    return null;
                }
            })
            .filter(Objects::nonNull)
            .collect(Collectors.toList());
        // Bug fix: if every row under this partition failed to deserialize, the
        // filtered list is empty and workers.get(0) below would throw
        // IndexOutOfBoundsException, aborting the whole scan.
        if (workers.isEmpty()) {
            continue;
        }
        workersByJobId
            .computeIfAbsent(workers.get(0).getJobId(), k -> Lists.newArrayList())
            .addAll(workers);
    }
    return workersByJobId;
}
/**
 * Loads all active jobs: reads every job's stage data, attaches the workers read
 * from the active workers namespace, and converts each into the runtime
 * {@link IMantisJobMetadata} form.
 *
 * <p>Jobs with no workers are skipped (counted via {@code noWorkersFoundCounter});
 * any per-job failure is logged and that job is skipped so one bad record does not
 * abort the full load.
 *
 * @return all successfully loaded jobs (possibly empty)
 * @throws IOException if the underlying scans fail
 */
@Override
public List<IMantisJobMetadata> loadAllJobs() throws IOException {
    logger.info("MantisStorageProviderAdapter:Enter loadAllJobs");
    // Workers are loaded in one scan up front and joined to jobs by jobId.
    final Map<String, List<MantisWorkerMetadataWritable>> workersByJobId = getAllWorkersByJobId(WORKERS_NS);
    final List<IMantisJobMetadata> jobMetas = Lists.newArrayList();
    final Map<String, Map<String, String>> allRows = kvStore.getAllRows(JOB_STAGEDATA_NS);
    for (Map.Entry<String, Map<String, String>> jobInfo : allRows.entrySet()) {
        final String jobId = jobInfo.getKey();
        try {
            final MantisJobMetadataWritable jobMeta = readJobStageData(jobId, jobInfo.getValue());
            if (CollectionUtils.isEmpty(workersByJobId.get(jobId))) {
                logger.warn("No workers found for job {}, skipping", jobId);
                noWorkersFoundCounter.increment();
                continue;
            }
            workersFoundCounter.increment();
            for (MantisWorkerMetadataWritable workerMeta : workersByJobId.get(jobId)) {
                // addWorkerMedata returns false on a duplicate (stage, workerIndex)
                // slot; treat that as corrupt state for this job.
                Preconditions.checkState(
                    jobMeta.addWorkerMedata(workerMeta.getStageNum(), workerMeta, null),
                    "JobID=%s stage=%d workerIdx=%d has existing worker, existing=%s, new=%s",
                    workerMeta.getJobId(),
                    workerMeta.getStageNum(),
                    workerMeta.getWorkerIndex(),
                    jobMeta.getWorkerByIndex(workerMeta.getStageNum(), workerMeta.getWorkerIndex()).getWorkerId(),
                    workerMeta.getWorkerId());
            }
            jobMetas.add(DataFormatAdapter.convertMantisJobWriteableToMantisJobMetadata(jobMeta, eventPublisher));
        } catch (Exception e) {
            logger.warn("Exception loading job {}", jobId, e);
        }
    }
    // need to load all workers for the jobMeta and then ensure they are added to jobMetas!
    logger.info("MantisStorageProviderAdapter:Exit loadAllJobs {}", jobMetas.size());
    return jobMetas;
}
/**
 * Streams all archived jobs as an {@link Observable}. Each partition key in the
 * archived namespace is loaded lazily on subscription; jobs that fail to load are
 * simply omitted (loadArchivedJob returns empty on failure). A scan-level
 * IOException terminates the stream with onError.
 */
@Override
public Observable<IMantisJobMetadata> loadAllArchivedJobs() {
    return Observable.create(
        subscriber -> {
            try {
                for (String pkey : kvStore.getAllPartitionKeys(ARCHIVED_JOB_STAGEDATA_NS)) {
                    Optional<IMantisJobMetadata> jobMetaOpt = loadArchivedJob(pkey);
                    jobMetaOpt.ifPresent(subscriber::onNext);
                }
                subscriber.onCompleted();
            } catch (IOException e) {
                subscriber.onError(e);
            }
        });
}
/**
 * Loads every job cluster record from the named-jobs namespace and converts it to
 * {@link IJobClusterMetadata}. Per-cluster failures are logged and skipped so one
 * bad record does not abort the full load.
 *
 * @return all successfully converted job clusters (possibly empty)
 * @throws IOException if the underlying scan fails
 */
@Override
public List<IJobClusterMetadata> loadAllJobClusters() throws IOException {
    AtomicInteger failedCount = new AtomicInteger();
    AtomicInteger successCount = new AtomicInteger();
    final List<IJobClusterMetadata> jobClusters = Lists.newArrayList();
    for (Map.Entry<String, Map<String, String>> rows : kvStore.getAllRows(NAMED_JOBS_NS).entrySet()) {
        String name = rows.getKey();
        try {
            String data = rows.getValue().get(getJobClusterFieldName());
            final NamedJob jobCluster = getJobCluster(NAMED_JOBS_NS, name, data);
            jobClusters.add(DataFormatAdapter.convertNamedJobToJobClusterMetadata(jobCluster));
            successCount.getAndIncrement();
        } catch (Exception e) {
            logger.error("Exception {} getting job cluster for {} ", e.getMessage(), name, e);
            failedCount.getAndIncrement();
        }
    }
    // Previously the success/failure counters were tallied but never reported;
    // emit a summary consistent with loadAllCompletedJobs.
    logger.info("Read and converted job clusters. Successful - {}, Failed - {}", successCount.get(), failedCount.get());
    return jobClusters;
}
/**
 * Loads every completed-job record from the completed-jobs namespace and converts
 * it to {@link CompletedJob}. Records that fail to parse are logged, counted, and
 * dropped.
 *
 * @return all successfully converted completed jobs (possibly empty)
 * @throws IOException if the underlying scan fails
 */
@Override
public List<CompletedJob> loadAllCompletedJobs() throws IOException {
    AtomicInteger failedCount = new AtomicInteger();
    AtomicInteger successCount = new AtomicInteger();
    final List<CompletedJob> completedJobsList = kvStore.getAllRows(NAMED_COMPLETEDJOBS_NS)
        .values().stream()
        .flatMap(x -> x.values().stream())
        .map(data -> {
            try {
                NamedJob.CompletedJob cj = mapper.readValue(data, NamedJob.CompletedJob.class);
                CompletedJob completedJob = DataFormatAdapter.convertNamedJobCompletedJobToCompletedJob(cj);
                successCount.getAndIncrement();
                return completedJob;
            } catch (JsonProcessingException e) {
                logger.warn("failed to parse CompletedJob from {}", data, e);
                failedCount.getAndIncrement();
            }
            return null;
        })
        .filter(Objects::nonNull)
        .collect(Collectors.toList());
    // Fixed log message: this method loads completed jobs, not job clusters.
    logger.info("Read and converted completed jobs. Successful - {}, Failed - {}", successCount.get(), failedCount.get());
    return completedJobsList;
}
/**
 * Moves a worker record from the active workers namespace to the archived one,
 * applying the archive TTL to the archived copy.
 *
 * <p>NOTE(review): the delete and the upsert are two separate KV operations; a
 * failure between them loses the worker record entirely rather than leaving it in
 * either namespace — confirm this is acceptable for the store's consistency model.
 */
@Override
public void archiveWorker(IMantisWorkerMetadata mwmd) throws IOException {
    MantisWorkerMetadataWritable worker = DataFormatAdapter.convertMantisWorkerMetadataToMantisWorkerMetadataWritable(mwmd);
    String pkey = makeBucketizedPartitionKey(worker.getJobId(), worker.getWorkerNumber());
    String skey = makeBucketizedSecondaryKey(worker.getStageNum(), worker.getWorkerIndex(), worker.getWorkerNumber());
    kvStore.delete(WORKERS_NS, pkey, skey);
    kvStore.upsert(ARCHIVED_WORKERS_NS, pkey, skey, mapper.writeValueAsString(worker), getArchiveDataTtlInMs());
}
/**
 * Returns all archived workers for the given job.
 *
 * <p>The job's metadata is read first (from the active namespace, falling back to
 * the archived one) to determine the worker-number upper bound, then every
 * bucketized archived-worker partition in that range is scanned. Per-row and
 * per-partition failures are logged and skipped.
 *
 * @param jobId job whose archived workers are requested
 * @return the archived workers found, or an empty list if the job is unknown
 * @throws IOException only if the archived-job metadata lookup itself fails
 */
@Override
public List<IMantisWorkerMetadata> getArchivedWorkers(String jobId) throws IOException {
    // try loading the active job first and then the archived job
    MantisJobMetadataWritable jobInfo;
    try {
        jobInfo = readJobStageData(JOB_STAGEDATA_NS, jobId);
    } catch (Exception e) {
        jobInfo = readJobStageData(ARCHIVED_JOB_STAGEDATA_NS, jobId);
    }
    if (jobInfo == null) {
        return Collections.emptyList();
    }
    int workerMaxPartitionKey = workerMaxPartitionKey(jobInfo);
    final List<IMantisWorkerMetadata> archivedWorkers = Lists.newArrayList();
    rangeOperation(workerMaxPartitionKey, idx -> {
        String pkey = makeBucketizedPartitionKey(jobId, idx);
        final Map<String, String> items;
        try {
            items = kvStore.getAll(ARCHIVED_WORKERS_NS, pkey);
            for (Map.Entry<String, String> entry : items.entrySet()) {
                try {
                    final JobWorker jobWorker = DataFormatAdapter.convertMantisWorkerMetadataWriteableToMantisWorkerMetadata(
                        mapper.readValue(entry.getValue(), MantisWorkerMetadataWritable.class),
                        eventPublisher);
                    archivedWorkers.add(jobWorker.getMetadata());
                } catch (Exception e) {
                    // A single undeserializable worker should not hide the rest.
                    logger.warn("Exception converting worker for jobId {} ({}, {})", jobId, pkey, entry.getKey(), e);
                }
            }
        } catch (IOException e) {
            logger.warn("Error reading archive workers for jobId {} for pkey {}", jobId, pkey, e);
        }
    });
    return archivedWorkers;
}
@Override
public void createJobCluster(IJobClusterMetadata jobCluster) throws Exception {
    // Creation shares the idempotent upsert semantics of an update.
    updateJobCluster(jobCluster);
}

@Override
public void updateJobCluster(IJobClusterMetadata jobCluster) throws Exception {
    // Serialize the cluster in its NamedJob legacy form and upsert it under the
    // cluster's name.
    final NamedJob namedJob = DataFormatAdapter.convertJobClusterMetadataToNamedJob(jobCluster);
    final String clusterName = jobCluster.getJobClusterDefinition().getName();
    kvStore.upsert(NAMED_JOBS_NS, clusterName, getJobClusterFieldName(), mapper.writeValueAsString(namedJob));
}
/**
 * Deletes a job cluster and all of its completed-job buckets.
 *
 * <p>The cluster record is read first to obtain the next-job-number counter, which
 * bounds the range of completed-job buckets to purge. Per-bucket failures are
 * logged and skipped.
 *
 * @param name job cluster name
 * @throws Exception if the cluster record cannot be read or deleted
 */
@Override
public void deleteJobCluster(String name) throws Exception {
    NamedJob namedJob = getJobCluster(NAMED_JOBS_NS, name);
    kvStore.deleteAll(NAMED_JOBS_NS, name);
    rangeOperation((int) namedJob.getNextJobNumber(),
        idx -> {
            try {
                removeCompletedJobForCluster(name, makeBucketizedPartitionKey(name, idx));
            } catch (IOException e) {
                // Fixed message: previous text "failed to completed job" was
                // ungrammatical and ambiguous.
                logger.warn("failed to remove completed jobs for named job {} with index {}", name, idx, e);
            }
        });
}
/** Reads and deserializes the NamedJob record for {@code name} from {@code namespace}. */
private NamedJob getJobCluster(String namespace, String name) throws Exception {
    return getJobCluster(namespace, name, kvStore.get(namespace, name, getJobClusterFieldName()));
}

/**
 * Deserializes a NamedJob from its JSON form. The {@code namespace} and
 * {@code name} parameters are currently unused; they exist so both overloads share
 * a signature shape.
 */
private NamedJob getJobCluster(String namespace, String name, String data) throws Exception {
    return mapper.readValue(data, NamedJob.class);
}

/**
 * Extracts the numeric suffix of a job id of the form {@code <cluster>-<number>}.
 * Throws NumberFormatException if the suffix is not numeric.
 */
private int parseJobId(String jobId) {
    final int idx = jobId.lastIndexOf("-");
    return Integer.parseInt(jobId.substring(idx + 1));
}
/**
 * Stores one completed job under a bucketized partition key derived from the job's
 * numeric suffix, with the job number as the secondary key (so a bucket holds up
 * to WORKER_BATCH_SIZE completed jobs).
 */
@Override
public void storeCompletedJobForCluster(String name, CompletedJob job) throws IOException {
    int jobIdx = parseJobId(job.getJobId());
    NamedJob.CompletedJob completedJob = DataFormatAdapter.convertCompletedJobToNamedJobCompletedJob(job);
    kvStore.upsert(NAMED_COMPLETEDJOBS_NS,
        makeBucketizedPartitionKey(name, jobIdx),
        String.valueOf(jobIdx),
        mapper.writeValueAsString(completedJob));
}

/**
 * Removes completed-job data for the bucket containing {@code jobId}.
 *
 * <p>NOTE(review): this calls {@code deleteAll} on the whole bucketized partition,
 * which removes EVERY completed job sharing the bucket — not just {@code jobId}.
 * deleteJobCluster relies on this bucket-wide purge, but callers expecting a
 * single-job delete may remove more than intended — confirm the intended contract
 * before changing either side.
 */
@Override
public void removeCompletedJobForCluster(String name, String jobId) throws IOException {
    int jobIdx = parseJobId(jobId);
    kvStore.deleteAll(NAMED_COMPLETEDJOBS_NS,
        makeBucketizedPartitionKey(name, jobIdx));
}
@Override
public Optional<IMantisJobMetadata> loadArchivedJob(String jobId) throws IOException {
    // Absence or any read/conversion failure is reported as an empty Optional
    // (after logging) rather than propagated to the caller.
    try {
        final MantisJobMetadataWritable writable = readJobStageData(ARCHIVED_JOB_STAGEDATA_NS, jobId);
        final IMantisJobMetadata converted =
            DataFormatAdapter.convertMantisJobWriteableToMantisJobMetadata(writable, eventPublisher);
        return Optional.of(converted);
    } catch (Exception e) {
        logger.error("Exception loading archived job {}", jobId, e);
        return Optional.empty();
    }
}
@Override
public List<String> initActiveVmAttributeValuesList() throws IOException {
    // The active-ASG list lives in a single well-known row.
    final String data = kvStore.get(ACTIVE_ASGS_NS,
        "activeASGs", "thelist");
    logger.info("read active VMs data {} from Cass", data);
    return StringUtils.isBlank(data)
        ? Collections.emptyList()
        : mapper.readValue(data, new TypeReference<List<String>>() {});
}

@Override
public void setActiveVmAttributeValuesList(List<String> vmAttributesList) throws IOException {
    logger.info("Setting active ASGs {}", vmAttributesList);
    final String serialized = mapper.writeValueAsString(vmAttributesList);
    kvStore.upsert(ACTIVE_ASGS_NS,
        "activeASGs", "thelist",
        serialized);
}
/**
 * Reads the registration record for the given task executor.
 *
 * @throws IOException wrapping any lookup or deserialization failure
 */
@Override
public TaskExecutorRegistration getTaskExecutorFor(TaskExecutorID taskExecutorID) throws IOException {
    try {
        final String resourceId = taskExecutorID.getResourceId();
        final String value =
            kvStore.get(CONTROLPLANE_NS, taskExecutorRegistrationKey(resourceId), resourceId);
        return mapper.readValue(value, TaskExecutorRegistration.class);
    } catch (Exception e) {
        throw new IOException(e);
    }
}

/** Persists a task executor registration keyed by its resource id. */
@Override
public void storeNewTaskExecutor(TaskExecutorRegistration registration) throws IOException {
    final String resourceId = registration.getTaskExecutorID().getResourceId();
    kvStore.upsert(CONTROLPLANE_NS, taskExecutorRegistrationKey(resourceId), resourceId,
        mapper.writeValueAsString(registration));
}

/**
 * Builds the registration partition key. Consistency fix: the getter and the
 * setter previously constructed the same "<prefix>-<resourceId>" key with two
 * different expressions; a single helper keeps them from drifting apart.
 */
private static String taskExecutorRegistrationKey(String resourceId) {
    return TASK_EXECUTOR_REGISTRATION + "-" + resourceId;
}
@Override
public void storeNewDisableTaskExecutorRequest(DisableTaskExecutorsRequest request) throws IOException {
    // Requests are stored per cluster, keyed by the request's content hash.
    String data = mapper.writeValueAsString(request);
    kvStore.upsert(
        DISABLE_TASK_EXECUTOR_REQUESTS,
        request.getClusterID().getResourceID(),
        request.getHash(), data);
}

@Override
public void deleteExpiredDisableTaskExecutorRequest(DisableTaskExecutorsRequest request) throws IOException {
    kvStore.delete(
        DISABLE_TASK_EXECUTOR_REQUESTS,
        request.getClusterID().getResourceID(),
        request.getHash());
}

@Override
public List<DisableTaskExecutorsRequest> loadAllDisableTaskExecutorsRequests(ClusterID clusterID) throws IOException {
    // Deserialization failures are wrapped unchecked, matching the previous
    // stream-based behavior.
    final List<DisableTaskExecutorsRequest> requests = new ArrayList<>();
    for (String value : kvStore.getAll(DISABLE_TASK_EXECUTOR_REQUESTS, clusterID.getResourceID()).values()) {
        try {
            requests.add(mapper.readValue(value, DisableTaskExecutorsRequest.class));
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
    return requests;
}
/** Returns true iff an artifact row exists for the given resource id. */
@Override
public boolean isArtifactExists(String resourceId) throws IOException {
    return kvStore.isRowExists(JOB_ARTIFACTS_NS, resourceId, resourceId);
}

/**
 * Reads and deserializes the artifact stored under (resourceId, resourceId).
 * Throws if the row is missing or fails to parse.
 */
@Override
public JobArtifact getArtifactById(String resourceId) throws IOException {
    String data = kvStore.get(JOB_ARTIFACTS_NS, resourceId, resourceId);
    return mapper.readValue(data, JobArtifact.class);
}
/**
 * Lists the artifacts stored under {@code name}; when {@code version} is given,
 * only that single version is read. Rows that fail to deserialize are logged and
 * skipped.
 *
 * <p>NOTE(review): when {@code version} is non-null but absent from the store,
 * {@code kvStore.get} may return null and {@code ImmutableList.of(null)} throws
 * NullPointerException — behavior preserved from the original; confirm intended.
 */
@Override
public List<JobArtifact> listJobArtifacts(String name, String version) throws IOException {
    final Collection<String> artifacts;
    if (version == null) {
        artifacts = kvStore.getAll(JOB_ARTIFACTS_NS, name).values();
    } else {
        artifacts = ImmutableList.of(kvStore.get(JOB_ARTIFACTS_NS, name, version));
    }
    final List<JobArtifact> result = new ArrayList<>();
    for (String e : artifacts) {
        try {
            result.add(mapper.readValue(e, JobArtifact.class));
        } catch (JsonProcessingException ex) {
            logger.warn("Failed to deserialize job artifact metadata for {} (data={})", name, e, ex);
        }
    }
    return result;
}
@Override
public void addNewJobArtifactsToCache(ClusterID clusterID, List<ArtifactID> artifacts) throws IOException {
    // Each artifact id is stored as both secondary key and value under the cluster.
    final String clusterKey = clusterID.getResourceID();
    for (ArtifactID artifactId : artifacts) {
        final String id = artifactId.getResourceID();
        kvStore.upsert(JOB_ARTIFACTS_TO_CACHE_PER_CLUSTER_ID_NS, clusterKey, id, id);
    }
}

@Override
public void removeJobArtifactsToCache(ClusterID clusterID, List<ArtifactID> artifacts) throws IOException {
    final String clusterKey = clusterID.getResourceID();
    for (ArtifactID artifactId : artifacts) {
        kvStore.delete(JOB_ARTIFACTS_TO_CACHE_PER_CLUSTER_ID_NS, clusterKey, artifactId.getResourceID());
    }
}

@Override
public List<String> listJobArtifactsToCache(ClusterID clusterID) throws IOException {
    // Values are the artifact ids themselves (see addNewJobArtifactsToCache).
    final Collection<String> ids =
        kvStore.getAll(JOB_ARTIFACTS_TO_CACHE_PER_CLUSTER_ID_NS, clusterID.getResourceID()).values();
    return new ArrayList<>(ids);
}
/**
 * Lists artifact names, optionally restricted to those starting with
 * {@code prefix} and containing {@code contains} (case-insensitive).
 */
@Override
public List<String> listJobArtifactsByName(String prefix, String contains) throws IOException {
    final Set<String> artifactNames = prefix.isEmpty()
        ? kvStore.getAll(JOB_ARTIFACTS_NS, getJobArtifactsByNamePartitionKey()).keySet()
        : kvStore.getAllWithPrefix(JOB_ARTIFACTS_NS, getJobArtifactsByNamePartitionKey(), prefix).keySet();
    if (contains.isEmpty()) {
        return new ArrayList<>(artifactNames);
    }
    final String needle = contains.toLowerCase();
    return artifactNames.stream()
        .filter(artifact -> artifact.toLowerCase().contains(needle))
        .distinct()
        .collect(Collectors.toList());
}
/**
 * Serializes and upserts one artifact record under the given keys. IOExceptions
 * are rethrown unchecked so this can be used inside CompletableFuture stages
 * (see the public addNewJobArtifact overload).
 */
private void addNewJobArtifact(String partitionKey, String secondaryKey, JobArtifact jobArtifact) {
    try {
        final String data = mapper.writeValueAsString(jobArtifact);
        kvStore.upsert(JOB_ARTIFACTS_NS, partitionKey, secondaryKey, data);
    } catch (IOException e) {
        logger.error("Error while storing keyId {} for artifact {}", partitionKey, jobArtifact, e);
        throw new RuntimeException(e);
    }
}
/**
 * Stores a new job artifact under three key layouts, sequentially: the by-name
 * index, the (name, version) row, and finally the artifact-id row. {@code get()}
 * blocks until the whole chain completes, so the method is synchronous despite the
 * async API; any stage failure is wrapped in IOException.
 */
@Override
public void addNewJobArtifact(JobArtifact jobArtifact) throws IOException {
    try {
        CompletableFuture.runAsync(
            () -> addNewJobArtifact(getJobArtifactsByNamePartitionKey(), jobArtifact.getName(), jobArtifact))
            .thenRunAsync(
                () -> addNewJobArtifact(jobArtifact.getName(), jobArtifact.getVersion(), jobArtifact))
            // Given the lack of transactions in key-value stores we want to make sure that if one of these
            // writes fail, we don't leave the metadata store with partial information.
            // Storing artifactID in the last call should do the trick because the artifact discovery
            // service will eventually retry on missing artifactIDs.
            .thenRunAsync(
                () ->
                    addNewJobArtifact(
                        jobArtifact.getArtifactID().getResourceID(),
                        jobArtifact.getArtifactID().getResourceID(),
                        jobArtifact))
            .get();
    } catch (InterruptedException | ExecutionException e) {
        logger.error("Error while storing job artifact {} to Cassandra Storage Provider.", jobArtifact, e);
        throw new IOException(e);
    }
}
/**
 * Persists a resource cluster spec and registers the cluster id/version in the
 * global registration row, then reads the spec back as confirmation.
 *
 * <p>NOTE(review): the spec upsert and the registration upsert are two separate
 * KV writes with no transaction; a failure between them leaves a spec without a
 * registration entry — confirm callers tolerate that.
 */
@Override
public ResourceClusterSpecWritable registerAndUpdateClusterSpec(
    ResourceClusterSpecWritable clusterSpecW) throws IOException {
    // Merge this cluster into the existing registrations (builder keeps prior entries).
    RegisteredResourceClustersWritable oldValue = getRegisteredResourceClustersWritable();
    RegisteredResourceClustersWritable.RegisteredResourceClustersWritableBuilder rcBuilder =
        (oldValue == null) ? RegisteredResourceClustersWritable.builder()
            : oldValue.toBuilder();
    RegisteredResourceClustersWritable newValue = rcBuilder
        .cluster(
            clusterSpecW.getId().getResourceID(),
            RegisteredResourceClustersWritable.ClusterRegistration
                .builder()
                .clusterId(clusterSpecW.getId())
                .version(clusterSpecW.getVersion())
                .build())
        .build();
    // todo(sundaram): Check if this will work
    kvStore.upsert(
        CONTROLPLANE_NS,
        getClusterKeyFromId(clusterSpecW.getId()), //partition key
        "", //secondary key
        mapper.writeValueAsString(clusterSpecW));
    kvStore.upsert(CONTROLPLANE_NS, CLUSTER_REGISTRATION_KEY, "", mapper.writeValueAsString(newValue));
    return getResourceClusterSpecWritable(clusterSpecW.getId());
}
/**
 * Removes a cluster from the global registration row and deletes its spec record,
 * returning the updated registrations.
 */
@Override
public RegisteredResourceClustersWritable deregisterCluster(ClusterID clusterId)
    throws IOException {
    final RegisteredResourceClustersWritable current = getRegisteredResourceClustersWritable();
    final RegisteredResourceClustersWritable.RegisteredResourceClustersWritableBuilder builder =
        RegisteredResourceClustersWritable.builder();
    // Copy every registration except the one being removed.
    current.getClusters().forEach((key, registration) -> {
        if (!Objects.equals(clusterId.getResourceID(), key)) {
            builder.cluster(key, registration);
        }
    });
    final RegisteredResourceClustersWritable updated = builder.build();
    kvStore.upsert(
        CONTROLPLANE_NS,
        CLUSTER_REGISTRATION_KEY, //partition key
        "", //secondary key
        mapper.writeValueAsString(updated));
    kvStore.delete(CONTROLPLANE_NS, getClusterKeyFromId(clusterId), "");
    return updated;
}
/**
 * Reads the global registrations row; an absent row yields an empty registrations
 * object rather than null.
 */
@Override
public RegisteredResourceClustersWritable getRegisteredResourceClustersWritable()
    throws IOException {
    final String json = kvStore.get(CONTROLPLANE_NS, CLUSTER_REGISTRATION_KEY, "");
    return (json == null)
        ? RegisteredResourceClustersWritable.builder().build()
        : mapper.readValue(json, RegisteredResourceClustersWritable.class);
}

/** Reads a single cluster's spec row; returns null when the row is absent. */
@Override
public ResourceClusterSpecWritable getResourceClusterSpecWritable(ClusterID clusterID)
    throws IOException {
    final String json = kvStore.get(
        CONTROLPLANE_NS,
        getClusterKeyFromId(clusterID), //partition key
        "");
    return (json == null) ? null : mapper.readValue(json, ResourceClusterSpecWritable.class);
}
/**
 * Reads the scale rules for a cluster; an absent row yields an empty rule set for
 * that cluster rather than null.
 */
@Override
public ResourceClusterScaleRulesWritable getResourceClusterScaleRules(ClusterID clusterId)
    throws IOException {
    final String json = kvStore.get(CONTROLPLANE_NS, getClusterRuleKeyFromId(clusterId), "");
    return (json == null)
        ? ResourceClusterScaleRulesWritable.builder().clusterId(clusterId).build()
        : mapper.readValue(json, ResourceClusterScaleRulesWritable.class);
}

/** Persists a full rule set for a cluster and reads it back as confirmation. */
@Override
public ResourceClusterScaleRulesWritable registerResourceClusterScaleRule(
    ResourceClusterScaleRulesWritable ruleSpec) throws IOException {
    final String serialized = mapper.writeValueAsString(ruleSpec);
    kvStore.upsert(
        CONTROLPLANE_NS,
        getClusterRuleKeyFromId(ruleSpec.getClusterId()), //partition key
        "", //secondary key
        serialized);
    return getResourceClusterScaleRules(ruleSpec.getClusterId());
}
/**
 * Merges a single scale rule into the cluster's existing rule set (keyed by SKU)
 * and persists the merged set.
 */
@Override
public ResourceClusterScaleRulesWritable registerResourceClusterScaleRule(
    ResourceClusterScaleSpec rule) throws IOException {
    final ResourceClusterScaleRulesWritable existing =
        getResourceClusterScaleRules(rule.getClusterId());
    // Defensive null branch retained from the original, though the getter
    // currently falls back to an empty rule set rather than null.
    final ResourceClusterScaleRulesWritable newSpec =
        (existing == null)
            ? ResourceClusterScaleRulesWritable.builder()
                .clusterId(rule.getClusterId())
                .scaleRule(rule.getSkuId().getResourceID(), rule)
                .build()
            : existing.toBuilder()
                .scaleRule(rule.getSkuId().getResourceID(), rule)
                .build();
    return registerResourceClusterScaleRule(newSpec);
}
// Prefix for per-cluster spec rows in CONTROLPLANE_NS.
private static final String RESOURCE_CLUSTER_REGISTRATION = "ResourceClusterRegistration";
/** Partition key for a cluster's spec row: {@code ResourceClusterRegistration_<id>}. */
private static String getClusterKeyFromId(ClusterID id) {
    return RESOURCE_CLUSTER_REGISTRATION + "_" + id.getResourceID();
}
// Single well-known row listing all registered clusters.
private static final String CLUSTER_REGISTRATION_KEY = "resource_cluster_registrations";
// Prefix for per-cluster scale-rule rows in CONTROLPLANE_NS.
private static final String RESOURCE_CLUSTER_RULE_PREFIX = "ResourceClusterRulePrefix";
/** Partition key for a cluster's scale-rules row: {@code ResourceClusterRulePrefix_<id>}. */
private static String getClusterRuleKeyFromId(ClusterID id) {
    return RESOURCE_CLUSTER_RULE_PREFIX + "_" + id.getResourceID();
}
}
| 7,990 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/persistence/MantisJobStore.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.persistence;
import io.mantisrx.master.jobcluster.IJobClusterMetadata;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.master.jobcluster.job.IMantisStageMetadata;
import io.mantisrx.master.jobcluster.job.MantisStageMetadataImpl;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.master.jobcluster.job.worker.JobWorker;
import io.mantisrx.master.resourcecluster.DisableTaskExecutorsRequest;
import io.mantisrx.server.core.domain.ArtifactID;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl.CompletedJob;
import io.mantisrx.server.master.persistence.exceptions.InvalidJobException;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorRegistration;
import io.mantisrx.shaded.com.google.common.cache.Cache;
import io.mantisrx.shaded.com.google.common.cache.CacheBuilder;
import io.mantisrx.shaded.com.google.common.collect.ImmutableList;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.PriorityBlockingQueue;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.schedulers.Schedulers;
public class MantisJobStore {
private static final Logger logger = LoggerFactory.getLogger(MantisJobStore.class);
private final IMantisPersistenceProvider storageProvider;
private final ConcurrentMap<String, String> archivedJobIds;
private final ArchivedJobsMetadataCache archivedJobsMetadataCache;
private final ArchivedWorkersCache archivedWorkersCache;
private final PriorityBlockingQueue<TerminatedJob> terminatedJobsToDelete;
public MantisJobStore(IMantisPersistenceProvider storageProvider) {
this.storageProvider = storageProvider;
archivedJobIds = new ConcurrentHashMap<>();
archivedWorkersCache = new ArchivedWorkersCache(ConfigurationProvider.getConfig().getMaxArchivedJobsToCache());
archivedJobsMetadataCache = new ArchivedJobsMetadataCache(ConfigurationProvider.getConfig().getMaxArchivedJobsToCache());
terminatedJobsToDelete = new PriorityBlockingQueue<>();
}
public void loadAllArchivedJobsAsync() {
logger.info("Beginning load of Archived Jobs");
storageProvider.loadAllArchivedJobs()
.subscribeOn(Schedulers.io())
.subscribe((job) -> {
archivedJobsMetadataCache.add(job);
archivedJobIds.put(job.getJobId().getId(), job.getJobId().getId());
terminatedJobsToDelete.add(new TerminatedJob(job.getJobId().getId(), getTerminatedAt(job)));
},
(e) -> logger.warn("Exception loading archived Jobs", e),
() -> logger.info("Finished Loading all archived Jobs!"));
}
private long getTerminatedAt(IMantisJobMetadata mjmd) {
long terminatedAt = mjmd.getSubmittedAtInstant().toEpochMilli();
for (IMantisStageMetadata msmd : mjmd.getStageMetadata().values()) {
for (JobWorker mwmd : msmd.getAllWorkers()) {
terminatedAt = Math.max(terminatedAt, mwmd.getMetadata().getCompletedAt());
}
}
return terminatedAt;
}
public List<IJobClusterMetadata> loadAllJobClusters() throws IOException {
List<IJobClusterMetadata> iJobClusterMetadataList = storageProvider.loadAllJobClusters();
logger.info("Loaded {} job clusters", iJobClusterMetadataList.size());
return iJobClusterMetadataList;
}
public List<IMantisJobMetadata> loadAllActiveJobs() throws IOException {
List<IMantisJobMetadata> mantisJobMetadataList = storageProvider.loadAllJobs();
logger.info("Loaded {} active jobs", mantisJobMetadataList.size());
return mantisJobMetadataList;
}
public List<CompletedJob> loadAllCompletedJobs() throws IOException {
return storageProvider.loadAllCompletedJobs();
}
public void createJobCluster(IJobClusterMetadata jobCluster) throws Exception {
storageProvider.createJobCluster(jobCluster);
}
public void updateJobCluster(IJobClusterMetadata jobCluster) throws Exception {
storageProvider.updateJobCluster(jobCluster);
}
public void deleteJobCluster(String name) throws Exception {
storageProvider.deleteJobCluster(name);
}
public void deleteJob(String jobId) throws Exception {
archivedJobsMetadataCache.remove(jobId);
archivedWorkersCache.remove(jobId);
storageProvider.deleteJob(jobId);
}
public void deleteCompletedJob(String clusterName, String jobId) throws IOException {
storageProvider.removeCompletedJobForCluster(clusterName, jobId);
}
public void storeCompletedJobForCluster(String name, CompletedJob completedJob) throws IOException {
storageProvider.storeCompletedJobForCluster(name, completedJob);
}
public void storeNewJob(IMantisJobMetadata jobMetadata) throws Exception {
storageProvider.storeNewJob(jobMetadata);
}
public TaskExecutorRegistration getTaskExecutor(TaskExecutorID taskExecutorID) throws IOException {
return storageProvider.getTaskExecutorFor(taskExecutorID);
}
public void storeNewTaskExecutor(TaskExecutorRegistration registration) throws IOException {
storageProvider.storeNewTaskExecutor(registration);
}
public void storeNewDisabledTaskExecutorsRequest(DisableTaskExecutorsRequest request) throws IOException {
storageProvider.storeNewDisableTaskExecutorRequest(request);
}
public void deleteExpiredDisableTaskExecutorsRequest(DisableTaskExecutorsRequest request) throws IOException {
storageProvider.deleteExpiredDisableTaskExecutorRequest(request);
}
public List<DisableTaskExecutorsRequest> loadAllDisableTaskExecutorsRequests(ClusterID clusterID) throws IOException {
return storageProvider.loadAllDisableTaskExecutorsRequests(clusterID);
}
public void replaceTerminatedWorker(IMantisWorkerMetadata oldWorker, IMantisWorkerMetadata replacement) throws Exception {
storageProvider.storeAndUpdateWorkers(oldWorker, replacement);
}
public void updateJob(final IMantisJobMetadata jobMetadata) throws Exception {
storageProvider.updateJob(jobMetadata);
}
public void updateStage(IMantisStageMetadata stageMeta) throws IOException {
storageProvider.updateMantisStage(stageMeta);
}
public List<? extends IMantisWorkerMetadata> storeNewWorkers(IMantisJobMetadata job, List<IMantisWorkerMetadata> workerRequests)
throws IOException, InvalidJobException {
if (workerRequests == null || workerRequests.isEmpty())
return null;
String jobId = workerRequests.get(0).getJobId();
logger.debug("Adding {} workers for job {}", workerRequests.size(), jobId);
List<IMantisWorkerMetadata> addedWorkers = new ArrayList<>();
List<Integer> savedStageList = Lists.newArrayList();
for (IMantisWorkerMetadata workerRequest : workerRequests) {
// store stage if not stored already
if (!savedStageList.contains(workerRequest.getStageNum())) {
Optional<IMantisStageMetadata> stageMetadata = job.getStageMetadata(workerRequest.getStageNum());
if (stageMetadata.isPresent()) {
storageProvider.storeMantisStage(stageMetadata.get());
} else {
throw new RuntimeException(String.format("No such stage %d", workerRequest.getStageNum()));
}
savedStageList.add(workerRequest.getStageNum());
}
addedWorkers.add(workerRequest);
}
storageProvider.storeWorkers(jobId, addedWorkers);
return addedWorkers;
}
public void storeNewWorker(IMantisWorkerMetadata workerRequest)
throws IOException, InvalidJobException {
storageProvider.storeWorker(workerRequest);
}
public void updateWorker(IMantisWorkerMetadata worker) throws IOException {
storageProvider.updateWorker(worker);
}
private void archiveWorkersIfAny(IMantisJobMetadata mjmd) throws IOException {
for (IMantisStageMetadata msmd : mjmd.getStageMetadata().values()) {
for (JobWorker removedWorker :
((MantisStageMetadataImpl) msmd).removeArchiveableWorkers()) {
archiveWorker(removedWorker.getMetadata());
}
}
}
public void archiveWorker(IMantisWorkerMetadata worker) throws IOException {
storageProvider.archiveWorker(worker);
ConcurrentMap<Integer, IMantisWorkerMetadata> workersMap;
try {
workersMap = archivedWorkersCache.getArchivedWorkerMap(worker.getJobId());
workersMap.putIfAbsent(worker.getWorkerNumber(), worker);
} catch (ExecutionException e) {
logger.warn("Error adding worker to archived cache", e);
}
}
public Optional<IMantisJobMetadata> getArchivedJob(final String jobId) {
final Optional<IMantisJobMetadata> jobOp = Optional.ofNullable(archivedJobsMetadataCache.getJob(jobId));
if (!jobOp.isPresent()) {
logger.error("archivedJobsMetadataCache found no job for job ID {}", jobId);
}
return jobOp;
}
public void archiveJob(IMantisJobMetadata job) throws IOException {
archivedJobsMetadataCache.add(job);
storageProvider.archiveJob(job.getJobId().getId());
}
/**
* @param jobId
* @param workerNumber
*
* @return
*/
public Optional<IMantisWorkerMetadata> getArchivedWorker(String jobId, int workerNumber) {
try {
ConcurrentMap<Integer, IMantisWorkerMetadata> workersMap = archivedWorkersCache.getArchivedWorkerMap(jobId);
if (workersMap != null) {
return Optional.ofNullable(workersMap.get(workerNumber));
}
} catch (ExecutionException e) {
logger.warn("Exception getting archived worker", e);
}
return Optional.empty();
}
public List<IMantisWorkerMetadata> getArchivedWorkers(String jobId) throws Exception {
return ImmutableList.copyOf(archivedWorkersCache.getArchivedWorkerMap(jobId).values());
}
public void addNewJobArtifactsToCache(ClusterID clusterID, List<ArtifactID> artifacts) throws IOException {
storageProvider.addNewJobArtifactsToCache(clusterID, artifacts);
}
public void removeJobArtifactsToCache(ClusterID clusterID, List<ArtifactID> artifacts) throws IOException {
storageProvider.removeJobArtifactsToCache(clusterID, artifacts);
}
public List<String> getJobArtifactsToCache(ClusterID clusterID) throws IOException {
return storageProvider.listJobArtifactsToCache(clusterID);
}
private static class TerminatedJob implements Comparable<TerminatedJob> {
private final String jobId;
private final long terminatedTime;
private TerminatedJob(String jobId, long terminatedTime) {
this.jobId = jobId;
this.terminatedTime = terminatedTime;
}
@Override
public int compareTo(TerminatedJob o) {
return Long.compare(terminatedTime, o.terminatedTime);
}
}
/**
 * Size-bounded Guava cache of archived worker metadata, keyed by job id. A miss lazily
 * loads every archived worker for that job from the storage provider and indexes the
 * result by worker number.
 */
private class ArchivedWorkersCache {
    private final Cache<String, ConcurrentMap<Integer, IMantisWorkerMetadata>> cache;

    ArchivedWorkersCache(int cacheSize) {
        this.cache = CacheBuilder.newBuilder().maximumSize(cacheSize).build();
    }

    /**
     * Returns the worker-number -> metadata map for the job, loading it from the
     * storage provider on a cache miss.
     *
     * @throws ExecutionException if the storage-provider load fails
     */
    ConcurrentMap<Integer, IMantisWorkerMetadata> getArchivedWorkerMap(final String jobId) throws ExecutionException {
        return cache.get(jobId, () -> {
            ConcurrentMap<Integer, IMantisWorkerMetadata> byWorkerNumber = new ConcurrentHashMap<>();
            List<IMantisWorkerMetadata> workers = storageProvider.getArchivedWorkers(jobId);
            if (workers != null) {
                for (IMantisWorkerMetadata worker : workers) {
                    byWorkerNumber.putIfAbsent(worker.getWorkerNumber(), worker);
                }
            }
            return byWorkerNumber;
        });
    }

    /** Drops the cached entry for the job, if present. */
    void remove(String jobId) {
        cache.invalidate(jobId);
    }
}
/**
 * Size-bounded Guava cache of archived job metadata, keyed by job id. Entries hold an
 * Optional so "looked up and absent" is cached too, avoiding repeated storage reads.
 */
private class ArchivedJobsMetadataCache {
    private final Cache<String, Optional<IMantisJobMetadata>> cache;

    ArchivedJobsMetadataCache(int cacheSize) {
        cache = CacheBuilder
            .newBuilder()
            .maximumSize(cacheSize)
            .build();
    }

    /**
     * Returns the archived job metadata for the id, or {@code null} if absent or the
     * load fails. Best-effort contract is preserved, but failures are now logged
     * instead of being swallowed silently (the previous catch block dropped the
     * exception entirely).
     */
    IMantisJobMetadata getJob(String jobId) {
        try {
            final Optional<IMantisJobMetadata> jobMetadata = cache.get(jobId, () -> loadArchivedJobImpl(jobId));
            return jobMetadata.orElse(null);
        } catch (Exception e) {
            logger.warn("Exception loading archived job {}", jobId, e);
            return null;
        }
    }

    /** Loads the archived job from the storage provider; logs when nothing is found. */
    private Optional<IMantisJobMetadata> loadArchivedJobImpl(String jobId) throws IOException, ExecutionException {
        final Optional<IMantisJobMetadata> jobMetadata = storageProvider.loadArchivedJob(jobId);
        if (!jobMetadata.isPresent()) {
            logger.warn("Failed to load archived job {}. No job found!", jobId);
        }
        return jobMetadata;
    }

    /** Caches (or replaces) the metadata for the given job. */
    void add(IMantisJobMetadata job) {
        cache.put(job.getJobId().getId(), Optional.ofNullable(job));
    }

    /** Drops the cached entry for the job, if present. */
    void remove(String jobId) {
        cache.invalidate(jobId);
    }
}
}
| 7,991 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/persistence/FileBasedPersistenceProvider.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.persistence;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventsProto;
import io.mantisrx.master.jobcluster.IJobClusterMetadata;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.server.master.store.FileBasedStore;
import io.mantisrx.server.master.store.KeyValueStore;
import java.io.File;
import java.io.IOException;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Simple File based storage provider. Intended mainly as a sample implementation for
* {@link IMantisPersistenceProvider} interface. This implementation is complete in its functionality, but, isn't
* expected to be scalable or performant for production loads.
* <p>This implementation is mainly for testing.</p>
*/
public class FileBasedPersistenceProvider extends KeyValueBasedPersistenceProvider {
    private static final Logger logger = LoggerFactory.getLogger(FileBasedPersistenceProvider.class);

    /** Publisher that silently drops every lifecycle event; used when no real publisher is supplied. */
    private static final LifecycleEventPublisher noopEventPublisher = new LifecycleEventPublisher() {
        @Override
        public void publishAuditEvent(LifecycleEventsProto.AuditEvent auditEvent) {
        }

        @Override
        public void publishStatusEvent(LifecycleEventsProto.StatusEvent statusEvent) {
        }

        @Override
        public void publishWorkerListChangedEvent(LifecycleEventsProto.WorkerListChangedEvent workerListChangedEvent) {
        }
    };

    /**
     * Creates a provider backed by a real file store when {@code actualStorageProvider}
     * is true, or by the no-op key-value store otherwise. Events are not published.
     */
    public FileBasedPersistenceProvider(boolean actualStorageProvider) {
        this(actualStorageProvider ? new FileBasedStore() : KeyValueStore.NO_OP, noopEventPublisher);
    }

    public FileBasedPersistenceProvider(KeyValueStore sprovider, LifecycleEventPublisher publisher) {
        super(sprovider, publisher);
    }

    public FileBasedPersistenceProvider(FileBasedStore sprovider) {
        this(sprovider, noopEventPublisher);
    }

    // NOTE(review): the eventPublisher flag is currently ignored — the no-op publisher
    // is always used. Confirm whether a real publisher was intended when it is true.
    public FileBasedPersistenceProvider(File stateDirectory, boolean eventPublisher) {
        this(new FileBasedStore(stateDirectory), noopEventPublisher);
    }

    /** Finds the job cluster with the given name by scanning all persisted clusters. */
    Optional<IJobClusterMetadata> loadJobCluster(String clusterName) throws IOException {
        for (IJobClusterMetadata jobCluster : loadAllJobClusters()) {
            if (clusterName.equals(jobCluster.getJobClusterDefinition().getName())) {
                return Optional.of(jobCluster);
            }
        }
        return Optional.empty();
    }

    /** Finds the active job with the given id by scanning all persisted jobs. */
    Optional<IMantisJobMetadata> loadActiveJob(String jobId) throws IOException {
        for (IMantisJobMetadata job : loadAllJobs()) {
            if (jobId.equals(job.getJobId().getId())) {
                return Optional.of(job);
            }
        }
        return Optional.empty();
    }
}
| 7,992 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/persistence | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/persistence/exceptions/InvalidWorkerStateChangeException.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.persistence.exceptions;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.server.core.domain.WorkerId;
/**
 * Thrown when a worker is asked to make an illegal state transition, or is found in an
 * unexpected state for its job.
 */
public class InvalidWorkerStateChangeException extends Exception {

    private static final long serialVersionUID = 6997193965197779136L;

    public InvalidWorkerStateChangeException(String jobId, WorkerState state) {
        super("Unexpected state " + state + " for job " + jobId);
    }

    public InvalidWorkerStateChangeException(String jobId, WorkerState state, Throwable t) {
        super("Unexpected state " + state + " for job " + jobId, t);
    }

    public InvalidWorkerStateChangeException(String jobId, WorkerId workerId, WorkerState fromState, WorkerState toState) {
        // Fix: the jobId argument was previously accepted but omitted from the message;
        // include it so the failing job is identifiable from the exception alone.
        super("Invalid worker state transition of " + workerId.getId() + " of job " + jobId
            + " from state " + fromState + " to " + toState);
    }

    public InvalidWorkerStateChangeException(String jobId, WorkerState fromState, WorkerState toState, Throwable cause) {
        super("Invalid worker state transition of job " + jobId + " from state " + fromState + " to " + toState, cause);
    }
}
| 7,993 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/persistence | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/persistence/exceptions/InvalidJobException.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.persistence.exceptions;
import io.mantisrx.server.master.domain.JobId;
/**
 * Thrown when a job (optionally narrowed to a stage and/or worker) cannot be found or
 * is otherwise invalid. The message encodes the coordinates as
 * {@code <jobId>[-stage-<n>][-worker-<m>]}, where negative stage/worker values are omitted.
 */
public class InvalidJobException extends Exception {
    /**
     * for serialization.
     */
    private static final long serialVersionUID = -6012093609773859131L;

    public InvalidJobException(String id) {
        super(id);
    }

    public InvalidJobException(String id, Throwable cause) {
        super(id, cause);
    }

    public InvalidJobException(JobId jobId, int stageNum, int workerId) {
        super(describe(jobId, stageNum, workerId));
    }

    public InvalidJobException(JobId jobId, int stageNum, int workerId, Throwable cause) {
        super(describe(jobId, stageNum, workerId), cause);
    }

    public InvalidJobException(String jobId, int stageNum, int workerId) {
        super(describe(jobId, stageNum, workerId));
    }

    public InvalidJobException(String jobId, int stageNum, int workerId, Throwable cause) {
        super(describe(jobId, stageNum, workerId), cause);
    }

    // Single message builder replacing four copies of the same concatenation expression.
    // Object parameter preserves the original behavior of string-concatenating either a
    // JobId (via toString) or a raw String.
    private static String describe(Object jobId, int stageNum, int workerId) {
        return jobId + ((stageNum >= 0) ? "-stage-" + stageNum : "") + ((workerId >= 0) ? "-worker-" + workerId : "");
    }
}
| 7,994 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/persistence | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/persistence/exceptions/InvalidJobStateChangeException.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.persistence.exceptions;
import io.mantisrx.master.jobcluster.job.JobState;
/**
 * Thrown when a job is found in an unexpected state or asked to make an illegal state
 * transition.
 */
public class InvalidJobStateChangeException extends Exception {
    /**
     * for serialization.
     */
    private static final long serialVersionUID = 7215672111575922178L;

    public InvalidJobStateChangeException(String jobId, JobState state) {
        super(unexpectedStateMessage(jobId, state));
    }

    public InvalidJobStateChangeException(String jobId, JobState state, Throwable t) {
        super(unexpectedStateMessage(jobId, state), t);
    }

    public InvalidJobStateChangeException(String jobId, JobState fromState, JobState toState) {
        super(invalidTransitionMessage(jobId, fromState, toState));
    }

    public InvalidJobStateChangeException(String jobId, JobState fromState, JobState toState, Throwable cause) {
        super(invalidTransitionMessage(jobId, fromState, toState), cause);
    }

    // Message builders shared by the constructor pairs; output is identical to the
    // previous inline concatenations.
    private static String unexpectedStateMessage(String jobId, JobState state) {
        return "Unexpected state " + state + " for job " + jobId;
    }

    private static String invalidTransitionMessage(String jobId, JobState from, JobState to) {
        return "Invalid state transition of job " + jobId + " from state " + from + " to " + to;
    }
}
| 7,995 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/persistence | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/persistence/exceptions/JobClusterAlreadyExistsException.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.persistence.exceptions;
/**
 * Thrown when creation of a job cluster is requested but a cluster with the same name
 * already exists. The exception message is the conflicting cluster name.
 */
public class JobClusterAlreadyExistsException extends Exception {
// for serialization
private static final long serialVersionUID = -1492003797257425141L;
/**
 * @param jobClusterName name of the cluster that already exists (becomes the message)
 */
public JobClusterAlreadyExistsException(String jobClusterName) {
super(jobClusterName);
}
/**
 * @param jobClusterName name of the cluster that already exists (becomes the message)
 * @param cause underlying failure that surfaced the conflict
 */
public JobClusterAlreadyExistsException(String jobClusterName, Throwable cause) {
super(jobClusterName, cause);
}
}
| 7,996 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/heartbeathandlers/PayloadExecutor.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//package io.mantisrx.server.master.heartbeathandlers;
//
//import rx.Observer;
//
//public interface PayloadExecutor {
// public Observer<HeartbeatPayloadHandler.Data> call();
//}
| 7,997 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/domain/Costs.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.domain;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import lombok.Value;
/**
* Represents the cost of running a job. Currently, this only tracks the daily cost of running a
* job. But in the future, we may want to track other costs such as the cost of running a job for an
* hour.
* <p>
* Similarly, we can also break up the cost into different components such as the cost of CPUs and
* the cost of memory.
* </p>
*/
@Value
public class Costs {
Double dailyCost;
@JsonIgnore
public static final Costs ZERO = new Costs(0.0);
public Costs multipliedBy(double multiplier) {
return new Costs(dailyCost * multiplier);
}
public Costs plus(Costs other) {
return new Costs(dailyCost + other.dailyCost);
}
}
| 7,998 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/domain/JobClusterMetadata.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.domain;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Metadata describing a job cluster: artifact jars, SLA, default parameters, owner,
 * migration config, labels, and flags. All fields are final except the running job
 * counter, which is mutable via its AtomicLong.
 */
public class JobClusterMetadata {
    private final String name;
    private final List<Jar> jars;
    private final JobOwner owner;
    private final SLA sla;
    private final List<Parameter> parameters;
    private final boolean isReadyForJobMaster;
    private final boolean disabled;
    private final WorkerMigrationConfig migrationConfig;
    private final List<Label> labels;
    // Mutable counter of jobs ever submitted; seeded from the persisted lastJobCount.
    private final AtomicLong lastJobCount = new AtomicLong(0);

    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public JobClusterMetadata(@JsonProperty("name") String name,
                              @JsonProperty("jars") List<Jar> jars,
                              @JsonProperty("sla") SLA sla,
                              @JsonProperty("parameters") List<Parameter> parameters,
                              @JsonProperty("owner") JobOwner owner,
                              @JsonProperty("lastJobCount") long lastJobCount,
                              @JsonProperty("disabled") boolean disabled,
                              @JsonProperty("isReadyForJobMaster") boolean isReadyForJobMaster,
                              @JsonProperty("migrationConfig") WorkerMigrationConfig migrationConfig,
                              @JsonProperty("labels") List<Label> labels) {
        this.name = name;
        this.jars = Optional.ofNullable(jars).orElse(new ArrayList<>());
        this.sla = sla;
        this.parameters = Optional.ofNullable(parameters).orElse(new ArrayList<>());
        this.isReadyForJobMaster = isReadyForJobMaster;
        this.owner = owner;
        this.migrationConfig = migrationConfig;
        // Fix: labels is now null-defaulted like jars and parameters, so getLabels()
        // never returns null when the field is absent from persisted JSON.
        this.labels = Optional.ofNullable(labels).orElse(new ArrayList<>());
        this.disabled = disabled;
        this.lastJobCount.set(lastJobCount);
    }

    public String getName() {
        return name;
    }

    public List<Jar> getJars() {
        return jars;
    }

    public JobOwner getOwner() {
        return owner;
    }

    public SLA getSla() {
        return sla;
    }

    public List<Parameter> getParameters() {
        return parameters;
    }

    public boolean isReadyForJobMaster() {
        return isReadyForJobMaster;
    }

    public boolean isDisabled() {
        return disabled;
    }

    public WorkerMigrationConfig getMigrationConfig() {
        return migrationConfig;
    }

    public List<Label> getLabels() {
        return labels;
    }

    public AtomicLong getLastJobCount() {
        return lastJobCount;
    }
}
| 7,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.