index
int64
0
0
repo_id
stringlengths
26
205
file_path
stringlengths
51
246
content
stringlengths
8
433k
__index_level_0__
int64
0
10k
0
Create_ds/mantis-control-plane/core/src/main/java/io/mantisrx/server/core
Create_ds/mantis-control-plane/core/src/main/java/io/mantisrx/server/core/master/MasterMonitor.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.core.master;

import rx.Observable;

/**
 * Tracks the current Mantis master. Implementations publish master changes as an
 * {@link Observable} stream and also expose the most recently seen master for
 * synchronous lookup.
 */
public interface MasterMonitor {

    /**
     * @return a stream of {@link MasterDescription}s; implementations in this package
     *         (e.g. the ZooKeeper-backed monitor) emit the current master first and
     *         then every subsequent change.
     */
    Observable<MasterDescription> getMasterObservable();

    /**
     * @return the most recently observed master description (synchronous snapshot).
     */
    MasterDescription getLatestMaster();
}
4,200
0
Create_ds/mantis-control-plane/core/src/main/java/io/mantisrx/server/core
Create_ds/mantis-control-plane/core/src/main/java/io/mantisrx/server/core/master/ZookeeperMasterMonitor.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.core.master;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicReference;
import io.mantisrx.server.core.json.DefaultObjectMapper;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.api.BackgroundCallback;
import org.apache.curator.framework.api.CuratorEvent;
import org.apache.curator.framework.recipes.cache.NodeCache;
import org.apache.curator.framework.recipes.cache.NodeCacheListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.subjects.BehaviorSubject;

/**
 * A monitor that monitors the status of Mantis masters.
 *
 * Watches a single ZooKeeper node (via a Curator {@link NodeCache}) whose data is a
 * JSON-serialized {@link MasterDescription}. On every node change the new description
 * is deserialized, cached in {@link #latestMaster}, and pushed to subscribers through
 * a {@link BehaviorSubject} (so late subscribers immediately receive the last value).
 */
public class ZookeeperMasterMonitor implements MasterMonitor {

    private static final Logger logger = LoggerFactory.getLogger(ZookeeperMasterMonitor.class);

    private final CuratorFramework curator;
    // ZK path whose node data holds the serialized MasterDescription.
    private final String masterPath;
    // Replays the most recent master to each new subscriber; seeded with initValue.
    private final BehaviorSubject<MasterDescription> masterSubject;
    // Synchronous snapshot of the latest master, kept in lockstep with masterSubject.
    private final AtomicReference<MasterDescription> latestMaster = new AtomicReference<>();
    private final NodeCache nodeMonitor;

    /**
     * @param curator    a started Curator client (this class does not manage its lifecycle)
     * @param masterPath ZK path of the master node to watch
     * @param initValue  initial master description emitted before any ZK update arrives
     */
    public ZookeeperMasterMonitor(CuratorFramework curator, String masterPath, MasterDescription initValue) {
        this.curator = curator;
        this.masterPath = masterPath;
        this.masterSubject = BehaviorSubject.create(initValue);
        this.nodeMonitor = new NodeCache(curator, masterPath);
        this.latestMaster.set(initValue);
    }

    /**
     * Starts watching the master node. Must be called before updates are delivered.
     *
     * @throws IllegalStateException if the underlying NodeCache fails to start
     */
    public void start() {
        nodeMonitor.getListenable().addListener(new NodeCacheListener() {
            @Override
            public void nodeChanged() throws Exception {
                // Re-read the node on every change notification.
                retrieveMaster();
            }
        });

        try {
            nodeMonitor.start();
        } catch (Exception e) {
            throw new IllegalStateException("Failed to start master node monitor: " + e.getMessage(), e);
        }

        logger.info("The ZK master monitor is started");
    }

    /**
     * Reads the master node in the background and publishes the parsed description.
     * Errors are logged rather than propagated (the NodeCache will retry on the next change).
     */
    private void retrieveMaster() {
        try {
            // NOTE(review): Java evaluates the inner expression first, so the
            // getData().inBackground(callback).forPath(masterPath) read is actually issued
            // BEFORE the sync() call, and its (null) result is passed to
            // sync().inBackground(Object) as an opaque context. The read is therefore not
            // strictly ordered after the sync despite the comment below — confirm intended.
            curator
                    .sync() // sync with ZK before reading
                    .inBackground(
                            curator
                                    .getData()
                                    .inBackground(new BackgroundCallback() {
                                        @Override
                                        public void processResult(CuratorFramework client, CuratorEvent event)
                                                throws Exception {
                                            MasterDescription description = DefaultObjectMapper.getInstance().readValue(event.getData(), MasterDescription.class);
                                            logger.info("New master retrieved: " + description);
                                            // Update the snapshot first, then notify subscribers.
                                            latestMaster.set(description);
                                            masterSubject.onNext(description);
                                        }
                                    })
                                    .forPath(masterPath)
                    )
                    .forPath(masterPath);
        } catch (Exception e) {
            logger.error("Failed to retrieve updated master information: " + e.getMessage(), e);
        }
    }

    @Override
    public Observable<MasterDescription> getMasterObservable() {
        return masterSubject;
    }

    @Override
    public MasterDescription getLatestMaster() {
        return latestMaster.get();
    }

    /**
     * Stops watching the master node and releases the NodeCache.
     *
     * @throws RuntimeException if closing the NodeCache fails
     */
    public void shutdown() {
        try {
            nodeMonitor.close();
            logger.info("ZK master monitor is shut down");
        } catch (IOException e) {
            throw new RuntimeException("Failed to close the ZK node monitor: " + e.getMessage(), e);
        }
    }
}
4,201
0
Create_ds/mantis-control-plane/core/src/main/java/io/mantisrx/server/core
Create_ds/mantis-control-plane/core/src/main/java/io/mantisrx/server/core/master/LocalMasterMonitor.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.core.master; import rx.Observable; /** * A {@code MasterMonitor} implementation that does not monitor anything. Use this * class for local testing. */ public class LocalMasterMonitor implements MasterMonitor { private final MasterDescription master; public LocalMasterMonitor(MasterDescription master) { this.master = master; } @Override public Observable<MasterDescription> getMasterObservable() { return Observable.just(master); } @Override public MasterDescription getLatestMaster() { return master; } }
4,202
0
Create_ds/mantis-control-plane/core/src/main/java/io/mantisrx/server/core
Create_ds/mantis-control-plane/core/src/main/java/io/mantisrx/server/core/master/MasterDescription.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.core.master;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.google.common.base.Objects;

/**
 * A JSON-serializable data transfer object for Mantis master descriptions. It's used to transfer
 * metadata between master and workers.
 *
 * Instances are immutable; all fields are set at construction (typically by Jackson via the
 * {@link JsonCreator} constructor).
 */
public class MasterDescription {

    // JSON property names, shared between the creator and the getters so the
    // serialized form stays consistent.
    public static final String JSON_PROP_HOSTNAME = "hostname";
    public static final String JSON_PROP_HOST_IP = "hostIP";
    public static final String JSON_PROP_API_PORT = "apiPort";
    public static final String JSON_PROP_SCHED_INFO_PORT = "schedInfoPort";
    public static final String JSON_PROP_API_PORT_V2 = "apiPortV2";
    public static final String JSON_PROP_API_STATUS_URI = "apiStatusUri";
    public static final String JSON_PROP_CONSOLE_PORT = "consolePort";
    public static final String JSON_PROP_CREATE_TIME = "createTime";

    private final String hostname;
    private final String hostIP;
    private final int apiPort;
    private final int schedInfoPort;
    private final int apiPortV2;
    private final String apiStatusUri;
    private final long createTime;
    private final int consolePort;

    /**
     * @param hostname      master host name
     * @param hostIP        master host IP address
     * @param apiPort       primary API port
     * @param schedInfoPort scheduling-info port
     * @param apiPortV2     v2 API port
     * @param apiStatusUri  relative URI of the API status endpoint (see {@link #getFullApiStatusUri()})
     * @param consolePort   console port
     * @param createTime    creation timestamp (epoch millis, by convention of the caller — TODO confirm)
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public MasterDescription(
            @JsonProperty(JSON_PROP_HOSTNAME) String hostname,
            @JsonProperty(JSON_PROP_HOST_IP) String hostIP,
            @JsonProperty(JSON_PROP_API_PORT) int apiPort,
            @JsonProperty(JSON_PROP_SCHED_INFO_PORT) int schedInfoPort,
            @JsonProperty(JSON_PROP_API_PORT_V2) int apiPortV2,
            @JsonProperty(JSON_PROP_API_STATUS_URI) String apiStatusUri,
            @JsonProperty(JSON_PROP_CONSOLE_PORT) int consolePort,
            @JsonProperty(JSON_PROP_CREATE_TIME) long createTime
    ) {
        this.hostname = hostname;
        this.hostIP = hostIP;
        this.apiPort = apiPort;
        this.schedInfoPort = schedInfoPort;
        this.apiPortV2 = apiPortV2;
        this.apiStatusUri = apiStatusUri;
        this.consolePort = consolePort;
        this.createTime = createTime;
    }

    @JsonProperty(JSON_PROP_HOSTNAME)
    public String getHostname() {
        return hostname;
    }

    @JsonProperty(JSON_PROP_HOST_IP)
    public String getHostIP() {
        return hostIP;
    }

    @JsonProperty(JSON_PROP_API_PORT)
    public int getApiPort() {
        return apiPort;
    }

    @JsonProperty(JSON_PROP_SCHED_INFO_PORT)
    public int getSchedInfoPort() {
        return schedInfoPort;
    }

    @JsonProperty(JSON_PROP_API_PORT_V2)
    public int getApiPortV2() {
        return apiPortV2;
    }

    @JsonProperty(JSON_PROP_API_STATUS_URI)
    public String getApiStatusUri() {
        return apiStatusUri;
    }

    @JsonProperty(JSON_PROP_CREATE_TIME)
    public long getCreateTime() {
        return createTime;
    }

    /**
     * Builds the absolute HTTP URL of the API status endpoint from the hostname, API port and
     * the (possibly slash-prefixed) relative status URI.
     *
     * NOTE(review): throws NPE if {@code apiStatusUri} is null — callers appear to rely on it
     * always being set; confirm before hardening.
     */
    public String getFullApiStatusUri() {
        String uri = getApiStatusUri().trim();
        if (uri.startsWith("/")) {
            uri = uri.substring(1);
        }
        return String.format("http://%s:%d/%s", getHostname(), getApiPort(), uri);
    }

    @JsonProperty(JSON_PROP_CONSOLE_PORT)
    public int getConsolePort() {
        return consolePort;
    }

    @Override
    public String toString() {
        return Objects.toStringHelper(this)
                .add("hostname", hostname)
                .add("hostIP", hostIP)
                .add("apiPort", apiPort)
                .add("schedInfoPort", schedInfoPort)
                .add("apiPortV2", apiPortV2)
                .add("apiStatusUri", apiStatusUri)
                .add("createTime", createTime)
                .add("consolePort", consolePort)
                .toString();
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;

        MasterDescription that = (MasterDescription) o;

        if (apiPort != that.apiPort) return false;
        if (schedInfoPort != that.schedInfoPort) return false;
        if (apiPortV2 != that.apiPortV2) return false;
        if (consolePort != that.consolePort) return false;
        if (createTime != that.createTime) return false;
        if (apiStatusUri != null ? !apiStatusUri.equals(that.apiStatusUri) : that.apiStatusUri != null) return false;
        if (hostIP != null ? !hostIP.equals(that.hostIP) : that.hostIP != null) return false;
        return hostname != null ? hostname.equals(that.hostname) : that.hostname == null;
    }

    @Override
    public int hashCode() {
        int result = hostname != null ? hostname.hashCode() : 0;
        result = 31 * result + (hostIP != null ? hostIP.hashCode() : 0);
        result = 31 * result + apiPort;
        // Fix: schedInfoPort was previously omitted here even though equals() compares it,
        // so objects differing only in schedInfoPort always collided. Including it keeps
        // hashCode consistent with the fields equals() inspects.
        result = 31 * result + schedInfoPort;
        result = 31 * result + apiPortV2;
        result = 31 * result + (apiStatusUri != null ? apiStatusUri.hashCode() : 0);
        result = 31 * result + (int) (createTime ^ (createTime >>> 32));
        result = 31 * result + consolePort;
        return result;
    }
}
4,203
0
Create_ds/mantis-control-plane/core/src/main/java/io/mantisrx/server/core
Create_ds/mantis-control-plane/core/src/main/java/io/mantisrx/server/core/domain/WorkerId.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.core.domain; import java.util.Optional; import org.slf4j.Logger; import org.slf4j.LoggerFactory; //import com.google.common.base.Preconditions; public class WorkerId { private static final Logger logger = LoggerFactory.getLogger(WorkerId.class); private static final String DELIMITER = "-"; private static final String WORKER_DELIMITER = "-worker-"; private final String jobCluster; private final String jobId; private final int wIndex; private final int wNum; private final String id; public WorkerId(final String jobId, final int wIndex, final int wNum) { this(WorkerId.getJobClusterFromId(jobId), jobId, wIndex, wNum); } public WorkerId(final String jobCluster, final String jobId, final int wIndex, final int wNum) { // Preconditions.checkNotNull(jobCluster, "jobCluster"); // Preconditions.checkNotNull(jobId, "jobId"); // Preconditions.checkArgument(wIndex >= 0); // Preconditions.checkArgument(wNum >= 0); this.jobCluster = jobCluster; this.jobId = jobId; this.wIndex = wIndex; this.wNum = wNum; this.id = new StringBuilder() .append(jobId) .append(WORKER_DELIMITER) .append(wIndex) .append('-') .append(wNum) .toString(); } private static String getJobClusterFromId(final String jobId) { final int jobClusterIdx = jobId.lastIndexOf(DELIMITER); if (jobClusterIdx > 0) { return jobId.substring(0, jobClusterIdx); } else { logger.error("Failed to get JobCluster name from Job ID {}", 
jobId); throw new IllegalArgumentException("Job ID is invalid " + jobId); } } /* Returns a valid WorkerId only if the passed 'id' string is well-formed. There are some instances in Master currently where we could get back index = -1 which would fail to get a valid WorkerId from String. */ public static Optional<WorkerId> fromId(final String id) { final int workerDelimIndex = id.indexOf(WORKER_DELIMITER); if (workerDelimIndex > 0) { final String jobId = id.substring(0, workerDelimIndex); final int jobClusterIdx = jobId.lastIndexOf(DELIMITER); if (jobClusterIdx > 0) { final String jobCluster = jobId.substring(0, jobClusterIdx); final String workerInfo = id.substring(workerDelimIndex + WORKER_DELIMITER.length()); final int delimiterIndex = workerInfo.indexOf(DELIMITER); if (delimiterIndex > 0) { try { final int wIndex = Integer.parseInt(workerInfo.substring(0, delimiterIndex)); final int wNum = Integer.parseInt(workerInfo.substring(delimiterIndex + 1)); return Optional.of(new WorkerId(jobCluster, jobId, wIndex, wNum)); } catch (NumberFormatException nfe) { logger.warn("failed to parse workerId from {}", id, nfe); } } } } logger.warn("failed to parse workerId from {}", id); return Optional.empty(); } public String getJobCluster() { return jobCluster; } public String getJobId() { return jobId; } public int getWorkerIndex() { return wIndex; } public int getWorkerNum() { return wNum; } public String getId() { return id; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; WorkerId workerId = (WorkerId) o; if (wIndex != workerId.wIndex) return false; if (wNum != workerId.wNum) return false; if (!jobCluster.equals(workerId.jobCluster)) return false; if (!jobId.equals(workerId.jobId)) return false; return id.equals(workerId.id); } @Override public int hashCode() { int result = jobCluster.hashCode(); result = 31 * result + jobId.hashCode(); result = 31 * result + wIndex; result = 31 * result + 
wNum; result = 31 * result + id.hashCode(); return result; } @Override public String toString() { return id; } }
4,204
0
Create_ds/mantis-control-plane/core/src/main/java/io/mantisrx/server/core
Create_ds/mantis-control-plane/core/src/main/java/io/mantisrx/server/core/domain/JobMetadata.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.core.domain;

import java.net.URL;
import java.util.List;

import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.parameter.Parameter;

/**
 * Immutable holder for the metadata of a submitted Mantis job: its id, artifact location,
 * stage count, submitting user, scheduling info, parameters, and timeout settings.
 * All values are captured at construction; there are no setters.
 */
public class JobMetadata {

    private final String jobId;
    private final URL jobJarUrl;
    private final int totalStages;
    private final String user;
    private final SchedulingInfo schedulingInfo;
    private final List<Parameter> parameters;
    private final long subscriptionTimeoutSecs;
    private final long minRuntimeSecs;

    public JobMetadata(final String jobId,
                       final URL jobJarUrl,
                       final int totalStages,
                       final String user,
                       final SchedulingInfo schedulingInfo,
                       final List<Parameter> parameters,
                       final long subscriptionTimeoutSecs,
                       final long minRuntimeSecs) {
        this.jobId = jobId;
        this.jobJarUrl = jobJarUrl;
        this.totalStages = totalStages;
        this.user = user;
        this.schedulingInfo = schedulingInfo;
        this.parameters = parameters;
        this.subscriptionTimeoutSecs = subscriptionTimeoutSecs;
        this.minRuntimeSecs = minRuntimeSecs;
    }

    /** @return the job id */
    public String getJobId() {
        return jobId;
    }

    /** @return location of the job's artifact jar */
    public URL getJobJarUrl() {
        return jobJarUrl;
    }

    /** @return number of stages in the job */
    public int getTotalStages() {
        return totalStages;
    }

    /** @return the submitting user */
    public String getUser() {
        return user;
    }

    /** @return the job's scheduling info descriptor */
    public SchedulingInfo getSchedulingInfo() {
        return schedulingInfo;
    }

    /** @return the job parameters as supplied at construction (not defensively copied) */
    public List<Parameter> getParameters() {
        return parameters;
    }

    /** @return subscription timeout in seconds */
    public long getSubscriptionTimeoutSecs() {
        return subscriptionTimeoutSecs;
    }

    /** @return minimum runtime in seconds */
    public long getMinRuntimeSecs() {
        return minRuntimeSecs;
    }
}
4,205
0
Create_ds/mantis-control-plane/core/src/main/java/io/mantisrx/server/core
Create_ds/mantis-control-plane/core/src/main/java/io/mantisrx/server/core/stats/MetricStringConstants.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.core.stats;

/**
 * String constants (metric group names, metric names, and tag keys) shared by metric
 * publishers and consumers so both sides agree on the exact spellings.
 * Not instantiable — constants only.
 */
public class MetricStringConstants {

    public static final String METRIC_NAME_STR = "name";

    // Tag keys identifying which job/stage/worker a metric belongs to.
    public static final String MANTIS_JOB_NAME = "mantisJobName";
    public static final String MANTIS_JOB_ID = "mantisJobId";
    public static final String MANTIS_STAGE_NUM = "mantisStageNum";
    public static final String MANTIS_WORKER_INDEX = "mantisWorkerIndex";
    public static final String MANTIS_WORKER_NUM = "mantisWorkerNum";

    // Resource Usage metrics
    public static final String RESOURCE_USAGE_METRIC_GROUP = "ResourceUsage";
    public static final String CPU_PCT_LIMIT = "cpuPctLimit";
    public static final String CPU_PCT_USAGE_CURR = "cpuPctUsageCurr";
    public static final String CPU_PCT_USAGE_PEAK = "cpuPctUsagePeak";
    public static final String MEM_LIMIT = "memLimit";
    public static final String CACHED_MEM_USAGE_CURR = "cachedMemUsageCurr";
    public static final String CACHED_MEM_USAGE_PEAK = "cachedMemUsagePeak";
    public static final String TOT_MEM_USAGE_CURR = "totMemUsageCurr";
    public static final String TOT_MEM_USAGE_PEAK = "totMemUsagePeak";
    public static final String NW_BYTES_LIMIT = "nwBytesLimit";
    public static final String NW_BYTES_USAGE_CURR = "nwBytesUsageCurr";
    public static final String NW_BYTES_USAGE_PEAK = "nwBytesUsagePeak";

    // Data drop metrics
    public static final String DATA_DROP_METRIC_GROUP = "DataDrop";
    public static final String DROP_COUNT = "dropCount";
    public static final String ON_NEXT_COUNT = "onNextCount";
    public static final String DROP_PERCENT = "dropPercent";

    // Kafka lag metric (names match the Kafka consumer's own metric group/names)
    public static final String KAFKA_CONSUMER_FETCH_MGR_METRIC_GROUP = "consumer-fetch-manager-metrics";
    public static final String KAFKA_LAG = "records-lag-max";
    public static final String KAFKA_PROCESSED = "records-consumed-rate";

    // RPS Metrics
    public static final String WORKER_STAGE_INNER_INPUT = "worker_stage_inner_input";
    public static final String ON_NEXT_GAUGE = "onNextGauge";

    // Prevent instantiation — this is a constants holder.
    private MetricStringConstants() {}
}
4,206
0
Create_ds/mantis-control-plane/core/src/main/java/io/mantisrx/server/core
Create_ds/mantis-control-plane/core/src/main/java/io/mantisrx/server/core/stats/UsageDataStats.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.core.stats; import java.util.ArrayDeque; import io.mantisrx.runtime.descriptor.StageScalingPolicy; public class UsageDataStats { private final int capacity; private final ArrayDeque<Double> data; private final double highThreshold; private final double lowThreshold; private double sum = 0.0; private int countAboveHighThreshold = 0; private int countBelowLowThreshold = 0; private StageScalingPolicy.RollingCount rollingCount = new StageScalingPolicy.RollingCount(1, 1); public UsageDataStats(double highThreshold, double lowThreshold, StageScalingPolicy.RollingCount rollingCount) { this.capacity = rollingCount.getOf(); data = new ArrayDeque<>(capacity); this.highThreshold = highThreshold; this.lowThreshold = lowThreshold; this.rollingCount = rollingCount; } public void add(double d) { if (data.size() >= capacity) { final Double removed = data.removeFirst(); sum -= removed; if (removed > highThreshold) countAboveHighThreshold--; if (removed < lowThreshold && lowThreshold > 0.0) { // disable scaleDown for lowThreshold <= 0 countBelowLowThreshold--; } } data.addLast(d); sum += d; if (d > highThreshold) countAboveHighThreshold++; if (d < lowThreshold && lowThreshold > 0.0) { // disable scaleDown for lowThreshold <= 0 countBelowLowThreshold++; } } public int getCapacity() { return capacity; } public double getAverage() { return sum / data.size(); } public int 
getCountAboveHighThreshold() { return countAboveHighThreshold; } public int getCountBelowLowThreshold() { return countBelowLowThreshold; } public int getSize() { return data.size(); } public boolean getHighThreshTriggered() { return data.size() >= rollingCount.getCount() && countAboveHighThreshold >= rollingCount.getCount(); } public boolean getLowThreshTriggered() { return data.size() >= rollingCount.getCount() && countBelowLowThreshold >= rollingCount.getCount(); } public String getCurrentHighCount() { return countAboveHighThreshold + " of " + data.size(); } public String getCurrentLowCount() { return countBelowLowThreshold + " of " + data.size(); } }
4,207
0
Create_ds/mantis-control-plane/core/src/main/java/io/mantisrx/server/core
Create_ds/mantis-control-plane/core/src/main/java/io/mantisrx/server/core/stats/SimpleStats.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.core.stats; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; // Simple stats to figure out outlier threshold, specifically for low number of data points. // This is meant for finding out outliers greater than most of the data, not outliers that are smaller. // We make special cases as needed. public class SimpleStats { private final int maxDataPoints; private final ArrayList<Double> dataPoints; public SimpleStats(int maxDataPoints) { this.maxDataPoints = maxDataPoints; dataPoints = new ArrayList<>(); } public SimpleStats(Collection<Double> data) { this.maxDataPoints = data.size(); dataPoints = new ArrayList<>(data); } public static void main(String[] args) { SimpleStats simpleStats = new SimpleStats(5); simpleStats.add(4.0); for (int i = 1; i < 4; i++) simpleStats.add(0.0); simpleStats.add(10.0); System.out.println(String.format("thresh=%8.2f", simpleStats.getOutlierThreshold())); } public void add(double d) { if (dataPoints.size() == maxDataPoints) dataPoints.remove(0); dataPoints.add(d); } public double getOutlierThreshold() { if (dataPoints.size() <= 2) return twoPointsResults(); Double[] data = dataPoints.toArray(new Double[0]); Arrays.sort(data); // special case when the highest item is the major contributor of the total double total = 0.0; for (double d : data) total += d; if (data[data.length - 1] / total > 0.75) return 
data[data.length - 2]; if (dataPoints.size() == 3) return threePointsResults(data); if (dataPoints.size() == 4) return fourPointsResults(data); double q1 = data[(int) Math.round((double) data.length / 4.0)]; double q3 = data[(int) Math.floor((double) data.length * 3.0 / 4.0)]; return getThresh(q1, q3); } private double fourPointsResults(Double[] data) { return getThresh(data[1], data[2]); } private double getThresh(double q1, double q3) { return q3 + q3 - q1; } private double threePointsResults(Double[] data) { double q1 = (data[0] + data[1]) / 2.0; double q3 = (data[1] + data[2]) / 2.0; return getThresh(q1, q3); } private double twoPointsResults() { return dataPoints.isEmpty() ? 0.0 : dataPoints.get(0) == 0.0 ? 0.0 : dataPoints.get(dataPoints.size() - 1); } public boolean isSufficientData() { return dataPoints.size() > 3; } @Override public String toString() { return "SimpleStats{" + "maxDataPoints=" + maxDataPoints + ", dataPoints=" + dataPoints + '}'; } }
4,208
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/server/master/persistence/SimpleCachedFileStorageProviderTest.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.persistence; import static io.mantisrx.master.jobcluster.job.worker.MantisWorkerMetadataImpl.MANTIS_SYSTEM_ALLOCATED_NUM_PORTS; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.PrintWriter; import java.time.Instant; import java.util.List; import java.util.Map; import java.util.Optional; import io.mantisrx.master.events.*; import io.mantisrx.master.jobcluster.job.worker.JobWorker; import io.mantisrx.server.master.persistence.exceptions.JobClusterAlreadyExistsException; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature; import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper; import io.mantisrx.shaded.com.google.common.collect.Lists; import io.mantisrx.shaded.com.google.common.collect.Maps; import io.mantisrx.common.Label; import io.mantisrx.master.jobcluster.IJobClusterMetadata; import io.mantisrx.master.jobcluster.JobClusterMetadataImpl; import io.mantisrx.master.jobcluster.job.IMantisJobMetadata; import io.mantisrx.master.jobcluster.job.IMantisStageMetadata; import 
io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata; import io.mantisrx.master.jobcluster.job.JobState; import io.mantisrx.master.jobcluster.job.JobTestHelper; import io.mantisrx.master.jobcluster.job.MantisJobMetadataImpl; import io.mantisrx.master.jobcluster.job.MantisStageMetadataImpl; import io.mantisrx.master.jobcluster.job.worker.MantisWorkerMetadataImpl; import io.mantisrx.runtime.JobOwner; import io.mantisrx.runtime.MachineDefinition; import io.mantisrx.runtime.WorkerMigrationConfig; import io.mantisrx.runtime.descriptor.SchedulingInfo; import io.mantisrx.runtime.descriptor.StageSchedulingInfo; import io.mantisrx.server.master.domain.IJobClusterDefinition; import io.mantisrx.server.master.domain.JobClusterConfig; import io.mantisrx.server.master.domain.JobClusterDefinitionImpl; import io.mantisrx.server.master.domain.JobDefinition; import io.mantisrx.server.master.domain.JobId; public class SimpleCachedFileStorageProviderTest { private final ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); private final LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl()); @BeforeClass public static void setup() { // jobStore = new MantisJobStore(storageProvider); } @AfterClass public static void tearDown() { SimpleCachedFileStorageProvider sProvider = new SimpleCachedFileStorageProvider(); sProvider.deleteAllFiles(); } private JobClusterDefinitionImpl createFakeJobClusterDefn(String clusterName, List<Label> labels) { JobClusterConfig clusterConfig = new JobClusterConfig.Builder() .withArtifactName("myart") .withSchedulingInfo(new SchedulingInfo.Builder().numberOfStages(1).singleWorkerStageWithConstraints(new MachineDefinition(1, 10, 10, 10, 2), Lists.newArrayList(), Lists.newArrayList()).build()) .withVersion("0.0.1") .build(); return new 
JobClusterDefinitionImpl.Builder() .withJobClusterConfig(clusterConfig) .withName(clusterName) .withUser("user") .withLabels(labels) .withParameters(Lists.newArrayList()) .withIsReadyForJobMaster(true) .withOwner(new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo")) .withMigrationConfig(WorkerMigrationConfig.DEFAULT) .build(); } @Test public void testCreateJob() { String clusterName = "testCreateJob"; SimpleCachedFileStorageProvider sProvider = new SimpleCachedFileStorageProvider(); IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName); JobDefinition jobDefinition; try { jobDefinition = JobTestHelper.generateJobDefinition(clusterName); JobId jobId = JobId.fromId(clusterName + "-1").get(); IMantisJobMetadata mantisJobMetaData = new MantisJobMetadataImpl.Builder() .withJobId(jobId) .withSubmittedAt(Instant.now()) .withJobState(JobState.Accepted) .withNextWorkerNumToUse(1) .withJobDefinition(jobDefinition) .build(); sProvider.storeNewJob(mantisJobMetaData); SchedulingInfo schedInfo = jobDefinition.getSchedulingInfo(); int numStages = schedInfo.getStages().size(); for(int s=1; s<=numStages; s++) { StageSchedulingInfo stage = schedInfo.getStages().get(s); IMantisStageMetadata msmd = new MantisStageMetadataImpl.Builder(). 
withJobId(jobId) .withStageNum(s) .withNumStages(1) .withMachineDefinition(stage.getMachineDefinition()) .withNumWorkers(stage.getNumberOfInstances()) .withHardConstraints(stage.getHardConstraints()) .withSoftConstraints(stage.getSoftConstraints()) .withScalingPolicy(stage.getScalingPolicy()) .isScalable(stage.getScalable()) .build(); ((MantisJobMetadataImpl)mantisJobMetaData).addJobStageIfAbsent(msmd); sProvider.updateMantisStage(msmd); for(int w=0; w<stage.getNumberOfInstances(); w++) { JobWorker mwmd = new JobWorker.Builder() .withJobId(jobId) .withWorkerIndex(w) .withWorkerNumber(1) .withNumberOfPorts(stage.getMachineDefinition().getNumPorts() + MANTIS_SYSTEM_ALLOCATED_NUM_PORTS) .withStageNum(w+1) .withLifecycleEventsPublisher(eventPublisher) .build(); ((MantisJobMetadataImpl)mantisJobMetaData).addWorkerMetadata(1, mwmd); sProvider.storeWorker(mwmd.getMetadata()); } } Optional<IMantisJobMetadata> loadedJobMetaOp = sProvider.loadActiveJob(jobId.getId()); assertTrue(loadedJobMetaOp.isPresent()); IMantisJobMetadata loadedJobMeta = loadedJobMetaOp.get(); System.out.println("Original Job -> " + mantisJobMetaData); System.out.println("Loaded Job ->" + loadedJobMeta); isEqual(mantisJobMetaData, loadedJobMeta); } catch(Exception e) { e.printStackTrace(); fail(); } } private void isEqual(IMantisJobMetadata orig, IMantisJobMetadata loaded) { assertEquals(orig.getJobId(), loaded.getJobId()); assertEquals(orig.getSubmittedAtInstant(), loaded.getSubmittedAtInstant()); assertEquals(orig.getSubscriptionTimeoutSecs(), loaded.getSubscriptionTimeoutSecs()); assertEquals(orig.getState(),loaded.getState()); assertEquals(orig.getNextWorkerNumberToUse(), loaded.getNextWorkerNumberToUse()); System.out.println("Orig JobDefn: " + orig.getJobDefinition()); System.out.println("load JobDefn: " + loaded.getJobDefinition()); assertEquals(orig.getJobDefinition().toString(),loaded.getJobDefinition().toString()); 
assertEquals(((MantisJobMetadataImpl)orig).getStageMetadata().size(),((MantisJobMetadataImpl)loaded).getStageMetadata().size()); assertEquals(((MantisJobMetadataImpl)orig).getTotalStages(),((MantisJobMetadataImpl)loaded).getTotalStages()); for(int s = 1; s <= ((MantisJobMetadataImpl)orig).getTotalStages(); s++) { assertTrue(((MantisJobMetadataImpl)loaded).getStageMetadata(s).isPresent()); System.out.println("orig stage: " + ((MantisJobMetadataImpl)orig).getStageMetadata(s).get()); System.out.println("load stage: " + ((MantisJobMetadataImpl)loaded).getStageMetadata(s).get()); assertEquals(((MantisJobMetadataImpl)orig).getStageMetadata(s).get().toString(),((MantisJobMetadataImpl)loaded).getStageMetadata(s).get().toString()); } } // @Test public void serde() throws IOException { String clusterName = "testCreateClusterClueter"; File tmpFile = new File("/tmp/MantisSpool/jobClusters" + "/" + clusterName); tmpFile.createNewFile(); IJobClusterDefinition jobClusterDefn = createFakeJobClusterDefn(clusterName, Lists.newArrayList()); PrintWriter pwrtr = new PrintWriter(tmpFile); mapper.writeValue(pwrtr, jobClusterDefn); try (FileInputStream fis = new FileInputStream(tmpFile)) { IJobClusterDefinition jobClustermeta = mapper.readValue(fis, JobClusterDefinitionImpl.class); System.out.println("read: " + jobClustermeta.getName()); } catch (Exception e) { e.printStackTrace(); } } @Test public void testCreateAndGetJobCluster() { SimpleCachedFileStorageProvider sProvider = new SimpleCachedFileStorageProvider(); String clusterName = "testCreateClusterClueter"; JobClusterDefinitionImpl jobClusterDefn = createFakeJobClusterDefn(clusterName, Lists.newArrayList()); IJobClusterMetadata jobCluster = new JobClusterMetadataImpl.Builder().withLastJobCount(0).withJobClusterDefinition(jobClusterDefn).build(); try { sProvider.createJobCluster(jobCluster); Optional<IJobClusterMetadata> readDataOp = sProvider.loadJobCluster(clusterName); if(readDataOp.isPresent()) { assertEquals(clusterName, 
readDataOp.get().getJobClusterDefinition().getName()); } else { fail(); } } catch(Exception e) { e.printStackTrace(); fail(); } } @Test public void testUpdateJobCluster() { SimpleCachedFileStorageProvider sProvider = new SimpleCachedFileStorageProvider(); String clusterName = "testUpdateJobCluster"; JobClusterDefinitionImpl jobClusterDefn = createFakeJobClusterDefn(clusterName, Lists.newArrayList()); IJobClusterMetadata jobCluster = new JobClusterMetadataImpl.Builder().withLastJobCount(0).withJobClusterDefinition(jobClusterDefn).build(); try { sProvider.createJobCluster(jobCluster); Optional<IJobClusterMetadata> readDataOp = sProvider.loadJobCluster(clusterName); if(readDataOp.isPresent()) { assertEquals(clusterName, readDataOp.get().getJobClusterDefinition().getName()); assertEquals(0, readDataOp.get().getJobClusterDefinition().getLabels().size()); } else { fail(); } List<Label> labels = Lists.newArrayList(); labels.add(new Label("label1", "label1value")); jobClusterDefn = createFakeJobClusterDefn(clusterName, labels); IJobClusterMetadata jobClusterUpdated = new JobClusterMetadataImpl.Builder().withLastJobCount(0).withJobClusterDefinition(jobClusterDefn).build(); sProvider.updateJobCluster(jobClusterUpdated); readDataOp = sProvider.loadJobCluster(clusterName); if(readDataOp.isPresent()) { assertEquals(clusterName, readDataOp.get().getJobClusterDefinition().getName()); assertEquals(1, readDataOp.get().getJobClusterDefinition().getLabels().size()); } else { fail(); } } catch(Exception e) { e.printStackTrace(); fail(); } } @Test public void testGetAllJobClusters() throws IOException, JobClusterAlreadyExistsException { SimpleCachedFileStorageProvider sProvider = new SimpleCachedFileStorageProvider(); String clusterPrefix = "testGetAllJobClustersCluster"; for(int i=0; i<5; i++) { JobClusterDefinitionImpl jobClusterDefn = createFakeJobClusterDefn(clusterPrefix + "_" + i, Lists.newArrayList()); IJobClusterMetadata jobCluster = new 
JobClusterMetadataImpl.Builder().withLastJobCount(0).withJobClusterDefinition(jobClusterDefn).build(); sProvider.createJobCluster(jobCluster); } List<IJobClusterMetadata> jobClusterList = sProvider.loadAllJobClusters(); assertTrue(jobClusterList.size() >= 5); Map<String, IJobClusterMetadata> clustersMap = Maps.newHashMap(); for(IJobClusterMetadata cluster : jobClusterList) { clustersMap.put(cluster.getJobClusterDefinition().getName(), cluster); } for(int i=0; i<5; i++) { assertTrue(clustersMap.containsKey(clusterPrefix + "_" + i)); } } }
4,209
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/server/master/domain/DataFormatAdapterTest.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.domain;

import static java.util.Optional.of;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.time.Instant;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;

import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.com.fasterxml.jackson.datatype.jdk8.Jdk8Module;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import io.mantisrx.common.Label;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventPublisherImpl;
import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl;
import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl;
import io.mantisrx.master.jobcluster.IJobClusterMetadata;
import io.mantisrx.master.jobcluster.JobClusterMetadataImpl;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.master.jobcluster.job.IMantisStageMetadata;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.master.jobcluster.job.MantisJobMetadataImpl;
import io.mantisrx.master.jobcluster.job.MantisStageMetadataImpl;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.master.jobcluster.job.worker.JobWorker;
import io.mantisrx.master.jobcluster.job.worker.MantisWorkerMetadataImpl;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.NamedJobDefinition;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.master.store.MantisJobMetadata;
import io.mantisrx.server.master.store.MantisStageMetadataWritable;
import io.mantisrx.server.master.store.MantisWorkerMetadataWritable;
import io.mantisrx.server.master.store.NamedJob;
import org.junit.Test;

/**
 * Round-trip tests for {@code DataFormatAdapter}: each test converts a new-format
 * domain object (job cluster config/metadata, SLA, completed job, worker, stage,
 * job metadata) to its legacy "writable"/{@link NamedJob} representation and back,
 * asserting field-by-field that nothing is lost or altered in either direction.
 */
public class DataFormatAdapterTest {

    public static final MachineDefinition DEFAULT_MACHINE_DEFINITION = new MachineDefinition(1, 10, 10, 10, 2);
    private static final SchedulingInfo DEFAULT_SCHED_INFO = new SchedulingInfo.Builder().numberOfStages(1).singleWorkerStageWithConstraints(DEFAULT_MACHINE_DEFINITION, Lists.newArrayList(), Lists.newArrayList()).build();
    private final LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl());

    /** JobClusterConfig -> NamedJob.Jar -> JobClusterConfig round trip preserves every field. */
    @Test
    public void jobClusterConfigToJarTest() {
        long uploadedAt = 1234l;
        String artifactName = "artifact1";
        String version = "0.0.1";
        JobClusterConfig config = new JobClusterConfig(artifactName, uploadedAt, version, DEFAULT_SCHED_INFO);
        try {
            NamedJob.Jar convertedJar = DataFormatAdapter.convertJobClusterConfigToJar(config);
            assertEquals(uploadedAt, convertedJar.getUploadedAt());
            // artifact name is encoded as an http:// URL in the legacy format
            assertEquals("http://" + artifactName, convertedJar.getUrl().toString());
            assertEquals(version, convertedJar.getVersion());
            assertEquals(DEFAULT_SCHED_INFO,convertedJar.getSchedulingInfo());
            JobClusterConfig regeneratedConfig = DataFormatAdapter.convertJarToJobClusterConfig(convertedJar);
            assertEquals(uploadedAt, regeneratedConfig.getUploadedAt());
            assertEquals(artifactName, regeneratedConfig.getArtifactName());
            assertEquals(version, regeneratedConfig.getVersion());
            assertEquals(DEFAULT_SCHED_INFO, regeneratedConfig.getSchedulingInfo());
        } catch (MalformedURLException e) {
            fail();
            e.printStackTrace();
        }
    }

    /** A bare artifact name becomes an http:// URL and can be extracted back unchanged. */
    @Test
    public void artifactNameTest() {
        String artifactName = "myartifact-0.0.1.zip";
        String version = "0.0.1";
        try {
            URL jar = DataFormatAdapter.generateURL(artifactName);
            assertEquals("http://myartifact-0.0.1.zip", jar.toString());
            assertEquals(artifactName, DataFormatAdapter.extractArtifactName(jar).orElse(""));
        } catch (MalformedURLException e) {
            e.printStackTrace();
            fail();
        }
    }

    /** An artifact name that already carries a scheme keeps it; extraction strips the scheme. */
    @Test
    public void artifactNameTest2() {
        String artifactName = "https://myartifact-0.0.1.zip";
        String version = "0.0.1";
        try {
            URL jar = DataFormatAdapter.generateURL(artifactName);
            assertEquals("https://myartifact-0.0.1.zip", jar.toString());
            assertEquals("myartifact-0.0.1.zip", DataFormatAdapter.extractArtifactName(jar).orElse(""));
        } catch (MalformedURLException e) {
            e.printStackTrace();
            fail();
        }
    }

    /** Extraction returns only the last path segment of a full artifact URL. */
    @Test
    public void extractArtifactNameTest1() throws MalformedURLException {
        URL url = new URL("http://mantisui.eu-west-1.dyntest.netflix.net/mantis-artifacts/nfmantis-sources-genericqueryable-source-6.0.8.zip");
        assertEquals("nfmantis-sources-genericqueryable-source-6.0.8.zip",DataFormatAdapter.extractArtifactName(url).orElse(""));
    }

    /** Extraction also works when the URL has no path prefix before the artifact. */
    @Test
    public void extractArtifactNameTest2() throws MalformedURLException {
        URL url = new URL("http://nfmantis-sources-genericqueryable-source-6.0.8.zip");
        assertEquals("nfmantis-sources-genericqueryable-source-6.0.8.zip",DataFormatAdapter.extractArtifactName(url).orElse(""));
    }

    /** SLA with a cron spec round-trips through NamedJob.SLA keeping spec and policy. */
    @Test
    public void slaConversionTestWithCronSpec() {
        int min = 1;
        int max = 10;
        String cronSpec = "0 0 0-23 * * ?";
        io.mantisrx.server.master.domain.SLA sla = new SLA(min, max, cronSpec,IJobClusterDefinition.CronPolicy.KEEP_EXISTING);
        NamedJob.SLA oldSlaFormat = DataFormatAdapter.convertSLAToNamedJobSLA(sla);
        // min/max are intentionally not asserted in the cron case:
        // assertEquals(min, oldSlaFormat.getMin());
        // assertEquals(max, oldSlaFormat.getMax());
        assertEquals(cronSpec, oldSlaFormat.getCronSpec());
        assertEquals(NamedJobDefinition.CronPolicy.KEEP_EXISTING,oldSlaFormat.getCronPolicy());
        SLA reconvertedSLA = DataFormatAdapter.convertToSLA(oldSlaFormat);
        assertEquals(sla, reconvertedSLA);
    }

    /** SLA without a cron spec round-trips keeping min/max. */
    @Test
    public void slaConversionTestNoCronSpec() {
        int min = 1;
        int max = 10;
        String cronSpec = "0 0 0-23 * * ?";
        io.mantisrx.server.master.domain.SLA sla = new SLA(min, max, null,null);
        NamedJob.SLA oldSlaFormat = DataFormatAdapter.convertSLAToNamedJobSLA(sla);
        assertEquals(min, oldSlaFormat.getMin());
        assertEquals(max, oldSlaFormat.getMax());
        // cron fields not asserted — no cron spec was set:
        // assertEquals(cronSpec, oldSlaFormat.getCronSpec());
        // assertEquals(NamedJobDefinition.CronPolicy.KEEP_EXISTING,oldSlaFormat.getCronPolicy());
        SLA reconvertedSLA = DataFormatAdapter.convertToSLA(oldSlaFormat);
        assertEquals(sla, reconvertedSLA);
    }

    /** Full job-cluster metadata -> NamedJob -> metadata round trip, asserting every field both ways. */
    @Test
    public void jobClusterMetadataConversionTest() {
        String artifactName = "artifact1";
        String version = "0.0.1";
        List<Parameter> parameterList = new ArrayList<>();
        Parameter parameter = new Parameter("param1", "value1");
        parameterList.add(parameter);
        List<Label> labels = new ArrayList<>();
        Label label = new Label("label1", "labelvalue1");
        labels.add(label);
        long uAt = 1234l;
        JobClusterConfig jobClusterConfig = new JobClusterConfig.Builder()
                .withArtifactName(artifactName)
                .withSchedulingInfo(DEFAULT_SCHED_INFO)
                .withVersion(version)
                .withUploadedAt(uAt)
                .build();
        String clusterName = "clusterName1";
        JobOwner owner = new JobOwner("Neeraj", "Mantis", "desc", "nma@netflix.com", "repo");
        boolean isReadyForMaster = true;
        SLA sla = new SLA(1, 10, null, null);
        JobClusterDefinitionImpl clusterDefn = new JobClusterDefinitionImpl.Builder()
                .withJobClusterConfig(jobClusterConfig)
                .withName(clusterName)
                .withUser("user1")
                .withIsReadyForJobMaster(isReadyForMaster)
                .withOwner(owner)
                .withMigrationConfig(WorkerMigrationConfig.DEFAULT)
                .withSla(sla)
                .withParameters(parameterList)
                .withLabels(labels)
                .build();
        int lastJobCnt = 10;
        boolean disabled = false;
        IJobClusterMetadata clusterMeta = new JobClusterMetadataImpl.Builder()
                .withJobClusterDefinition(clusterDefn)
                .withLastJobCount(lastJobCnt)
                .withIsDisabled(disabled)
                .build();
        // forward conversion: new-format metadata -> legacy NamedJob
        NamedJob namedJob = DataFormatAdapter.convertJobClusterMetadataToNamedJob(clusterMeta);
        assertEquals(disabled,namedJob.getDisabled());
        assertEquals(clusterName, namedJob.getName());
        assertEquals(lastJobCnt,namedJob.getLastJobCount());
        assertEquals(1, namedJob.getLabels().size());
        assertEquals(label, namedJob.getLabels().get(0));
        assertEquals(owner, namedJob.getOwner());
        assertEquals(isReadyForMaster, namedJob.getIsReadyForJobMaster());
        assertEquals(WorkerMigrationConfig.DEFAULT, namedJob.getMigrationConfig());
        // assert parameters
        assertEquals(parameterList.size(), namedJob.getParameters().size());
        assertEquals(parameter, namedJob.getParameters().get(0));
        // assert sla
        assertEquals(sla.getMin(), namedJob.getSla().getMin());
        assertEquals(sla.getMax(), namedJob.getSla().getMax());
        // assert jar info
        assertEquals(1, namedJob.getJars().size());
        // jar info
        NamedJob.Jar jar = namedJob.getJars().get(0);
        assertEquals(uAt, jar.getUploadedAt());
        assertEquals(DEFAULT_SCHED_INFO,jar.getSchedulingInfo());
        assertEquals(version, jar.getVersion());
        assertEquals(artifactName, DataFormatAdapter.extractArtifactName(jar.getUrl()).orElse(""));
        // reverse conversion: legacy NamedJob -> new-format metadata
        IJobClusterMetadata reconvertedJobCluster = DataFormatAdapter.convertNamedJobToJobClusterMetadata(namedJob);
        assertEquals(disabled,reconvertedJobCluster.isDisabled());
        assertEquals(clusterName,reconvertedJobCluster.getJobClusterDefinition().getName());
        assertEquals(lastJobCnt,reconvertedJobCluster.getLastJobCount());
        assertEquals(1, reconvertedJobCluster.getJobClusterDefinition().getLabels().size());
        assertEquals(label, reconvertedJobCluster.getJobClusterDefinition().getLabels().get(0));
        assertEquals(owner, reconvertedJobCluster.getJobClusterDefinition().getOwner());
        assertEquals(isReadyForMaster,reconvertedJobCluster.getJobClusterDefinition().getIsReadyForJobMaster());
        assertEquals(WorkerMigrationConfig.DEFAULT,reconvertedJobCluster.getJobClusterDefinition().getWorkerMigrationConfig());
        assertEquals(parameterList.size(), reconvertedJobCluster.getJobClusterDefinition().getParameters().size());
        assertEquals(parameter, reconvertedJobCluster.getJobClusterDefinition().getParameters().get(0));
        assertEquals(sla.getMin(), reconvertedJobCluster.getJobClusterDefinition().getSLA().getMin());
        assertEquals(sla.getMax(), reconvertedJobCluster.getJobClusterDefinition().getSLA().getMax());
        JobClusterConfig clusterConfig1 = reconvertedJobCluster.getJobClusterDefinition().getJobClusterConfig();
        assertEquals(uAt,clusterConfig1.getUploadedAt());
        assertEquals(DEFAULT_SCHED_INFO,clusterConfig1.getSchedulingInfo());
        assertEquals(version,clusterConfig1.getVersion());
        assertEquals(artifactName, clusterConfig1.getArtifactName());
    }

    /** CompletedJob <-> NamedJob.CompletedJob round trip, including JobState -> MantisJobState mapping. */
    @Test
    public void completedJobToNamedJobCompletedJobTest() {
        String name = "name";
        String jobId = "name-1";
        String version = "0.0.1";
        JobState jobState = JobState.Completed;
        long submittedAt = 1234l;
        long terminatedAt = 2234l;
        String me = "me";
        List<Label> labels = new ArrayList<>();
        labels.add(new Label("l1","v1"));
        JobClusterDefinitionImpl.CompletedJob cJob = new JobClusterDefinitionImpl.CompletedJob(name, jobId, version, jobState, submittedAt, terminatedAt, me, labels);
        NamedJob.CompletedJob njobCJob = DataFormatAdapter.convertCompletedJobToNamedJobCompletedJob(cJob);
        assertEquals(name,njobCJob.getName());
        assertEquals(jobId,njobCJob.getJobId());
        assertEquals(version,njobCJob.getVersion());
        assertEquals(MantisJobState.Completed,njobCJob.getState());
        assertEquals(submittedAt,njobCJob.getSubmittedAt());
        assertEquals(terminatedAt,njobCJob.getTerminatedAt());
        JobClusterDefinitionImpl.CompletedJob reconverted = DataFormatAdapter.convertNamedJobCompletedJobToCompletedJob(njobCJob);
        assertEquals(cJob,reconverted);
    }

    /**
     * Deserializes a legacy worker-metadata JSON payload, checks each deserialized field,
     * then converts it to the new-format worker metadata and compares against an
     * expected JobWorker built with the same values.
     */
    @Test
    public void oldMantisWorkerMetadataReadTest() throws IOException {
        ObjectMapper mapper = new ObjectMapper().registerModule(new Jdk8Module());
        // legacy on-disk JSON shape for MantisWorkerMetadataWritable
        final String oldWorkerMetadataWriteableStr = "{\n" +
                " \"workerIndex\": 0,\n" +
                " \"workerNumber\": 1,\n" +
                " \"jobId\": \"cname-1\",\n" +
                " \"stageNum\": 1,\n" +
                " \"numberOfPorts\": 3,\n" +
                " \"metricsPort\": 1,\n" +
                " \"consolePort\": 3,\n" +
                " \"debugPort\": 2,\n" +
                " \"customPort\": 5,\n" +
                " \"ports\": [4],\n" +
                " \"state\": \"Completed\",\n" +
                " \"slave\": \"slave1\",\n" +
                " \"slaveID\": \"slaveId1\",\n" +
                " \"cluster\": \"prefCluster\",\n" +
                " \"acceptedAt\": 999,\n" +
                " \"launchedAt\": 1000,\n" +
                " \"startingAt\": 1234,\n" +
                " \"startedAt\": 1001,\n" +
                " \"completedAt\": 2000,\n" +
                " \"reason\": \"Normal\",\n" +
                " \"resubmitOf\": 42,\n" +
                " \"totalResubmitCount\": 1\n" +
                "}";
        MantisWorkerMetadataWritable oldMetadataWritable = mapper.readValue(oldWorkerMetadataWriteableStr, MantisWorkerMetadataWritable.class);
        Optional<String> prefCluster = of("prefCluster");
        int metricsPort = 1;
        int debugPort = 2;
        int consolePort = 3;
        int customPort = 5;
        int ssePort = 4;
        List<Integer> ports = Lists.newArrayList();
        ports.add(metricsPort);
        ports.add(debugPort);
        ports.add(consolePort);
        ports.add(customPort);
        ports.add(ssePort);
        WorkerPorts workerPorts = new WorkerPorts(ports);
        int workerNum = 1;
        int workerIndex = 0;
        long startingAt = 1234l;
        int stageNum = 1;
        String slaveid = "slaveId1";
        String slave = "slave1";
        int resubmitCnt = 1;
        int portNums = ports.size();
        long launchedAt = 1000l;
        JobId jobId = new JobId("cname", 1);
        long acceptedAt = 999l;
        long completedAt = 2000l;
        long startedAt = 1001l;
        int resubOf = 42;
        // expected new-format worker built from the same values as the JSON above
        JobWorker worker = new JobWorker.Builder()
                .withPreferredCluster(prefCluster)
                .withJobCompletedReason(JobCompletedReason.Normal)
                .withWorkerPorts(workerPorts)
                .withWorkerNumber(workerNum)
                .withWorkerIndex(workerIndex)
                .withState(WorkerState.Completed)
                .withStartingAt(startingAt)
                .withStartedAt(startedAt)
                .withCompletedAt(completedAt)
                .withStageNum(stageNum)
                .withSlaveID(slaveid)
                .withSlave(slave)
                .withResubmitCount(resubmitCnt)
                .withResubmitOf(resubOf)
                .withNumberOfPorts(portNums)
                .withLaunchedAt(launchedAt)
                .withJobId(jobId)
                .withAcceptedAt(acceptedAt)
                .withLifecycleEventsPublisher(eventPublisher)
                .build();
        IMantisWorkerMetadata expectedWorkerMeta = worker.getMetadata();
        assertEquals(prefCluster,oldMetadataWritable.getCluster());
        assertEquals(workerIndex, oldMetadataWritable.getWorkerIndex());
        assertEquals(workerNum, oldMetadataWritable.getWorkerNumber());
        assertEquals(jobId.getId(),oldMetadataWritable.getJobId());
        assertEquals(acceptedAt,oldMetadataWritable.getAcceptedAt());
        assertEquals(startingAt,oldMetadataWritable.getStartingAt());
        assertEquals(startedAt, oldMetadataWritable.getStartedAt());
        assertEquals(launchedAt, oldMetadataWritable.getLaunchedAt());
        assertEquals(completedAt, oldMetadataWritable.getCompletedAt());
        assertEquals(stageNum, oldMetadataWritable.getStageNum());
        assertEquals(slave, oldMetadataWritable.getSlave());
        assertEquals(slaveid, oldMetadataWritable.getSlaveID());
        assertEquals(metricsPort, oldMetadataWritable.getMetricsPort());
        assertEquals(consolePort, oldMetadataWritable.getConsolePort());
        assertEquals(debugPort, oldMetadataWritable.getDebugPort());
        assertEquals(5, oldMetadataWritable.getCustomPort());
        assertEquals(MantisJobState.Completed, oldMetadataWritable.getState());
        assertEquals(resubmitCnt, oldMetadataWritable.getTotalResubmitCount());
        assertEquals(resubOf, oldMetadataWritable.getResubmitOf());
        assertEquals(3, oldMetadataWritable.getNumberOfPorts());
        assertEquals(1, oldMetadataWritable.getPorts().size());
        assertEquals(ssePort, (long)oldMetadataWritable.getPorts().get(0));
        assertEquals(JobCompletedReason.Normal, oldMetadataWritable.getReason());
        JobWorker convertedMetadata = DataFormatAdapter.convertMantisWorkerMetadataWriteableToMantisWorkerMetadata(oldMetadataWritable, eventPublisher);
        assertEquals(expectedWorkerMeta, convertedMetadata.getMetadata());
    }

    /** New-format worker metadata -> legacy writable -> new format round trip. */
    @Test
    public void mantisWorkerMetadataToMetadataWritebleTest() {
        Optional<String> prefCluster = of("prefCluster");
        int metricsPort = 1;
        int debugPort = 2;
        int consolePort = 3;
        int customPort = 4;
        int ssePort = 5;
        List<Integer> ports = Lists.newArrayList();
        ports.add(metricsPort);
        ports.add(debugPort);
        ports.add(consolePort);
        ports.add(customPort);
        ports.add(ssePort);
        WorkerPorts workerPorts = new WorkerPorts(ports);
        int workerNum = 1;
        int workerIndex = 0;
        long startingAt = 1234l;
        int stageNum = 1;
        String slaveid = "slaveId1";
        String slave = "slave1";
        int resubmitCnt = 1;
        int portNums = ports.size();
        long launchedAt = 1000l;
        JobId jobId = new JobId("cname", 1);
        long acceptedAt = 999l;
        long completedAt = 2000l;
        long startedAt = 1001l;
        int resubOf = 42;
        JobWorker worker = new JobWorker.Builder()
                .withPreferredCluster(prefCluster)
                .withJobCompletedReason(JobCompletedReason.Normal)
                .withWorkerPorts(workerPorts)
                .withWorkerNumber(workerNum)
                .withWorkerIndex(workerIndex)
                .withState(WorkerState.Completed)
                .withStartingAt(startingAt)
                .withStartedAt(startedAt)
                .withCompletedAt(completedAt)
                .withStageNum(stageNum)
                .withSlaveID(slaveid)
                .withSlave(slave)
                .withResubmitCount(resubmitCnt)
                .withResubmitOf(resubOf)
                .withNumberOfPorts(portNums)
                .withLaunchedAt(launchedAt)
                .withJobId(jobId)
                .withAcceptedAt(acceptedAt)
                .withLifecycleEventsPublisher(eventPublisher)
                .build();
        IMantisWorkerMetadata workerMeta = worker.getMetadata();
        MantisWorkerMetadataWritable metadataWritable = DataFormatAdapter.convertMantisWorkerMetadataToMantisWorkerMetadataWritable(workerMeta);
        assertEquals(prefCluster,metadataWritable.getCluster());
        assertEquals(workerIndex, metadataWritable.getWorkerIndex());
        assertEquals(workerNum, metadataWritable.getWorkerNumber());
        assertEquals(jobId.getId(),metadataWritable.getJobId());
        assertEquals(acceptedAt,metadataWritable.getAcceptedAt());
        assertEquals(startingAt,metadataWritable.getStartingAt());
        assertEquals(startedAt, metadataWritable.getStartedAt());
        assertEquals(launchedAt, metadataWritable.getLaunchedAt());
        assertEquals(completedAt, metadataWritable.getCompletedAt());
        assertEquals(stageNum, metadataWritable.getStageNum());
        assertEquals(slave, metadataWritable.getSlave());
        assertEquals(slaveid, metadataWritable.getSlaveID());
        assertEquals(metricsPort, metadataWritable.getMetricsPort());
        assertEquals(consolePort, metadataWritable.getConsolePort());
        assertEquals(debugPort, metadataWritable.getDebugPort());
        assertEquals(customPort, metadataWritable.getCustomPort());
        assertEquals(MantisJobState.Completed, metadataWritable.getState());
        assertEquals(resubmitCnt, metadataWritable.getTotalResubmitCount());
        assertEquals(resubOf, metadataWritable.getResubmitOf());
        assertEquals(portNums, metadataWritable.getNumberOfPorts());
        assertEquals(1, metadataWritable.getPorts().size());
        assertEquals(ssePort, (long)metadataWritable.getPorts().get(0));
        assertEquals(JobCompletedReason.Normal, metadataWritable.getReason());
        JobWorker reconverted = DataFormatAdapter.convertMantisWorkerMetadataWriteableToMantisWorkerMetadata(metadataWritable, eventPublisher);
        assertEquals(workerMeta, reconverted.getMetadata());
    }

    /** Stage metadata (incl. scaling policy and constraints) -> legacy writable -> new format round trip. */
    @Test
    public void convertMantisStageMetaTest() {
        Map<StageScalingPolicy.ScalingReason, StageScalingPolicy.Strategy> smap = new HashMap<>();
        smap.put(StageScalingPolicy.ScalingReason.CPU, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.CPU, 0.5, 0.75, null));
        smap.put(StageScalingPolicy.ScalingReason.DataDrop, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.DataDrop, 0.0, 2.0, null));
        int stageNo = 1;
        int min = 3;
        int max = 10;
        int increment = 1;
        int decrement = 1;
        int coolDownSecs = 300;
        StageScalingPolicy stageScalingPolicy = new StageScalingPolicy(stageNo, min, max, increment, decrement, coolDownSecs, smap);
        List<JobConstraints> softConstraintsList = new ArrayList<>();
        softConstraintsList.add(JobConstraints.ExclusiveHost);
        List<JobConstraints> hardConstraintsList = new ArrayList<>();
        hardConstraintsList.add(JobConstraints.M3Cluster);
        JobId jobId = new JobId("cName",1);
        int numWorkers = 1;
        int numStages = 2;
        boolean isScalable = true;
        IMantisStageMetadata stageMeta = new MantisStageMetadataImpl.Builder()
                .withStageNum(stageNo)
                .withScalingPolicy(stageScalingPolicy)
                .withNumWorkers(numWorkers)
                .withMachineDefinition(DEFAULT_MACHINE_DEFINITION)
                .withNumStages(numStages)
                .withSoftConstraints(softConstraintsList)
                .withHardConstraints(hardConstraintsList)
                .withJobId(jobId)
                .isScalable(isScalable)
                .build();
        MantisStageMetadataWritable stageMetadataWritable = DataFormatAdapter.convertMantisStageMetadataToMantisStageMetadataWriteable(stageMeta);
        assertEquals(jobId.getId(),stageMetadataWritable.getJobId());
        assertEquals(JobConstraints.M3Cluster,stageMetadataWritable.getHardConstraints().get(0));
        assertEquals(JobConstraints.ExclusiveHost, stageMetadataWritable.getSoftConstraints().get(0));
        assertEquals(stageScalingPolicy, stageMetadataWritable.getScalingPolicy());
        assertTrue(stageMetadataWritable.getScalable());
        assertEquals(DEFAULT_MACHINE_DEFINITION, stageMetadataWritable.getMachineDefinition());
        assertEquals(numWorkers, stageMetadataWritable.getNumWorkers());
        assertEquals(numStages, stageMetadataWritable.getNumStages());
        assertEquals(stageNo,stageMetadataWritable.getStageNum());
        IMantisStageMetadata reconverted = DataFormatAdapter.convertMantisStageMetadataWriteableToMantisStageMetadata(stageMetadataWritable, eventPublisher);
        assertEquals(stageMeta,reconverted);
    }

    /**
     * Full job metadata (definition + stage + worker) -> legacy MantisJobMetadata -> new
     * format round trip, asserting each legacy field and then each reconverted field.
     */
    @Test
    public void convertMantisJobWriteableTest() throws Exception {
        String artifactName = "artifact";
        String version = "1.0.0";
        String clusterName = "myCluster";
        List<Label> labels = new ArrayList<>();
        Label label = new Label("myLable","myVal");
        labels.add(label);
        List<Parameter> params = new ArrayList<>();
        Parameter param = new Parameter("myparam", "myval");
        params.add(param);
        long subTimeout = 1000;
        JobSla jobSla = new JobSla(100,10,JobSla.StreamSLAType.Lossy,MantisJobDurationType.Perpetual,"userType");
        JobDefinition jobDefn = new JobDefinition.Builder()
                .withArtifactName(artifactName)
                .withName(clusterName)
                .withLabels(labels)
                .withParameters(params)
                .withSchedulingInfo(DEFAULT_SCHED_INFO)
                .withUser("user")
                .withJobSla(jobSla)
                .withSubscriptionTimeoutSecs(subTimeout)
                .withNumberOfStages(DEFAULT_SCHED_INFO.getStages().size())
                .build();
        JobId jobId = new JobId(clusterName,1);
        long currTime = System.currentTimeMillis();
        Instant startedAt = Instant.ofEpochMilli(currTime);
        Instant endedAt = startedAt.plusSeconds(5);
        Instant submittedAt = startedAt.minusSeconds(5);
        IMantisJobMetadata jobmeta = new MantisJobMetadataImpl.Builder()
                .withJobDefinition(jobDefn)
                .withJobId(jobId)
                .withNextWorkerNumToUse(2)
                .withSubmittedAt(submittedAt)
                .withJobState(JobState.Launched)
                .build();
        IMantisWorkerMetadata workerMetadata = new MantisWorkerMetadataImpl(0, 1, jobId.getId(), 1,3, new WorkerPorts(Lists.newArrayList(8000, 9000, 9010, 9020, 9030)), WorkerState.Started, "slave","slaveId",startedAt.toEpochMilli(),startedAt.toEpochMilli(), startedAt.toEpochMilli(),startedAt.toEpochMilli(),-1,JobCompletedReason.Normal, 0,0,of("cluster"));
        // attach one stage and one worker so the conversion exercises the nested structures
        ((MantisJobMetadataImpl) jobmeta).addJobStageIfAbsent(new MantisStageMetadataImpl.Builder()
                .withNumStages(1)
                .withStageNum(1)
                .withNumWorkers(1)
                .withJobId(jobId)
                .withHardConstraints(Lists.newArrayList())
                .withSoftConstraints(Lists.newArrayList())
                .withMachineDefinition(DEFAULT_MACHINE_DEFINITION)
                .build());
        ((MantisJobMetadataImpl) jobmeta).addWorkerMetadata(1, new JobWorker(workerMetadata,eventPublisher));
        MantisJobMetadata oldFormat = DataFormatAdapter.convertMantisJobMetadataToMantisJobMetadataWriteable(jobmeta);
        System.out.println("oldForamt -> " + oldFormat);
        assertEquals(jobId.getId(), oldFormat.getJobId());
        assertEquals(label,oldFormat.getLabels().get(0));
        assertEquals(param,oldFormat.getParameters().get(0));
        assertEquals(clusterName,oldFormat.getName());
        assertEquals(jobSla,oldFormat.getSla());
        assertEquals(1,oldFormat.getNumStages());
        assertEquals(subTimeout,oldFormat.getSubscriptionTimeoutSecs());
        assertEquals(2,oldFormat.getNextWorkerNumberToUse());
        assertEquals("http://" + artifactName,oldFormat.getJarUrl().toString());
        assertEquals(MantisJobState.Launched, oldFormat.getState());
        assertEquals(submittedAt.toEpochMilli(),oldFormat.getSubmittedAt());
        assertEquals("user",oldFormat.getUser());
        IMantisJobMetadata reconverted = DataFormatAdapter.convertMantisJobWriteableToMantisJobMetadata(oldFormat, eventPublisher);
        System.out.println("newForamt -> " + reconverted);
        // whole-object equality intentionally not asserted; fields compared individually below
        //assertEquals(jobmeta, reconverted);
        // assertTrue(jobmeta.equals(reconverted));
        assertEquals(jobmeta.getArtifactName(),reconverted.getArtifactName());
        assertEquals(jobmeta.getClusterName(),reconverted.getClusterName());
        System.out.println("expected Jobdef " + jobmeta.getJobDefinition());
        System.out.println("actual Jobdef " + reconverted.getJobDefinition());
        assertEquals(jobmeta.getJobDefinition(),reconverted.getJobDefinition());
        assertEquals(jobmeta.getJobId(),reconverted.getJobId());
        assertEquals(jobmeta.getJobJarUrl(),reconverted.getJobJarUrl());
        assertEquals(jobmeta.getLabels().get(0),reconverted.getLabels().get(0));
        assertEquals(jobmeta.getParameters().get(0),reconverted.getParameters().get(0));
        assertEquals(jobmeta.getMinRuntimeSecs(),reconverted.getMinRuntimeSecs());
        assertEquals(jobmeta.getNextWorkerNumberToUse(),reconverted.getNextWorkerNumberToUse());
        assertEquals(jobmeta.getSla().get(),reconverted.getSla().get());
        assertEquals(jobmeta.getSubmittedAtInstant(),reconverted.getSubmittedAtInstant());
        assertEquals(jobmeta.getState(),reconverted.getState());
        assertEquals(jobmeta.getSubscriptionTimeoutSecs(),reconverted.getSubscriptionTimeoutSecs());
        assertEquals(jobmeta.getTotalStages(),reconverted.getTotalStages());
        assertEquals(jobmeta.getUser(),reconverted.getUser());
        // scheduling info equality not asserted in the original test:
        // assertEquals(jobmeta.getSchedulingInfo(), reconverted.getSchedulingInfo());
    }
}
4,210
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/server/master/domain/JobIdTest.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.domain;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import org.junit.Test;

import io.mantisrx.server.master.domain.JobId;

import java.util.Optional;

/**
 * Verifies that a {@link JobId} survives a round trip through both of its
 * string representations ({@code toString()} and {@code getId()}) via
 * {@link JobId#fromId(String)}.
 */
public class JobIdTest {

    @Test
    public void testJobId() {
        final JobId jobId = new JobId("clustername", 10);

        // Round trip via toString(). The original used bare `assert(...)`, which is a
        // no-op unless the JVM runs with -ea, so the presence check silently never ran
        // under a normal JUnit execution; assertTrue always executes.
        final String idString1 = jobId.toString();
        final Optional<JobId> fromId = JobId.fromId(idString1);
        assertTrue(fromId.isPresent());
        assertEquals(jobId, fromId.get());

        // Round trip via getId().
        final String idString2 = jobId.getId();
        final Optional<JobId> fromId2 = JobId.fromId(idString2);
        assertTrue(fromId2.isPresent());
        assertEquals(jobId, fromId2.get());
    }
}
4,211
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/server/master/domain/JobClusterConfigTest.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.domain; import io.mantisrx.shaded.com.google.common.collect.Lists; import io.mantisrx.runtime.JobOwner; import io.mantisrx.runtime.MachineDefinition; import io.mantisrx.runtime.WorkerMigrationConfig; import io.mantisrx.runtime.descriptor.SchedulingInfo; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import org.junit.Test; public class JobClusterConfigTest { private static final SchedulingInfo DEFAULT_SCHED_INFO = new SchedulingInfo.Builder().numberOfStages(1).singleWorkerStageWithConstraints(new MachineDefinition(1, 10, 10, 10, 2), Lists.newArrayList(), Lists.newArrayList()).build(); @Test public void happyTest() { String name = "happyTest"; JobClusterConfig clusterConfig = new JobClusterConfig.Builder() .withArtifactName("myart") .withSchedulingInfo(DEFAULT_SCHED_INFO) .withVersion("0.0.1") .build(); try { final JobClusterDefinitionImpl fakeJobCluster = new JobClusterDefinitionImpl.Builder() .withJobClusterConfig(clusterConfig) .withName(name) .withUser("nj") .withParameters(Lists.newArrayList()) .withIsReadyForJobMaster(true) .withOwner(new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo")) .withMigrationConfig(WorkerMigrationConfig.DEFAULT) .build(); } catch(Exception e) { fail(); } } @Test(expected = Exception.class) public void 
noSchedInfoFails() { String name = "noSchedInfoFails"; JobClusterConfig clusterConfig = new JobClusterConfig.Builder() .withArtifactName("myart") .withSchedulingInfo(null) .withVersion("0.0.1") .build(); final JobClusterDefinitionImpl fakeJobCluster = new JobClusterDefinitionImpl.Builder() .withJobClusterConfig(clusterConfig) .withName(name) .withParameters(Lists.newArrayList()) .withUser("nj") .withIsReadyForJobMaster(true) .withOwner(new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo")) .withMigrationConfig(WorkerMigrationConfig.DEFAULT) .build(); } @Test(expected = Exception.class) public void noArtifactNameFails() { String name = "noArtifactNameFails"; JobClusterConfig clusterConfig = new JobClusterConfig.Builder() .withArtifactName(null) .withSchedulingInfo(DEFAULT_SCHED_INFO) .withVersion("0.0.1") .build(); final JobClusterDefinitionImpl fakeJobCluster = new JobClusterDefinitionImpl.Builder() .withJobClusterConfig(clusterConfig) .withName(name) .withUser("nj") .withParameters(Lists.newArrayList()) .withIsReadyForJobMaster(true) .withOwner(new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo")) .withMigrationConfig(WorkerMigrationConfig.DEFAULT) .build(); } @Test public void noVersionAutogenerate() { String name = "noArtifactNameFails"; JobClusterConfig clusterConfig = new JobClusterConfig.Builder() .withArtifactName("myart") .withSchedulingInfo(DEFAULT_SCHED_INFO) .build(); final JobClusterDefinitionImpl fakeJobCluster = new JobClusterDefinitionImpl.Builder() .withJobClusterConfig(clusterConfig) .withName(name) .withUser("nj") .withParameters(Lists.newArrayList()) .withIsReadyForJobMaster(true) .withOwner(new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo")) .withMigrationConfig(WorkerMigrationConfig.DEFAULT) .build(); assertTrue(clusterConfig.getVersion() != null); } @Test public void jobClusterDefnTest() { String name = "jobClusterDefnTest"; JobClusterConfig clusterConfig = new JobClusterConfig.Builder() 
.withArtifactName("myart") .withSchedulingInfo(DEFAULT_SCHED_INFO) .withVersion("0.0.1") .build(); try { // null cluster config is not allowed final JobClusterDefinitionImpl fakeJobCluster = new JobClusterDefinitionImpl.Builder() .withJobClusterConfig(null) .withName(name) .withUser("nj") .withParameters(Lists.newArrayList()) .withIsReadyForJobMaster(true) .withOwner(new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo")) .withMigrationConfig(WorkerMigrationConfig.DEFAULT) .build(); fail(); } catch(Exception e) { } try { // cluster name is not specified final JobClusterDefinitionImpl fakeJobCluster = new JobClusterDefinitionImpl.Builder() .withJobClusterConfig(clusterConfig) .withUser("nj") .withIsReadyForJobMaster(true) .withOwner(new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo")) .withMigrationConfig(WorkerMigrationConfig.DEFAULT) .build(); fail(); } catch(Exception e) { } } }
4,212
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/scheduler/AgentsErrorMonitorTest.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.master.scheduler;

import static io.mantisrx.master.scheduler.AgentsErrorMonitorActor.props;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.testkit.javadsl.TestKit;
import io.mantisrx.master.events.LifecycleEventsProto;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import java.util.ArrayList;
import java.util.List;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import rx.functions.Action1;

/**
 * Tests {@link AgentsErrorMonitorActor}: per-host error counting, host
 * disable/enable actions, eviction of stale hosts, and filtering of events
 * that carry no host or a not-yet-launched worker.
 */
public class AgentsErrorMonitorTest {

    static ActorSystem system;
    private static TestKit probe;

    @BeforeClass
    public static void setup() {
        system = ActorSystem.create();
        probe = new TestKit(system);
    }

    @AfterClass
    public static void tearDown() {
        TestKit.shutdownActorSystem(system);
        system = null;
    }

    /** Crossing the error-count threshold must report "too many errors". */
    @Test
    public void hostErrorTest_disableHost() {
        EnableHostAction enableHostAction = new EnableHostAction();
        AgentsErrorMonitorActor.HostErrors hostErrors =
                new AgentsErrorMonitorActor.HostErrors("host1", enableHostAction, 120000, 3);
        long t1 = 1000;
        LifecycleEventsProto.WorkerStatusEvent workerStatusEvent =
                generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        assertFalse(hostErrors.addAndGetIsTooManyErrors(workerStatusEvent));
        t1 += 100;
        workerStatusEvent =
                generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        assertFalse(hostErrors.addAndGetIsTooManyErrors(workerStatusEvent));
        t1 += 100;
        workerStatusEvent =
                generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        assertFalse(hostErrors.addAndGetIsTooManyErrors(workerStatusEvent));
        t1 += 100;
        // No enable action should have fired yet.
        assertEquals(0, enableHostAction.getEnableHostList().size());
        // no of errors is now 4 which is greater than 3
        workerStatusEvent =
                generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        assertTrue(hostErrors.addAndGetIsTooManyErrors(workerStatusEvent));
    }

    /** A non-terminal event after too many errors must re-enable the host. */
    @Test
    public void hostErrorTest_enableHost() {
        EnableHostAction enableHostAction = new EnableHostAction();
        AgentsErrorMonitorActor.HostErrors hostErrors =
                new AgentsErrorMonitorActor.HostErrors("host1", enableHostAction, 120000, 3);
        long t1 = 1000;
        LifecycleEventsProto.WorkerStatusEvent workerStatusEvent =
                generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        assertFalse(hostErrors.addAndGetIsTooManyErrors(workerStatusEvent));
        t1 += 100;
        workerStatusEvent =
                generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        assertFalse(hostErrors.addAndGetIsTooManyErrors(workerStatusEvent));
        t1 += 100;
        workerStatusEvent =
                generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        assertFalse(hostErrors.addAndGetIsTooManyErrors(workerStatusEvent));
        t1 += 100;
        assertEquals(0, enableHostAction.getEnableHostList().size());
        // no of errors is now 4 which is greater than 3
        workerStatusEvent =
                generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        assertTrue(hostErrors.addAndGetIsTooManyErrors(workerStatusEvent));
        workerStatusEvent =
                generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Started, t1, "host1");
        assertFalse(hostErrors.addAndGetIsTooManyErrors(workerStatusEvent));
        // A non-terminal event after the threshold should re-enable the host.
        assertEquals(1, enableHostAction.getEnableHostList().size());
        assertEquals("host1", enableHostAction.getEnableHostList().get(0));
    }

    /** End-to-end: four failures on one host should disable the VM once. */
    @Test
    public void basicTest() {
        long too_old_millis = 4000;
        int error_check_window_count = 3;
        long error_check_window_millis = 2000;
        long disableDuration = 1000;
        long t1 = 1000;
        ActorRef errorMonitorActor = system.actorOf(
                props(too_old_millis, error_check_window_count, error_check_window_millis, disableDuration));
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        errorMonitorActor.tell(
                new AgentsErrorMonitorActor.InitializeAgentsErrorMonitor(schedulerMock), probe.getRef());
        LifecycleEventsProto.WorkerStatusEvent workerStatusEvent =
                generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        errorMonitorActor.tell(workerStatusEvent, probe.getRef());
        t1 += 100;
        workerStatusEvent =
                generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        errorMonitorActor.tell(workerStatusEvent, probe.getRef());
        t1 += 100;
        workerStatusEvent =
                generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        errorMonitorActor.tell(workerStatusEvent, probe.getRef());
        t1 += 100;
        errorMonitorActor.tell(new AgentsErrorMonitorActor.HostErrorMapRequest(), probe.getRef());
        AgentsErrorMonitorActor.HostErrorMapResponse hostErrorMapResponse =
                probe.expectMsgClass(AgentsErrorMonitorActor.HostErrorMapResponse.class);
        assertTrue(hostErrorMapResponse.getMap().containsKey("host1"));
        List<Long> errorTsList = hostErrorMapResponse.getMap().get("host1").getErrorTimestampList();
        assertEquals(3, errorTsList.size());
        workerStatusEvent =
                generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        errorMonitorActor.tell(workerStatusEvent, probe.getRef());
        errorMonitorActor.tell(new AgentsErrorMonitorActor.HostErrorMapRequest(), probe.getRef());
        hostErrorMapResponse =
                probe.expectMsgClass(AgentsErrorMonitorActor.HostErrorMapResponse.class);
        assertTrue(hostErrorMapResponse.getMap().containsKey("host1"));
        errorTsList = hostErrorMapResponse.getMap().get("host1").getErrorTimestampList();
        assertEquals(4, errorTsList.size());
        verify(schedulerMock, times(1)).disableVM("host1", disableDuration);
    }

    /** Hosts with no recent events should be evicted on the periodic check. */
    @Test
    public void testOldHostEviction() {
        long too_old_millis = 4000;
        int error_check_window_count = 3;
        long error_check_window_millis = 2000;
        long t1 = 1000;
        ActorRef errorMonitorActor = system.actorOf(
                props(too_old_millis, error_check_window_count, error_check_window_millis, 1000));
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        errorMonitorActor.tell(
                new AgentsErrorMonitorActor.InitializeAgentsErrorMonitor(schedulerMock), probe.getRef());
        LifecycleEventsProto.WorkerStatusEvent workerStatusEvent =
                generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        errorMonitorActor.tell(workerStatusEvent, probe.getRef());
        t1 += 100;
        workerStatusEvent =
                generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        errorMonitorActor.tell(workerStatusEvent, probe.getRef());
        t1 += 100;
        workerStatusEvent =
                generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        errorMonitorActor.tell(workerStatusEvent, probe.getRef());
        t1 += 100;
        // ensure host 1 is registered in error map
        errorMonitorActor.tell(new AgentsErrorMonitorActor.HostErrorMapRequest(), probe.getRef());
        AgentsErrorMonitorActor.HostErrorMapResponse hostErrorMapResponse =
                probe.expectMsgClass(AgentsErrorMonitorActor.HostErrorMapResponse.class);
        assertTrue(hostErrorMapResponse.getMap().containsKey("host1"));
        // simulate periodic check in the future
        errorMonitorActor.tell(
                new AgentsErrorMonitorActor.CheckHostHealthMessage(t1 + 100000), probe.getRef());
        // host1 should've been evicted as no new events were seen from it
        errorMonitorActor.tell(new AgentsErrorMonitorActor.HostErrorMapRequest(), probe.getRef());
        hostErrorMapResponse =
                probe.expectMsgClass(AgentsErrorMonitorActor.HostErrorMapResponse.class);
        assertTrue(hostErrorMapResponse.getMap().isEmpty());
    }

    /** Events without a hostname must be ignored by the monitor. */
    @Test
    public void noHostEventIgnoredTest() {
        long too_old_millis = 4000;
        int error_check_window_count = 3;
        long error_check_window_millis = 2000;
        ActorRef errorMonitorActor = system.actorOf(
                props(too_old_millis, error_check_window_count, error_check_window_millis, 1000));
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        errorMonitorActor.tell(
                new AgentsErrorMonitorActor.InitializeAgentsErrorMonitor(schedulerMock), probe.getRef());
        // This constructor overload carries no hostname.
        LifecycleEventsProto.WorkerStatusEvent workerStatusEvent =
                new LifecycleEventsProto.WorkerStatusEvent(
                        LifecycleEventsProto.StatusEvent.StatusEventType.INFO,
                        "test message",
                        1,
                        WorkerId.fromId("sine-function-1-worker-0-4").get(),
                        WorkerState.Failed,
                        1000);
        errorMonitorActor.tell(workerStatusEvent, probe.getRef());
        errorMonitorActor.tell(new AgentsErrorMonitorActor.HostErrorMapRequest(), probe.getRef());
        AgentsErrorMonitorActor.HostErrorMapResponse hostErrorMapResponse =
                probe.expectMsgClass(AgentsErrorMonitorActor.HostErrorMapResponse.class);
        assertTrue(hostErrorMapResponse.getMap().isEmpty());
    }

    /** A Launched (not-yet-running) worker event must not register the host. */
    @Test
    public void eventFromWorkerNotYetOnHostIgnoredTest() {
        long too_old_millis = 4000;
        int error_check_window_count = 3;
        long error_check_window_millis = 2000;
        ActorRef errorMonitorActor = system.actorOf(
                props(too_old_millis, error_check_window_count, error_check_window_millis, 1000));
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        errorMonitorActor.tell(
                new AgentsErrorMonitorActor.InitializeAgentsErrorMonitor(schedulerMock), probe.getRef());
        LifecycleEventsProto.WorkerStatusEvent workerStatusEvent =
                new LifecycleEventsProto.WorkerStatusEvent(
                        LifecycleEventsProto.StatusEvent.StatusEventType.INFO,
                        "test message",
                        1,
                        WorkerId.fromId("sine-function-1-worker-0-4").get(),
                        WorkerState.Launched,
                        "host1",
                        1000);
        errorMonitorActor.tell(workerStatusEvent, probe.getRef());
        errorMonitorActor.tell(new AgentsErrorMonitorActor.HostErrorMapRequest(), probe.getRef());
        AgentsErrorMonitorActor.HostErrorMapResponse hostErrorMapResponse =
                probe.expectMsgClass(AgentsErrorMonitorActor.HostErrorMapResponse.class);
        assertTrue(hostErrorMapResponse.getMap().isEmpty());
    }

    /** Builds a worker status event for the given worker id, state, time, host. */
    private LifecycleEventsProto.WorkerStatusEvent generateWorkerEvent(
            String id, WorkerState state, long ts, String host) {
        return new LifecycleEventsProto.WorkerStatusEvent(
                LifecycleEventsProto.StatusEvent.StatusEventType.INFO,
                "test message",
                1,
                WorkerId.fromId(id).get(),
                state,
                host,
                ts);
    }

    /** Records hosts the monitor asked to re-enable. Static: needs no outer instance. */
    static class EnableHostAction implements Action1<String> {
        List<String> enableHostList = new ArrayList<>();

        @Override
        public void call(String s) {
            enableHostList.add(s);
        }

        public List<String> getEnableHostList() {
            return this.enableHostList;
        }
    }

    /** Records hosts the monitor asked to disable. Static: needs no outer instance. */
    static class DisableHostAction implements Action1<String> {
        List<String> disableHostList = new ArrayList<>();

        @Override
        public void call(String s) {
            disableHostList.add(s);
        }

        public List<String> getDisableHostList() {
            return this.disableHostList;
        }
    }
}
4,213
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/scheduler/FakeMantisScheduler.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.master.scheduler;

import java.util.Collections;
import java.util.List;
import java.util.Optional;

import akka.actor.ActorRef;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import com.netflix.fenzo.VirtualMachineCurrentState;
import com.netflix.fenzo.VirtualMachineLease;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.master.jobcluster.job.worker.WorkerHeartbeat;
import io.mantisrx.master.jobcluster.job.worker.WorkerStatus;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.ScheduleRequest;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import io.mantisrx.server.master.scheduler.WorkerLaunched;
import io.mantisrx.server.master.scheduler.WorkerResourceStatus;

/**
 * A test double for {@link MantisScheduler} that performs no real scheduling.
 * Instead of placing workers, it immediately feeds the job cluster manager
 * actor a canned lifecycle sequence (launched, start-initiated, heartbeat),
 * so job-management tests can run without a live scheduler.
 */
public class FakeMantisScheduler implements MantisScheduler {

    private final ActorRef jobClusterManagerActor;

    public FakeMantisScheduler(final ActorRef jobClusterManagerActor) {
        this.jobClusterManagerActor = jobClusterManagerActor;
    }

    @Override
    public void scheduleWorker(final ScheduleRequest scheduleRequest) {
        final WorkerId workerId = scheduleRequest.getWorkerId();
        final int stageNum = scheduleRequest.getStageNum();

        // 1) Pretend the worker was placed on a fixed fake host/VM.
        final WorkerEvent launchedEvent = new WorkerLaunched(
                workerId,
                stageNum,
                "host1",
                "vm1",
                scheduleRequest.getPreferredCluster(),
                new WorkerPorts(Lists.newArrayList(8000, 9000, 9010, 9020, 9030)));
        jobClusterManagerActor.tell(launchedEvent, ActorRef.noSender());

        // 2) Fake the start-initiated transition.
        final WorkerEvent startInitEvent = new WorkerStatus(new Status(
                workerId.getJobId(),
                stageNum,
                workerId.getWorkerIndex(),
                workerId.getWorkerNum(),
                Status.TYPE.INFO,
                "fake Start Initiated",
                MantisJobState.StartInitiated));
        jobClusterManagerActor.tell(startInitEvent, ActorRef.noSender());

        // 3) Fake a first heartbeat so the worker appears healthy and started.
        final WorkerEvent heartbeatEvent = new WorkerHeartbeat(new Status(
                workerId.getJobId(),
                stageNum,
                workerId.getWorkerIndex(),
                workerId.getWorkerNum(),
                Status.TYPE.HEARTBEAT,
                "fake heartbeat event",
                MantisJobState.Started));
        jobClusterManagerActor.tell(heartbeatEvent, ActorRef.noSender());
    }

    @Override
    public void unscheduleWorker(final WorkerId workerId, final Optional<String> hostname) {
        // Report the worker as completed instead of actually tearing anything down.
        final WorkerEvent completedEvent = new WorkerResourceStatus(
                workerId,
                "fake unschedule worker",
                WorkerResourceStatus.VMResourceState.COMPLETED);
        jobClusterManagerActor.tell(completedEvent, ActorRef.noSender());
    }

    @Override
    public void unscheduleAndTerminateWorker(final WorkerId workerId,
                                             final Optional<String> hostname) {
        // Termination is indistinguishable from unscheduling in this fake.
        unscheduleWorker(workerId, hostname);
    }

    @Override
    public void updateWorkerSchedulingReadyTime(final WorkerId workerId, final long when) {
        // no-op
    }

    @Override
    public void initializeRunningWorker(final ScheduleRequest scheduleRequest,
                                        final String hostname) {
        // no-op
    }

    @Override
    public void rescindOffer(final String offerId) {
        // TBD
    }

    @Override
    public void rescindOffers(final String hostname) {
        // TBD
    }

    @Override
    public void addOffers(final List<VirtualMachineLease> offers) {
        // TBD
    }

    @Override
    public void disableVM(final String hostname, final long durationMillis)
            throws IllegalStateException {
        // TBD
    }

    @Override
    public void enableVM(final String hostname) {
        // TBD
    }

    @Override
    public List<VirtualMachineCurrentState> getCurrentVMState() {
        // TBD
        return Collections.emptyList();
    }

    @Override
    public void setActiveVmGroups(final List<String> activeVmGroups) {
        // TBD
    }
}
4,214
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/jobcluster/SLAEnforcerTest.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.master.jobcluster;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.util.List;
import java.util.Set;

import org.joda.time.Instant;
import org.junit.Test;

import io.mantisrx.shaded.com.google.common.collect.Lists;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.domain.SLA;
import io.mantisrx.master.jobcluster.JobClusterActor.JobInfo;
import io.mantisrx.master.jobcluster.job.JobState;

/**
 * Tests {@link SLAEnforcer}: job-id ordering, SLA argument validation, and the
 * min/max enforcement logic (how many jobs to launch, which jobs to delete).
 */
public class SLAEnforcerTest {

    /** sortJobsByIdDesc must yield jobs ordered by job number. */
    @Test
    public void testSorting() {
        Instant now = Instant.now();
        List<JobInfo> jobList = Lists.newArrayList(
                new JobInfo(new JobId("cname", 3), null, now.getMillis(), null, JobState.Accepted, null),
                new JobInfo(new JobId("cname", 1), null, now.getMillis(), null, JobState.Accepted, null),
                new JobInfo(new JobId("cname", 4), null, now.getMillis(), null, JobState.Launched, null),
                new JobInfo(new JobId("cname", 2), null, now.getMillis(), null, JobState.Launched, null)
        );
        int min = 1;
        int max = 1;
        SLA sla = new SLA(min, max, null, null);
        SLAEnforcer slaEnforcer = new SLAEnforcer(sla);
        Set<JobInfo> sortJobsByIdDesc = slaEnforcer.sortJobsByIdDesc(jobList);
        // NOTE(review): despite the method name, the observed order here is
        // ascending by job number — confirm against SLAEnforcer's comparator.
        String[] expectedOrder = {"cname-1", "cname-2", "cname-3", "cname-4"};
        JobInfo[] jobIdArray = sortJobsByIdDesc.toArray(new JobInfo[sortJobsByIdDesc.size()]);
        for (int i = 0; i < jobIdArray.length; i++) {
            assertEquals(expectedOrder[i], jobIdArray[i].jobId.getId());
        }
    }

    /** An SLA with min greater than max must be rejected. */
    @Test
    public void slaValidationTest() {
        int min = 5;
        int max = 2;
        try {
            // Fixed: use the declared min/max instead of repeating the literals.
            new SLA(min, max, null, null);
            fail();
        } catch (Exception e) {
            // expected
        }
    }

    /** enforceSLAMin must reject a negative active-job count. */
    @Test
    public void slaMinInvalidArgTest() {
        int min = 2;
        int max = 0;
        try {
            SLA sla = new SLA(min, max, null, null);
            SLAEnforcer slaEnf = new SLAEnforcer(sla);
            slaEnf.enforceSLAMin(-1, 0);
            fail();
        } catch (Exception e) {
            // expected
        }
    }

    /** With no SLA (or min 0) nothing needs to be launched. */
    @Test
    public void slaMinDefaultsTest() {
        SLA sla = new SLA(0, 0, null, null);
        SLAEnforcer slaEnf = new SLAEnforcer(sla);
        assertEquals(0, slaEnf.enforceSLAMin(2, 0));
        try {
            slaEnf = new SLAEnforcer(null);
            assertEquals(0, slaEnf.enforceSLAMin(2, 0));
        } catch (Exception e) {
            fail();
        }
    }

    /** enforceSLAMin returns how many jobs must be launched to reach min. */
    @Test
    public void slaMinTest() {
        int min = 2;
        int max = 10;
        SLA sla = new SLA(min, max, null, null);
        SLAEnforcer slaEnf = new SLAEnforcer(sla);
        // min is 2 and active jobs count is 2 no need to launch any jobs
        assertEquals(0, slaEnf.enforceSLAMin(2, 0));
        // min is 2 and active jobs is 1 and launched jobs is 1 no need to launch any more jobs
        assertEquals(0, slaEnf.enforceSLAMin(1, 1));
        // min is 2, active = 1, launched = 0, therefore launch 1 job
        assertEquals(1, slaEnf.enforceSLAMin(1, 0));
    }

    /** With no SLA or max 0, enforceSLAMax must delete nothing. */
    @Test
    public void slaMaxDefaultsTest() {
        Instant now = Instant.now();
        int min = 0;
        int max = 0;
        SLA sla = new SLA(min, max, null, null);
        SLAEnforcer slaEnf = new SLAEnforcer(null);
        List<JobInfo> jobList = Lists.newArrayList(
                new JobInfo(new JobId("cname", 1), null, now.getMillis(), null, JobState.Accepted, null),
                new JobInfo(new JobId("cname", 2), null, now.getMillis(), null, JobState.Launched, null),
                new JobInfo(new JobId("cname", 3), null, now.getMillis(), null, JobState.Accepted, null),
                new JobInfo(new JobId("cname", 4), null, now.getMillis(), null, JobState.Launched, null)
        );
        // sla not set nothing to enforce
        try {
            List<JobId> jobsToDelete = slaEnf.enforceSLAMax(jobList);
            assertTrue(jobsToDelete.isEmpty());
        } catch (Exception e) {
            fail();
        }
        slaEnf = new SLAEnforcer(sla);
        jobList = Lists.newArrayList(
                new JobInfo(new JobId("cname", 1), null, now.getMillis(), null, JobState.Accepted, null),
                new JobInfo(new JobId("cname", 2), null, now.getMillis(), null, JobState.Launched, null),
                new JobInfo(new JobId("cname", 3), null, now.getMillis(), null, JobState.Accepted, null),
                new JobInfo(new JobId("cname", 4), null, now.getMillis(), null, JobState.Launched, null)
        );
        // sla max is 0 nothing to enforce
        List<JobId> jobsToDelete = slaEnf.enforceSLAMax(jobList);
        assertTrue(jobsToDelete.isEmpty());
    }

    /** Max 2 with jobs beyond the SLA: the oldest surplus job is deleted. */
    @Test
    public void slaMaxTest() {
        Instant now = Instant.now();
        int min = 0;
        int max = 2;
        SLA sla = new SLA(min, max, null, null);
        SLAEnforcer slaEnf = new SLAEnforcer(sla);
        List<JobInfo> jobList = Lists.newArrayList(
                new JobInfo(new JobId("cname", 1), null, now.getMillis(), null, JobState.Accepted, null),
                new JobInfo(new JobId("cname", 2), null, now.getMillis(), null, JobState.Launched, null),
                new JobInfo(new JobId("cname", 3), null, now.getMillis(), null, JobState.Accepted, null),
                new JobInfo(new JobId("cname", 4), null, now.getMillis(), null, JobState.Launched, null)
        );
        // 2 active and 2 accepted jobs, sla met at job id 2, hence delete job 1
        List<JobId> jobsToDelete = slaEnf.enforceSLAMax(jobList);
        assertEquals(1, jobsToDelete.size());
        assertEquals("cname-1", jobsToDelete.get(0).getId());
    }

    @Test
    public void slaMaxTest2() {
        Instant now = Instant.now();
        int min = 0;
        int max = 2;
        SLA sla = new SLA(min, max, null, null);
        SLAEnforcer slaEnf = new SLAEnforcer(sla);
        List<JobInfo> jobList = Lists.newArrayList(
                new JobInfo(new JobId("cname", 1), null, now.getMillis(), null, JobState.Accepted, null),
                new JobInfo(new JobId("cname", 2), null, now.getMillis(), null, JobState.Launched, null),
                new JobInfo(new JobId("cname", 3), null, now.getMillis(), null, JobState.Launched, null),
                new JobInfo(new JobId("cname", 4), null, now.getMillis(), null, JobState.Launched, null)
        );
        // 3 active and 1 accepted jobs, terminate jobs 1 and 2.
        List<JobId> jobsToDelete = slaEnf.enforceSLAMax(jobList);
        assertEquals(2, jobsToDelete.size());
        // Consistent with slaMaxTest3/4: assert membership via contains()
        // rather than a hand-rolled boolean loop.
        assertTrue(jobsToDelete.contains(new JobId("cname", 1)));
        assertTrue(jobsToDelete.contains(new JobId("cname", 2)));
    }

    @Test
    public void slaMaxTest3() {
        Instant now = Instant.now();
        int min = 0;
        int max = 2;
        SLA sla = new SLA(min, max, null, null);
        SLAEnforcer slaEnf = new SLAEnforcer(sla);
        List<JobInfo> jobList = Lists.newArrayList(
                new JobInfo(new JobId("cname", 5), null, now.getMillis(), null, JobState.Accepted, null),
                new JobInfo(new JobId("cname", 1), null, now.getMillis(), null, JobState.Accepted, null),
                new JobInfo(new JobId("cname", 4), null, now.getMillis(), null, JobState.Launched, null),
                new JobInfo(new JobId("cname", 2), null, now.getMillis(), null, JobState.Accepted, null),
                new JobInfo(new JobId("cname", 3), null, now.getMillis(), null, JobState.Accepted, null),
                new JobInfo(new JobId("cname", 6), null, now.getMillis(), null, JobState.Launched, null)
        );
        // 2 active and 4 accepted jobs, terminate jobs 3,2,1
        List<JobId> jobsToDelete = slaEnf.enforceSLAMax(jobList);
        assertEquals(3, jobsToDelete.size());
        assertTrue(jobsToDelete.contains(new JobId("cname", 1)));
        assertTrue(jobsToDelete.contains(new JobId("cname", 2)));
        assertTrue(jobsToDelete.contains(new JobId("cname", 3)));
    }

    @Test
    public void slaMaxTest4() {
        Instant now = Instant.now();
        int min = 0;
        int max = 2;
        SLA sla = new SLA(min, max, null, null);
        SLAEnforcer slaEnf = new SLAEnforcer(sla);
        List<JobInfo> jobList = Lists.newArrayList(
                new JobInfo(new JobId("cname", 4), null, now.getMillis(), null, JobState.Launched, null),
                new JobInfo(new JobId("cname", 1), null, now.getMillis(), null, JobState.Accepted, null),
                new JobInfo(new JobId("cname", 2), null, now.getMillis(), null, JobState.Accepted, null),
                new JobInfo(new JobId("cname", 6), null, now.getMillis(), null, JobState.Launched, null),
                new JobInfo(new JobId("cname", 3), null, now.getMillis(), null, JobState.Accepted, null),
                new JobInfo(new JobId("cname", 5), null, now.getMillis(), null, JobState.Accepted, null),
                new JobInfo(new JobId("cname", 7), null, now.getMillis(), null, JobState.Launched, null)
        );
        // 3 active and 4 accepted jobs, terminate jobs 1 & 2 & 3 & 4 & 5
        List<JobId> jobsToDelete = slaEnf.enforceSLAMax(jobList);
        assertEquals(5, jobsToDelete.size());
        assertTrue(jobsToDelete.contains(new JobId("cname", 1)));
        assertTrue(jobsToDelete.contains(new JobId("cname", 2)));
        assertTrue(jobsToDelete.contains(new JobId("cname", 3)));
        assertTrue(jobsToDelete.contains(new JobId("cname", 4)));
        assertTrue(jobsToDelete.contains(new JobId("cname", 5)));
    }
}
4,215
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/jobcluster/JobDefinitionResolverTest.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.jobcluster;

import io.mantisrx.shaded.com.google.common.collect.Lists;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.command.InvalidJobException;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.server.master.domain.JobClusterConfig;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.SLA;
import org.junit.Test;

import java.util.ArrayList;
import java.util.List;
import java.util.Optional;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

/**
 * Tests for {@code JobDefinitionResolver}: verifies how a partially specified
 * {@link JobDefinition} submitted by a user is merged with the defaults stored on
 * the owning job cluster. The matrix of scenarios covers every combination of
 * artifact name / version / scheduling info being present or absent in the
 * submitted definition.
 */
public class JobDefinitionResolverTest {

    // SLA that enforces nothing (min 0, max 0, no cron, no policy).
    public static final SLA NO_OP_SLA = new SLA(0, 0, null, null);

    public static final MachineDefinition DEFAULT_MACHINE_DEFINITION = new MachineDefinition(1, 10, 10, 10, 2);
    // One-stage scheduling info with a single worker and no constraints.
    public static final SchedulingInfo SINGLE_WORKER_SCHED_INFO = new SchedulingInfo.Builder().numberOfStages(1).singleWorkerStageWithConstraints(DEFAULT_MACHINE_DEFINITION, Lists.newArrayList(), Lists.newArrayList()).build();
    // One-stage scheduling info with two workers; used as a "different" sched info in tests.
    public static final SchedulingInfo TWO_WORKER_SCHED_INFO = new SchedulingInfo.Builder().numberOfStages(1).multiWorkerStage(2, DEFAULT_MACHINE_DEFINITION).build();
    public static final JobOwner DEFAULT_JOB_OWNER = new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo");
    // Artifact/version baked into every fake cluster created by the helpers below.
    public static final String DEFAULT_ARTIFACT_NAME = "myart";
    public static final String DEFAULT_VERSION = "0.0.1";

    /**
     * Creates a fake cluster definition with no labels/parameters, a no-op SLA and
     * the single-worker scheduling info.
     *
     * @param clusterName name of the cluster to create
     * @return a fake {@link JobClusterDefinitionImpl} for use in tests
     */
    private JobClusterDefinitionImpl createFakeJobClusterDefn(String clusterName) {
        return createFakeJobClusterDefn(clusterName, Lists.newArrayList(), Lists.newArrayList(), NO_OP_SLA, SINGLE_WORKER_SCHED_INFO);
    }

    /**
     * Creates a fake cluster definition with the given labels and parameters, a
     * no-op SLA and the single-worker scheduling info.
     *
     * @param clusterName name of the cluster to create
     * @param labels      cluster-level labels the resolver may inherit
     * @param parameters  cluster-level parameters the resolver may inherit
     * @return a fake {@link JobClusterDefinitionImpl} for use in tests
     */
    private JobClusterDefinitionImpl createFakeJobClusterDefn(String clusterName, List<Label> labels, List<Parameter> parameters) {
        return createFakeJobClusterDefn(clusterName, labels, parameters, NO_OP_SLA, SINGLE_WORKER_SCHED_INFO);
    }

    /**
     * Fully parameterized fake cluster definition factory. The cluster carries a
     * single config entry with {@link #DEFAULT_ARTIFACT_NAME} / {@link #DEFAULT_VERSION}.
     *
     * @param clusterName    name of the cluster to create
     * @param labels         cluster-level labels
     * @param parameters     cluster-level parameters
     * @param sla            SLA to attach to the cluster
     * @param schedulingInfo scheduling info of the cluster's (only) config
     * @return a fake {@link JobClusterDefinitionImpl} for use in tests
     */
    private JobClusterDefinitionImpl createFakeJobClusterDefn(String clusterName, List<Label> labels, List<Parameter> parameters, SLA sla, SchedulingInfo schedulingInfo) {

        JobClusterConfig clusterConfig = new JobClusterConfig.Builder()
                .withArtifactName(DEFAULT_ARTIFACT_NAME)
                .withSchedulingInfo(schedulingInfo)
                .withVersion(DEFAULT_VERSION)
                .build();

        return new JobClusterDefinitionImpl.Builder()
                .withJobClusterConfig(clusterConfig)
                .withName(clusterName)
                .withParameters(parameters)
                .withLabels(labels)
                .withUser("user")
                .withIsReadyForJobMaster(true)
                .withOwner(DEFAULT_JOB_OWNER)
                .withMigrationConfig(WorkerMigrationConfig.DEFAULT)
                .withSla(sla)
                .build();
    }

    /**
     * When artifact name and scheduling info are both given (with or without a
     * version) the resolver should use them verbatim, generate a version if one
     * was not supplied, and inherit labels/parameters from the cluster.
     */
    @Test
    public void artifactSchedPresentTest() {
        String clusterName = "artifactVersionSchedPresentTest";

        List<Label> labels = new ArrayList<>();
        Label label = new Label("l1", "lv1");
        labels.add(label);

        List<Parameter> parameters = new ArrayList<>();
        Parameter parameter = new Parameter("paramName", "paramValue");
        parameters.add(parameter);

        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, labels, parameters);
        IJobClusterMetadata jobClusterMetadata = new JobClusterMetadataImpl(fakeJobCluster,1,false);

        // Artifact, version and sched info all differ from the cluster defaults.
        String version = "0.0.2";
        String artifactName = "myArt2";
        SchedulingInfo schedulingInfo = TWO_WORKER_SCHED_INFO;

        try {
            JobDefinition givenJobDefn = new JobDefinition.Builder().withArtifactName(artifactName).withName(clusterName).withSchedulingInfo(schedulingInfo).withVersion(version).build();

            JobDefinitionResolver resolver = new JobDefinitionResolver();
            JobDefinition resolvedJobDefinition = resolver.getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);

            // assert the specified values are being used
            assertEquals(artifactName, resolvedJobDefinition.getArtifactName());
            assertEquals(schedulingInfo, resolvedJobDefinition.getSchedulingInfo());
            assertEquals(version, resolvedJobDefinition.getVersion());

            // assert the parameters and labels are inherited since they were not specified
            assertEquals(1, resolvedJobDefinition.getLabels().size());
            assertEquals(label, resolvedJobDefinition.getLabels().get(0));
            assertEquals(1, resolvedJobDefinition.getParameters().size());
            assertEquals(parameter, resolvedJobDefinition.getParameters().get(0));
        } catch (InvalidJobException e) {
            e.printStackTrace();
            fail();
        } catch (Exception e) {
            e.printStackTrace();
            fail();
        }

        // Only ArtifactName and schedInfo is specified
        try {
            JobDefinition givenJobDefn = new JobDefinition.Builder().withArtifactName(artifactName).withName(clusterName).withSchedulingInfo(schedulingInfo).build();

            JobDefinitionResolver resolver = new JobDefinitionResolver();
            JobDefinition resolvedJobDefinition = resolver.getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);

            // assert the specified values are being used
            assertEquals(artifactName, resolvedJobDefinition.getArtifactName());
            assertEquals(schedulingInfo, resolvedJobDefinition.getSchedulingInfo());

            // assert a version number was generated
            assertTrue(resolvedJobDefinition.getVersion() != null && !resolvedJobDefinition.getVersion().isEmpty());

            // assert the parameters and labels are inherited since they were not specified
            assertEquals(1, resolvedJobDefinition.getLabels().size());
            assertEquals(label, resolvedJobDefinition.getLabels().get(0));
            assertEquals(1, resolvedJobDefinition.getParameters().size());
            assertEquals(parameter, resolvedJobDefinition.getParameters().get(0));
        } catch (InvalidJobException e) {
            e.printStackTrace();
            fail();
        } catch (Exception e) {
            e.printStackTrace();
            fail();
        }
    }

    /**
     * A new artifact name without scheduling info is ambiguous (the resolver
     * cannot pick a sched info for an unknown artifact), so resolution must fail
     * whether or not a version was supplied.
     */
    @Test
    public void artifactPresentButSchedAbsentFailsTest() {
        String clusterName = "artifactPresentButSchedAbsentFailsTest";

        List<Label> labels = new ArrayList<>();
        Label label = new Label("l1", "lv1");
        labels.add(label);

        List<Parameter> parameters = new ArrayList<>();
        Parameter parameter = new Parameter("paramName", "paramValue");
        parameters.add(parameter);

        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, labels, parameters);
        IJobClusterMetadata jobClusterMetadata = new JobClusterMetadataImpl(fakeJobCluster,1,false);

        String version = "0.0.2";
        String artifactName = "myArt2";

        // Only new artifact and version is specified
        try {
            JobDefinition givenJobDefn = new JobDefinition.Builder().withArtifactName(artifactName).withName(clusterName).withVersion(version).build();

            JobDefinitionResolver resolver = new JobDefinitionResolver();
            JobDefinition resolvedJobDefinition = resolver.getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);
            fail();
        } catch (Exception e) {
            e.printStackTrace();
        }

        // Only new artifact is specified
        try {
            JobDefinition givenJobDefn = new JobDefinition.Builder().withArtifactName(artifactName).withName(clusterName).build();

            JobDefinitionResolver resolver = new JobDefinitionResolver();
            JobDefinition resolvedJobDefinition = resolver.getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);
            fail();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * When a known version is given, the artifact is looked up from the matching
     * cluster config; a user-supplied sched info overrides the cluster's, while an
     * absent sched info is inherited from that config.
     */
    @Test
    public void versionSchedPresentTest() {
        String clusterName = "versionSchedPresentTest";

        List<Label> labels = new ArrayList<>();
        Label label = new Label("l1", "lv1");
        labels.add(label);

        List<Parameter> parameters = new ArrayList<>();
        Parameter parameter = new Parameter("paramName", "paramValue");
        parameters.add(parameter);

        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, labels, parameters);
        IJobClusterMetadata jobClusterMetadata = new JobClusterMetadataImpl(fakeJobCluster,1,false);

        // Version matches the cluster's (only) config entry.
        String version = "0.0.1";

        // Sched info differs from the cluster default by an added constraint.
        JobConstraints softConstraints = JobConstraints.ExclusiveHost;
        List<JobConstraints> constraintsList = new ArrayList<>();
        constraintsList.add(softConstraints);
        SchedulingInfo schedulingInfo = new SchedulingInfo.Builder().numberOfStages(1).singleWorkerStageWithConstraints(DEFAULT_MACHINE_DEFINITION, Lists.newArrayList(), constraintsList).build();

        try {
            JobDefinition givenJobDefn = new JobDefinition.Builder().withName(clusterName).withSchedulingInfo(schedulingInfo).withVersion(version).build();

            JobDefinitionResolver resolver = new JobDefinitionResolver();
            JobDefinition resolvedJobDefinition = resolver.getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);

            // artifact will get populated using the given version.
            assertEquals(DEFAULT_ARTIFACT_NAME, resolvedJobDefinition.getArtifactName());

            // scheduling info will be the one specified by us
            assertEquals(schedulingInfo, resolvedJobDefinition.getSchedulingInfo());

            // version should match what we set.
            assertEquals(version, resolvedJobDefinition.getVersion());

            // assert the parameters and labels are inherited since they were not specified
            assertEquals(1, resolvedJobDefinition.getLabels().size());
            assertEquals(label, resolvedJobDefinition.getLabels().get(0));
            assertEquals(1, resolvedJobDefinition.getParameters().size());
            assertEquals(parameter, resolvedJobDefinition.getParameters().get(0));
        } catch (InvalidJobException e) {
            e.printStackTrace();
            fail();
        } catch (Exception e) {
            e.printStackTrace();
            fail();
        }

        // Only version is specified
        try {
            JobDefinition givenJobDefn = new JobDefinition.Builder().withName(clusterName).withVersion(version).build();

            JobDefinitionResolver resolver = new JobDefinitionResolver();
            JobDefinition resolvedJobDefinition = resolver.getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);

            // assert the artifact is inherited
            assertEquals(DEFAULT_ARTIFACT_NAME, resolvedJobDefinition.getArtifactName());

            // assert the scheduling info is inherited
            assertEquals(SINGLE_WORKER_SCHED_INFO, resolvedJobDefinition.getSchedulingInfo());

            // assert a version is the one we gave
            assertEquals(version, resolvedJobDefinition.getVersion());

            // assert the parameters and labels are inherited since they were not specified
            assertEquals(1, resolvedJobDefinition.getLabels().size());
            assertEquals(label, resolvedJobDefinition.getLabels().get(0));
            assertEquals(1, resolvedJobDefinition.getParameters().size());
            assertEquals(parameter, resolvedJobDefinition.getParameters().get(0));
        } catch (InvalidJobException e) {
            e.printStackTrace();
            fail();
        } catch (Exception e) {
            e.printStackTrace();
            fail();
        }
    }

    /**
     * When only scheduling info is supplied (or nothing at all, including the
     * literal string "null" as a version), everything else — artifact, version,
     * labels and parameters — falls back to the cluster's latest config.
     */
    @Test
    public void SchedPresentTest() {
        String clusterName = "SchedPresentTest";

        List<Label> labels = new ArrayList<>();
        Label label = new Label("l1", "lv1");
        labels.add(label);

        List<Parameter> parameters = new ArrayList<>();
        Parameter parameter = new Parameter("paramName", "paramValue");
        parameters.add(parameter);

        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, labels, parameters);
        IJobClusterMetadata jobClusterMetadata = new JobClusterMetadataImpl(fakeJobCluster,1,false);

        JobConstraints softConstraints = JobConstraints.ExclusiveHost;
        List<JobConstraints> constraintsList = new ArrayList<>();
        constraintsList.add(softConstraints);
        SchedulingInfo schedulingInfo = new SchedulingInfo.Builder().numberOfStages(1).singleWorkerStageWithConstraints(DEFAULT_MACHINE_DEFINITION, Lists.newArrayList(), constraintsList).build();

        try {
            // only sched info set.
            JobDefinition givenJobDefn = new JobDefinition.Builder().withName(clusterName).withSchedulingInfo(schedulingInfo).build();

            JobDefinitionResolver resolver = new JobDefinitionResolver();
            JobDefinition resolvedJobDefinition = resolver.getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);

            // artifact will get populated using the given version.
            assertEquals(DEFAULT_ARTIFACT_NAME, resolvedJobDefinition.getArtifactName());

            // scheduling info will be the one specified by us
            assertEquals(schedulingInfo, resolvedJobDefinition.getSchedulingInfo());

            // version should match the latest on the cluster
            assertEquals(DEFAULT_VERSION, resolvedJobDefinition.getVersion());

            // assert the parameters and labels are inherited since they were not specified
            assertEquals(1, resolvedJobDefinition.getLabels().size());
            assertEquals(label, resolvedJobDefinition.getLabels().get(0));
            assertEquals(1, resolvedJobDefinition.getParameters().size());
            assertEquals(parameter, resolvedJobDefinition.getParameters().get(0));
        } catch (InvalidJobException e) {
            e.printStackTrace();
            fail();
        } catch (Exception e) {
            e.printStackTrace();
            fail();
        }

        // NOTHING is specified
        try {
            JobDefinition givenJobDefn = new JobDefinition.Builder().withName(clusterName).build();

            JobDefinitionResolver resolver = new JobDefinitionResolver();
            JobDefinition resolvedJobDefinition = resolver.getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);

            // assert the artifact is inherited
            assertEquals(DEFAULT_ARTIFACT_NAME, resolvedJobDefinition.getArtifactName());

            // assert the scheduling info is inherited
            assertEquals(SINGLE_WORKER_SCHED_INFO, resolvedJobDefinition.getSchedulingInfo());

            // assert a version is the default one.
            assertEquals(DEFAULT_VERSION, resolvedJobDefinition.getVersion());

            // assert the parameters and labels are inherited since they were not specified
            assertEquals(1, resolvedJobDefinition.getLabels().size());
            assertEquals(label, resolvedJobDefinition.getLabels().get(0));
            assertEquals(1, resolvedJobDefinition.getParameters().size());
            assertEquals(parameter, resolvedJobDefinition.getParameters().get(0));
        } catch (InvalidJobException e) {
            e.printStackTrace();
            fail();
        } catch (Exception e) {
            e.printStackTrace();
            fail();
        }

        // NOTHING is specified2
        try {
            // NOTE: the literal string "null" is treated the same as an unset version.
            JobDefinition givenJobDefn = new JobDefinition.Builder().withName(clusterName).withVersion("null").build();

            JobDefinitionResolver resolver = new JobDefinitionResolver();
            JobDefinition resolvedJobDefinition = resolver.getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);

            // assert the artifact is inherited
            assertEquals(DEFAULT_ARTIFACT_NAME, resolvedJobDefinition.getArtifactName());

            // assert the scheduling info is inherited
            assertEquals(SINGLE_WORKER_SCHED_INFO, resolvedJobDefinition.getSchedulingInfo());

            // assert a version is the default one.
            assertEquals(DEFAULT_VERSION, resolvedJobDefinition.getVersion());

            // assert the parameters and labels are inherited since they were not specified
            assertEquals(1, resolvedJobDefinition.getLabels().size());
            assertEquals(label, resolvedJobDefinition.getLabels().get(0));
            assertEquals(1, resolvedJobDefinition.getParameters().size());
            assertEquals(parameter, resolvedJobDefinition.getParameters().get(0));
        } catch (InvalidJobException e) {
            e.printStackTrace();
            fail();
        } catch (Exception e) {
            e.printStackTrace();
            fail();
        }
    }

    /**
     * Requesting a version that does not exist on the cluster must make
     * resolution fail.
     */
    @Test
    public void versionNotFoundTest() {
        String clusterName = "versionNotFoundTest";

        List<Label> labels = new ArrayList<>();
        Label label = new Label("l1", "lv1");
        labels.add(label);

        List<Parameter> parameters = new ArrayList<>();
        Parameter parameter = new Parameter("paramName", "paramValue");
        parameters.add(parameter);

        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, labels, parameters);
        IJobClusterMetadata jobClusterMetadata = new JobClusterMetadataImpl(fakeJobCluster,1,false);

        // 0.0.2 is not a version known to the cluster (which only has 0.0.1).
        String version = "0.0.2";

        try {
            JobDefinition givenJobDefn = new JobDefinition.Builder().withName(clusterName).withVersion(version).build();

            JobDefinitionResolver resolver = new JobDefinitionResolver();
            JobDefinition resolvedJobDefinition = resolver.getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);
            fail();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Directly exercises {@code getJobClusterConfigForVersion}: each known
     * version returns its matching config, an unknown version returns empty.
     */
    @Test
    public void lookupJobClusterConfigTest() {
        String clusterName = "lookupJobClusterConfigTest";

        JobClusterConfig clusterConfig1 = new JobClusterConfig.Builder()
                .withArtifactName(DEFAULT_ARTIFACT_NAME)
                .withSchedulingInfo(SINGLE_WORKER_SCHED_INFO)
                .withVersion(DEFAULT_VERSION)
                .build();

        JobClusterConfig clusterConfig2 = new JobClusterConfig.Builder()
                .withArtifactName("artifact2")
                .withSchedulingInfo(TWO_WORKER_SCHED_INFO)
                .withVersion("0.0.2")
                .build();

        List<JobClusterConfig> configList = new ArrayList<>();
        configList.add(clusterConfig1);
        configList.add(clusterConfig2);

        JobClusterDefinitionImpl jobClusterDefinition = new JobClusterDefinitionImpl.Builder()
                .withJobClusterConfigs(configList)
                .withName(clusterName)
                .withParameters(Lists.newArrayList())
                .withLabels(Lists.newArrayList())
                .withUser("user")
                .withIsReadyForJobMaster(true)
                .withOwner(DEFAULT_JOB_OWNER)
                .withMigrationConfig(WorkerMigrationConfig.DEFAULT)
                .withSla(NO_OP_SLA)
                .build();

        IJobClusterMetadata jobClusterMetadata = new JobClusterMetadataImpl.Builder().withJobClusterDefinition(jobClusterDefinition).withLastJobCount(1).withIsDisabled(false).build();

        JobDefinitionResolver resolver = new JobDefinitionResolver();

        // Lookup of the first (default) version.
        Optional<JobClusterConfig> config = resolver.getJobClusterConfigForVersion(jobClusterMetadata, DEFAULT_VERSION);
        assertTrue(config.isPresent());
        assertEquals(DEFAULT_ARTIFACT_NAME, config.get().getArtifactName());
        assertEquals(DEFAULT_VERSION, config.get().getVersion());
        assertEquals(SINGLE_WORKER_SCHED_INFO, config.get().getSchedulingInfo());

        // Lookup of the second version.
        Optional<JobClusterConfig> config2 = resolver.getJobClusterConfigForVersion(jobClusterMetadata, "0.0.2");
        assertTrue(config2.isPresent());
        assertEquals("artifact2", config2.get().getArtifactName());
        assertEquals("0.0.2", config2.get().getVersion());
        assertEquals(TWO_WORKER_SCHED_INFO, config2.get().getSchedulingInfo());

        // Unknown version yields an empty Optional (and must not throw).
        try {
            Optional<JobClusterConfig> config3 = resolver.getJobClusterConfigForVersion(jobClusterMetadata, "0.0.3");
            assertTrue(!config3.isPresent());
        } catch(Exception e) {
            e.printStackTrace();
            fail();
        }
    }
}
4,216
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/jobcluster/LabelCacheTest.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.jobcluster;

import io.mantisrx.common.Label;
import io.mantisrx.server.master.domain.JobId;
import org.junit.Test;

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

import static org.junit.Assert.*;

/**
 * Tests for {@code JobClusterActor.LabelCache}, the bidirectional index between
 * labels and job ids: label -> set of job ids, and job id -> its labels.
 * Covers insertion, removal (including reference-counting of shared labels) and
 * AND/OR label-match queries.
 */
public class LabelCacheTest {

    /** Adding a job with a label populates both directions of the index. */
    @Test
    public void addLabelTest() {
        JobClusterActor.LabelCache labelCache = new JobClusterActor.LabelCache();
        JobId jId = new JobId("addLabelTest",1);
        List<Label> labelList = new ArrayList<>();
        Label label1 = new Label("l1","v1");
        labelList.add(label1);
        labelCache.addJobIdToLabelCache(jId,labelList);
        assertTrue(labelCache.labelJobIdMap.containsKey(label1));
        assertTrue(labelCache.jobIdToLabelMap.containsKey(jId));
    }

    /** Two jobs can share the same label; both appear in the job-id map. */
    @Test
    public void addLabelTest2() {
        JobClusterActor.LabelCache labelCache = new JobClusterActor.LabelCache();
        JobId jId = new JobId("addLabelTest",1);
        JobId jId2 = new JobId("addLabelTest",2);

        List<Label> labelList = new ArrayList<>();
        Label label1 = new Label("l1","v1");
        labelList.add(label1);
        labelCache.addJobIdToLabelCache(jId,labelList);
        labelCache.addJobIdToLabelCache(jId2,labelList);
        assertTrue(labelCache.labelJobIdMap.containsKey(label1));
        assertTrue(labelCache.jobIdToLabelMap.containsKey(jId));
        assertTrue(labelCache.jobIdToLabelMap.containsKey(jId2));
    }

    /**
     * Removing the only job that carries a label removes the label entry itself.
     */
    @Test
    public void removeLabelTest() {
        JobClusterActor.LabelCache labelCache = new JobClusterActor.LabelCache();
        JobId jId = new JobId("addLabelTest",1);
        List<Label> labelList = new ArrayList<>();
        Label label1 = new Label("l1","v1");
        labelList.add(label1);
        labelCache.addJobIdToLabelCache(jId,labelList);
        assertTrue(labelCache.labelJobIdMap.containsKey(label1));
        assertTrue(labelCache.jobIdToLabelMap.containsKey(jId));

        labelCache.removeJobIdFromLabelCache(jId);
        assertFalse(labelCache.jobIdToLabelMap.containsKey(jId));
        // label has no jobs associated with it remove label entry
        assertFalse(labelCache.labelJobIdMap.containsKey(label1));
    }

    /**
     * Removing one of two jobs sharing a label keeps the label entry alive for
     * the remaining job.
     */
    @Test
    public void removeLabelTest2() {
        JobClusterActor.LabelCache labelCache = new JobClusterActor.LabelCache();
        JobId jId = new JobId("addLabelTest",1);
        JobId jId2 = new JobId("addLabelTest",2);
        List<Label> labelList = new ArrayList<>();
        Label label1 = new Label("l1","v1");
        labelList.add(label1);
        labelCache.addJobIdToLabelCache(jId,labelList);
        labelCache.addJobIdToLabelCache(jId2,labelList);
        assertTrue(labelCache.labelJobIdMap.containsKey(label1));
        assertTrue(labelCache.jobIdToLabelMap.containsKey(jId));
        assertTrue(labelCache.jobIdToLabelMap.containsKey(jId2));

        labelCache.removeJobIdFromLabelCache(jId);
        assertFalse(labelCache.jobIdToLabelMap.containsKey(jId));
        // label still has 1 job associated with it label entry should still exist
        assertTrue(labelCache.labelJobIdMap.containsKey(label1));
    }

    /**
     * AND semantics: a job matches only if it carries ALL requested labels.
     * Jobs labeled (source, mre) must not match a (source, kafka) query, while
     * a query on the shared (source) label alone matches every job.
     */
    @Test
    public void matchingLabelsAndTest() {
        JobClusterActor.LabelCache labelCache = new JobClusterActor.LabelCache();
        JobId jId = new JobId("addLabelTest",1);
        JobId jId2 = new JobId("addLabelTest",2);
        JobId jId3 = new JobId("addLabelTest",3);
        JobId jId4 = new JobId("addLabelTest",4);

        List<Label> sourceList = new ArrayList<>();
        List<Label> sourceMREList = new ArrayList<>();
        List<Label> sourceKafkaList = new ArrayList<>();

        Label jobTypeLabel = new Label("_mantis.jobType","source");
        Label originMRELabel = new Label("_mantis.dataOrigin","mre");
        Label originKafkaLabel = new Label("_mantis.dataOrigin","kafka");

        sourceMREList.add(jobTypeLabel);
        sourceMREList.add(originMRELabel);

        sourceKafkaList.add(jobTypeLabel);
        sourceKafkaList.add(originKafkaLabel);

        sourceList.add(jobTypeLabel);

        labelCache.addJobIdToLabelCache(jId,sourceMREList);
        labelCache.addJobIdToLabelCache(jId2,sourceMREList);
        labelCache.addJobIdToLabelCache(jId3,sourceMREList);

        // No job has the kafka origin yet, so an AND query returns nothing.
        Set<JobId> jobIdsMatchingLabels = labelCache.getJobIdsMatchingLabels(sourceKafkaList, true);
        System.out.println("matchset " + jobIdsMatchingLabels);
        assertEquals(0, jobIdsMatchingLabels.size());

        labelCache.addJobIdToLabelCache(jId4,sourceKafkaList);
        jobIdsMatchingLabels = labelCache.getJobIdsMatchingLabels(sourceKafkaList, true);
        System.out.println("matchset " + jobIdsMatchingLabels);
        assertEquals(1, jobIdsMatchingLabels.size());
        assertTrue( jobIdsMatchingLabels.contains(jId4));

        // All four jobs carry the shared jobType label.
        jobIdsMatchingLabels = labelCache.getJobIdsMatchingLabels(sourceList, true);
        System.out.println("matchset " + jobIdsMatchingLabels);
        assertEquals(4, jobIdsMatchingLabels.size());
        //assertTrue( jobIdsMatchingLabels.contains(jId4));
    }

    /**
     * OR semantics: a job matches if it carries ANY of the requested labels,
     * so querying with both labels returns both jobs.
     */
    @Test
    public void matchingLabelsOrTest() {
        JobClusterActor.LabelCache labelCache = new JobClusterActor.LabelCache();
        JobId jId = new JobId("addLabelTest",1);
        JobId jId2 = new JobId("addLabelTest",2);

        List<Label> labelList1 = new ArrayList<>();
        List<Label> labelList2 = new ArrayList<>();
        Label label1 = new Label("l1","v1");
        Label label2 = new Label("l2","v2");
        labelList1.add(label1);
        labelList2.add(label2);
        labelCache.addJobIdToLabelCache(jId,labelList1);
        labelCache.addJobIdToLabelCache(jId2,labelList2);

        List<Label> labelListAll = new ArrayList<>();
        labelListAll.addAll(labelList1);
        labelListAll.addAll(labelList2);

        Set<JobId> jobIdsMatchingLabels = labelCache.getJobIdsMatchingLabels(labelListAll, false);
        assertEquals(2, jobIdsMatchingLabels.size());
        boolean foundJob1 = false;
        boolean foundJob2 = false;
        for(JobId jobId : jobIdsMatchingLabels) {
            if(jobId.equals(jId)) {
                foundJob1 = true;
            } else if(jobId.equals(jId2)) {
                foundJob2 = true;
            }
        }
        assertTrue(foundJob1 && foundJob2);
    }
}
4,217
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/jobcluster/JobClusterTest.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.jobcluster; import static io.mantisrx.master.jobcluster.JobClusterActor.JobInfo; import static io.mantisrx.master.jobcluster.JobClusterActor.props; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR_NOT_FOUND; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SERVER_ERROR; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DisableJobClusterRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DisableJobClusterResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.EnableJobClusterRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.EnableJobClusterResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobClusterRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobClusterResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsResponse; import static 
io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLastSubmittedJobIdStreamResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobCriteria; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobIdsRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobIdsResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobsRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobsResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.SubmitJobRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.SubmitJobResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterArtifactResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterLabelsResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterSLAResponse; import static 
io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyResponse; import static java.util.Optional.empty; import static java.util.Optional.of; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Matchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.File; import java.time.Duration; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import akka.actor.ActorRef; import akka.actor.ActorSystem; import akka.testkit.javadsl.TestKit; import io.mantisrx.shaded.com.google.common.collect.Lists; import com.netflix.mantis.master.scheduler.TestHelpers; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import io.mantisrx.common.Label; import io.mantisrx.master.api.akka.route.proto.JobClusterProtoAdapter; import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl; import io.mantisrx.master.events.LifecycleEventPublisher; import io.mantisrx.master.events.LifecycleEventPublisherImpl; import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl; import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl; import io.mantisrx.master.jobcluster.job.IMantisJobMetadata; import io.mantisrx.master.jobcluster.job.IMantisStageMetadata; import io.mantisrx.master.jobcluster.job.JobState; import io.mantisrx.master.jobcluster.job.JobTestHelper; import io.mantisrx.master.jobcluster.job.MantisJobMetadataImpl; import 
io.mantisrx.master.jobcluster.job.MantisJobMetadataView; import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata; import io.mantisrx.master.jobcluster.job.worker.JobWorker; import io.mantisrx.master.jobcluster.job.worker.WorkerHeartbeat; import io.mantisrx.master.jobcluster.job.worker.WorkerState; import io.mantisrx.master.jobcluster.job.worker.WorkerTerminate; import io.mantisrx.master.jobcluster.proto.BaseResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterArtifactRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterLabelsRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterSLARequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyRequest; import io.mantisrx.master.jobcluster.proto.JobClusterProto; import io.mantisrx.runtime.JobOwner; import io.mantisrx.runtime.JobSla; import io.mantisrx.runtime.MachineDefinition; import io.mantisrx.runtime.MantisJobDurationType; import io.mantisrx.runtime.MantisJobState; import io.mantisrx.runtime.WorkerMigrationConfig; import io.mantisrx.runtime.WorkerMigrationConfig.MigrationStrategyEnum; import io.mantisrx.runtime.command.InvalidJobException; import io.mantisrx.runtime.descriptor.SchedulingInfo; import io.mantisrx.runtime.descriptor.StageScalingPolicy; import io.mantisrx.server.core.JobCompletedReason; import io.mantisrx.server.core.Status; import io.mantisrx.server.core.Status.TYPE; import io.mantisrx.server.core.domain.WorkerId; import io.mantisrx.server.master.domain.DataFormatAdapter; import io.mantisrx.server.master.domain.IJobClusterDefinition; import io.mantisrx.server.master.domain.JobClusterConfig; import io.mantisrx.server.master.domain.JobClusterDefinitionImpl; import io.mantisrx.server.master.domain.JobDefinition; import io.mantisrx.server.master.domain.JobId; import io.mantisrx.server.master.domain.SLA; import 
io.mantisrx.server.master.persistence.IMantisStorageProvider; import io.mantisrx.server.master.persistence.MantisJobStore; import io.mantisrx.server.master.persistence.MantisStorageProviderAdapter; import io.mantisrx.server.master.scheduler.MantisScheduler; import io.mantisrx.server.master.scheduler.WorkerEvent; import io.mantisrx.server.master.store.NamedJob; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Ignore; import org.junit.Test; import org.mockito.Mockito; import org.mockito.stubbing.Answer; import rx.schedulers.Schedulers; import rx.subjects.BehaviorSubject; public class JobClusterTest { public static final SLA NO_OP_SLA = new SLA(0, 0, null, null); public static final MachineDefinition DEFAULT_MACHINE_DEFINITION = new MachineDefinition(1, 10, 10, 10, 2); public static final SchedulingInfo SINGLE_WORKER_SCHED_INFO = new SchedulingInfo.Builder().numberOfStages(1).singleWorkerStageWithConstraints(DEFAULT_MACHINE_DEFINITION, Lists.newArrayList(), Lists.newArrayList()).build(); public static final SchedulingInfo TWO_WORKER_SCHED_INFO = new SchedulingInfo.Builder().numberOfStages(1).multiWorkerStage(2, DEFAULT_MACHINE_DEFINITION).build(); public static final JobOwner DEFAULT_JOB_OWNER = new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo"); final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl()); static ActorSystem system; //private static TestKit probe; private static MantisJobStore jobStore; private static IMantisStorageProvider storageProvider; private static LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl()); private static final String user = "mantis"; @BeforeClass public static void setup() { Config config = 
ConfigFactory.parseString("akka {\n" + " loggers = [\"akka.testkit.TestEventListener\"]\n" + " loglevel = \"WARNING\"\n" + " stdout-loglevel = \"WARNING\"\n" + " test.single-expect-default = 1000 millis\n" + "}\n"); system = ActorSystem.create("JobClusterTest", config.withFallback(ConfigFactory.load())); JobTestHelper.createDirsIfRequired(); TestHelpers.setupMasterConfig(); storageProvider = new MantisStorageProviderAdapter(new io.mantisrx.server.master.store.SimpleCachedFileStorageProvider(), eventPublisher); jobStore = new MantisJobStore(storageProvider); } @AfterClass public static void tearDown() { //((SimpleCachedFileStorageProvider)storageProvider).deleteAllFiles(); JobTestHelper.deleteAllFiles(); TestKit.shutdownActorSystem(system); system = null; } private void deleteFiles(String dirName, final String jobId, final String filePrefix) { File spoolDir = new File(dirName); if (spoolDir != null) { for (File stageFile : spoolDir.listFiles((dir, name) -> { return name.startsWith(filePrefix + jobId + "-"); })) { stageFile.delete(); } } } private JobClusterDefinitionImpl createFakeJobClusterDefn(String clusterName) { return createFakeJobClusterDefn(clusterName, Lists.newArrayList()); } private JobClusterDefinitionImpl createFakeJobClusterDefn(String clusterName, List<Label> labels) { return createFakeJobClusterDefn(clusterName, labels, NO_OP_SLA); } private JobClusterDefinitionImpl createFakeJobClusterDefn(String clusterName, List<Label> labels, SLA sla) { return createFakeJobClusterDefn(clusterName,labels, sla, SINGLE_WORKER_SCHED_INFO); } private JobClusterDefinitionImpl createFakeJobClusterDefn(String clusterName, List<Label> labels, SLA sla, SchedulingInfo schedulingInfo) { JobClusterConfig clusterConfig = new JobClusterConfig.Builder() .withArtifactName("myart") .withSchedulingInfo(schedulingInfo) .withVersion("0.0.1") .build(); return new JobClusterDefinitionImpl.Builder() .withJobClusterConfig(clusterConfig) .withName(clusterName) 
.withParameters(Lists.newArrayList()) .withLabels(labels) .withUser(user) .withIsReadyForJobMaster(true) .withOwner(DEFAULT_JOB_OWNER) .withMigrationConfig(WorkerMigrationConfig.DEFAULT) .withSla(sla) .build(); } private JobDefinition createJob(String name, List<Label> labelList) throws InvalidJobException { return createJob(name, 0, MantisJobDurationType.Perpetual,null, SINGLE_WORKER_SCHED_INFO,labelList); } private JobDefinition createJob(String name2, long subsTimeoutSecs, MantisJobDurationType durationType) throws InvalidJobException { return createJob(name2, subsTimeoutSecs, durationType, null); } private JobDefinition createJob(String name2, long subsTimeoutSecs, MantisJobDurationType durationType, String userProvidedType) throws InvalidJobException { return createJob(name2, subsTimeoutSecs,durationType,userProvidedType,SINGLE_WORKER_SCHED_INFO,Lists.newArrayList()); } private JobDefinition createJob(String name2, long subsTimeoutSecs, MantisJobDurationType durationType, String userProvidedType, SchedulingInfo schedulingInfo, List<Label> labelList) throws InvalidJobException { return new JobDefinition.Builder() .withName(name2) .withParameters(Lists.newArrayList()) .withLabels(labelList) .withSchedulingInfo(schedulingInfo) .withArtifactName("myart") .withSubscriptionTimeoutSecs(subsTimeoutSecs) .withUser("njoshi") .withJobSla(new JobSla(0, 0, JobSla.StreamSLAType.Lossy, MantisJobDurationType.Transient, userProvidedType)) .build(); } private JobDefinition createJob(String name2) throws InvalidJobException { return createJob(name2, 0, MantisJobDurationType.Perpetual,null); } // CLUSTER CRUD TESTS /////////////////////////////////////////////////////////////////////////// @Test public void testJobClusterCreate() throws Exception { String name = "testJobClusterCreate"; TestKit probe = new TestKit(system); MantisScheduler schedulerMock = mock(MantisScheduler.class); MantisJobStore jobStoreMock = mock(MantisJobStore.class); final JobClusterDefinitionImpl 
fakeJobCluster = createFakeJobClusterDefn(name); ActorRef jobClusterActor = system.actorOf(props(name, jobStoreMock, schedulerMock, eventPublisher)); jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef()); JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class); assertEquals(SUCCESS, createResp.responseCode); jobClusterActor.tell(new GetJobClusterRequest(name), probe.getRef()); GetJobClusterResponse resp2 = probe.expectMsgClass(GetJobClusterResponse.class); System.out.println("resp2 " + resp2); assertEquals(SUCCESS, resp2.responseCode); assertEquals(name, resp2.getJobCluster().get().getName()); assertEquals("Nick", resp2.getJobCluster().get().getOwner().getName()); assertTrue(resp2.getJobCluster().get().getLabels().isEmpty()); assertEquals(1,resp2.getJobCluster().get().getJars().size()); jobClusterActor.tell(new JobClusterProto.DeleteJobClusterRequest(user, name, probe.getRef()), probe.getRef()); JobClusterProto.DeleteJobClusterResponse resp3 = probe.expectMsgClass(JobClusterProto.DeleteJobClusterResponse.class); assertEquals(SUCCESS, resp3.responseCode); assertEquals(jobClusterActor, probe.getLastSender()); //verify(jobStoreMock, times(1)).storeNewJob(any()); verify(jobStoreMock, times(1)).createJobCluster(any()); verify(jobStoreMock, times(1)).deleteJobCluster(name); probe.getSystem().stop(jobClusterActor); } @Test public void testJobClusterEnable() { try { TestKit probe = new TestKit(system); String clusterName = "testJobClusterEnable"; MantisScheduler schedulerMock = mock(MantisScheduler.class); MantisJobStore jobStoreMock = mock(MantisJobStore.class); String jobId = clusterName + "-1"; JobDefinition jobDefn = createJob(clusterName); IMantisJobMetadata job1 = new MantisJobMetadataImpl.Builder() .withJobDefinition(jobDefn) .withJobState(JobState.Completed) .withJobId(new JobId(clusterName,1)) .withNextWorkerNumToUse(2) 
.withSubmittedAt(1000) .build(); when(jobStoreMock.getArchivedJob(jobId)).thenReturn(of(job1)); SLA sla = new SLA(1,1,null,null); final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, Lists.newArrayList(),sla); ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher)); jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef()); JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class); assertEquals(SUCCESS, createResp.responseCode); JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId); JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted); JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe,jobClusterActor,jobId,1,new WorkerId(clusterName,jobId,0,1)); JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Launched); jobClusterActor.tell(new DisableJobClusterRequest(clusterName,user),probe.getRef()); DisableJobClusterResponse resp = probe.expectMsgClass(DisableJobClusterResponse.class); assertTrue(BaseResponse.ResponseCode.SUCCESS.equals(resp.responseCode)); jobClusterActor.tell(new EnableJobClusterRequest(clusterName,user),probe.getRef()); EnableJobClusterResponse enableResp = probe.expectMsgClass(EnableJobClusterResponse.class); assertTrue(BaseResponse.ResponseCode.SUCCESS.equals(enableResp.responseCode)); // first job was killed during disable JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Completed); // Sla will cause new job to get launched String jobId2 = clusterName + "-2"; boolean accepted = false; int cnt = 0; // try a few times for timing issue while(cnt < 50) { jobClusterActor.tell(new GetJobDetailsRequest("nj", JobId.fromId(jobId2).get()), probe.getRef()); GetJobDetailsResponse 
detailsResp = probe.expectMsgClass(GetJobDetailsResponse.class); if(detailsResp.responseCode.equals(BaseResponse.ResponseCode.SUCCESS)) { accepted = true; break; } Thread.sleep(1000); } assertTrue(accepted); // JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Completed); // JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 2), jobClusterActor); // verify(jobStoreMock, times(1)).createJobCluster(any()); // verify(jobStoreMock, times(1)).updateJobCluster(any()); } catch (Exception e) { // TODO Auto-generated catch block e.printStackTrace(); fail(); } //Mockito.doThrow(IOException.class).when(jobStoreMock).storeNewJob(any()); } @Test public void testJobClusterUpdateAndDelete() throws Exception { TestKit probe = new TestKit(system); List<Label> labels = Lists.newLinkedList(); Label l = new Label("labelname","labelvalue"); labels.add(l); String clusterName = "testJobClusterUpdateAndDelete"; MantisScheduler schedulerMock = mock(MantisScheduler.class); MantisJobStore jobStoreMock = mock(MantisJobStore.class); final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, labels); ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher)); jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef()); JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class); assertEquals(SUCCESS, createResp.responseCode); JobClusterConfig clusterConfig = new JobClusterConfig.Builder() .withArtifactName("myart") .withSchedulingInfo(SINGLE_WORKER_SCHED_INFO) .withVersion("0.0.2") .build(); final JobClusterDefinitionImpl updatedJobCluster = new JobClusterDefinitionImpl.Builder() .withJobClusterConfig(clusterConfig) .withName(clusterName) .withParameters(Lists.newArrayList()) .withLabels(labels) .withUser(user) 
            .withIsReadyForJobMaster(true)
            .withOwner(DEFAULT_JOB_OWNER)
            .withMigrationConfig(WorkerMigrationConfig.DEFAULT)
            .withSla(NO_OP_SLA)
            .build();
        // Apply the update and confirm the cluster actor acknowledges it.
        jobClusterActor.tell(new UpdateJobClusterRequest(updatedJobCluster, "user"), probe.getRef());
        UpdateJobClusterResponse resp = probe.expectMsgClass(UpdateJobClusterResponse.class);
        assertEquals(SUCCESS, resp.responseCode);
        assertEquals(jobClusterActor, probe.getLastSender());
        // Read the cluster back and check the updated labels are visible.
        jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
        GetJobClusterResponse resp3 = probe.expectMsgClass(GetJobClusterResponse.class);
        assertEquals(SUCCESS, resp3.responseCode);
        assertTrue(resp3.getJobCluster() != null);
        System.out.println("Job cluster " + resp3.getJobCluster());
        assertEquals(clusterName, resp3.getJobCluster().get().getName());
        System.out.println("Updated job cluster " + resp3.getJobCluster());
        assertEquals(1, resp3.getJobCluster().get().getLabels().size());
        assertEquals("labelname", resp3.getJobCluster().get().getLabels().get(0).getName());
        // Delete the (idle) cluster and verify the persistence interactions.
        jobClusterActor.tell(new JobClusterProto.DeleteJobClusterRequest(user, clusterName, probe.getRef()), probe.getRef());
        JobClusterProto.DeleteJobClusterResponse resp4 =
            probe.expectMsgClass(JobClusterProto.DeleteJobClusterResponse.class);
        assertEquals(SUCCESS, resp4.responseCode);
        assertEquals(jobClusterActor, probe.getLastSender());
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(1)).updateJobCluster(any());
        verify(jobStoreMock, times(1)).deleteJobCluster(clusterName);
    }

    // Re-submitting the same definition (same artifact/version) must be rejected
    // as a client error and must not touch the store.
    @Test
    public void testJobClusterUpdateFailsIfArtifactNotUnique() throws Exception {
        TestKit probe = new TestKit(system);
        List<Label> labels = Lists.newLinkedList();
        Label l = new Label("labelname", "labelvalue");
        labels.add(l);
        String clusterName = "testJobClusterUpdateFailsIfArtifactNotUnique";
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, labels);
        ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
        jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
        JobClusterProto.InitializeJobClusterResponse createResp =
            probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
        assertEquals(SUCCESS, createResp.responseCode);
        // Update with the unchanged definition: artifact is not unique -> CLIENT_ERROR.
        jobClusterActor.tell(new UpdateJobClusterRequest(fakeJobCluster, "user"), probe.getRef());
        UpdateJobClusterResponse resp = probe.expectMsgClass(UpdateJobClusterResponse.class);
        assertEquals(CLIENT_ERROR, resp.responseCode);
        assertEquals(jobClusterActor, probe.getLastSender());
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(0)).updateJobCluster(any());
    }

    // Deleting a cluster that still has a non-terminal (Accepted) job must fail.
    @Test
    public void testJobClusterDeleteFailsIfJobsActive() throws Exception {
        TestKit probe = new TestKit(system);
        List<Label> labels = Lists.newLinkedList();
        Label l = new Label("labelname", "labelvalue");
        labels.add(l);
        String clusterName = "testJobClusterDeleteFailsIfJobsActive";
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, labels);
        ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
        jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
        JobClusterProto.InitializeJobClusterResponse createResp =
            probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
        assertEquals(SUCCESS, createResp.responseCode);
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted); jobClusterActor.tell(new JobClusterProto.DeleteJobClusterRequest(user, clusterName, probe.getRef()), probe.getRef()); JobClusterProto.DeleteJobClusterResponse resp4 = probe.expectMsgClass(JobClusterProto.DeleteJobClusterResponse.class); assertEquals(CLIENT_ERROR, resp4.responseCode); assertEquals(jobClusterActor, probe.getLastSender()); verify(jobStoreMock, times(1)).createJobCluster(any()); verify(jobStoreMock, times(1)).updateJobCluster(any()); verify(jobStoreMock, times(0)).deleteJobCluster(clusterName); } @Test public void testJobClusterDeletePurgesCompletedJobs() throws Exception { TestKit probe = new TestKit(system); List<Label> labels = Lists.newLinkedList(); Label l = new Label("labelname","labelvalue"); labels.add(l); String clusterName = "testJobClusterDeletePurgesCompletedJobs"; MantisScheduler schedulerMock = mock(MantisScheduler.class); MantisJobStore jobStoreMock = mock(MantisJobStore.class); final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, labels); ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher)); jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef()); JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class); assertEquals(SUCCESS, createResp.responseCode); final JobDefinition jobDefn = createJob(clusterName,1, MantisJobDurationType.Transient); String jobId = clusterName + "-1"; JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId); JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted); jobClusterActor.tell(new DisableJobClusterRequest(clusterName, "user"), probe.getRef()); DisableJobClusterResponse disableResp = 
probe.expectMsgClass(DisableJobClusterResponse.class); assertEquals(SUCCESS, disableResp.responseCode); Thread.sleep(1000); jobClusterActor.tell(new JobClusterProto.DeleteJobClusterRequest(user, clusterName, probe.getRef()), probe.getRef()); JobClusterProto.DeleteJobClusterResponse resp4 = probe.expectMsgClass(JobClusterProto.DeleteJobClusterResponse.class); assertEquals(SUCCESS, resp4.responseCode); assertEquals(jobClusterActor, probe.getLastSender()); verify(jobStoreMock, times(1)).createJobCluster(any()); verify(jobStoreMock, times(2)).updateJobCluster(any()); verify(jobStoreMock, times(1)).deleteJobCluster(clusterName); verify(jobStoreMock, times(1)).storeCompletedJobForCluster(any(),any()); verify(jobStoreMock, times(1)).deleteJob("testJobClusterDeletePurgesCompletedJobs-1"); } @Test public void testJobClusterDisable() throws InterruptedException { TestKit probe = new TestKit(system); CountDownLatch storeCompletedCalled = new CountDownLatch(1); String clusterName = "testJobClusterDisable"; MantisScheduler schedulerMock = mock(MantisScheduler.class); MantisJobStore jobStoreMock = mock(MantisJobStore.class); final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName); ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher)); jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef()); JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class); assertEquals(SUCCESS, createResp.responseCode); try { final JobDefinition jobDefn = createJob(clusterName,1, MantisJobDurationType.Transient); String jobId = clusterName + "-1"; IMantisJobMetadata completedJobMock = new MantisJobMetadataImpl.Builder() .withJobId(new JobId(clusterName, 1)) .withJobDefinition(jobDefn) .withJobState(JobState.Completed) .build(); 
when(jobStoreMock.getArchivedJob(any())).thenReturn(of(completedJobMock)); JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId); JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted); jobClusterActor.tell(new DisableJobClusterRequest(clusterName,"user"), probe.getRef()); DisableJobClusterResponse disableResp = probe.expectMsgClass(DisableJobClusterResponse.class); assertEquals(SUCCESS, disableResp.responseCode); jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef()); GetJobClusterResponse getJobClusterResp = probe.expectMsgClass(GetJobClusterResponse.class); assertTrue(getJobClusterResp.getJobCluster().get().isDisabled()); jobClusterActor.tell(new GetJobDetailsRequest(clusterName, JobId.fromId(jobId).get()),probe.getRef()); GetJobDetailsResponse jobDetailsResp = probe.expectMsgClass(GetJobDetailsResponse.class); assertEquals(SUCCESS, jobDetailsResp.responseCode); assertEquals(jobId, jobDetailsResp.getJobMetadata().get().getJobId().getId()); assertEquals(JobState.Completed, jobDetailsResp.getJobMetadata().get().getState()); verify(jobStoreMock, times(1)).createJobCluster(any()); verify(jobStoreMock, times(2)).updateJobCluster(any()); verify(jobStoreMock, times(1)).storeNewJob(any()); verify(jobStoreMock, times(1)).updateStage(any()); verify(jobStoreMock,times(2)).updateJob(any()); verify(jobStoreMock, times(1)).storeNewWorkers(any(),any()); doAnswer((Answer) invocation -> { storeCompletedCalled.countDown(); return null; }).when(jobStoreMock).storeCompletedJobForCluster(any(),any()); storeCompletedCalled.await(1, TimeUnit.SECONDS); } catch (Exception e) { e.printStackTrace(); fail(); } } ///////////////////////////////////////////// CLUSTER CRUD END /////////////////////////////////////////////////////// ////////////////////////////// CLUSTER UPDATE FLAVORS /////////////////////////////////////////////////////////////// @Test public void testJobClusterSLAUpdate() 
throws Exception {
        TestKit probe = new TestKit(system);
        String clusterName = "testJobClusterSLAUpdate";
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
        ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
        jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
        JobClusterProto.InitializeJobClusterResponse createResp =
            probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
        assertEquals(SUCCESS, createResp.responseCode);
        // Valid SLA (min 0 <= max 10) — update should succeed.
        SLA newSLA = new SLA(0, 10, null, null);
        UpdateJobClusterSLARequest updateSlaReq =
            new UpdateJobClusterSLARequest(clusterName, newSLA.getMin(), newSLA.getMax(), "user");
        jobClusterActor.tell(updateSlaReq, probe.getRef());
        UpdateJobClusterSLAResponse resp = probe.expectMsgClass(UpdateJobClusterSLAResponse.class);
        assertEquals(SUCCESS, resp.responseCode);
        assertEquals(jobClusterActor, probe.getLastSender());
        // Read the cluster back and confirm the SLA was persisted.
        jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
        GetJobClusterResponse resp3 = probe.expectMsgClass(GetJobClusterResponse.class);
        assertEquals(SUCCESS, resp3.responseCode);
        assertTrue(resp3.getJobCluster() != null);
        System.out.println("Job cluster " + resp3.getJobCluster());
        assertEquals(clusterName, resp3.getJobCluster().get().getName());
        System.out.println("Updated job cluster " + resp3.getJobCluster());
        assertEquals(newSLA, DataFormatAdapter.convertToSLA(resp3.getJobCluster().get().getSla()));
        verify(jobStoreMock, times(1)).updateJobCluster(any());
        verify(jobStoreMock, times(1)).createJobCluster(any());
    }

    // Updating the worker-migration strategy persists the new config on the cluster.
    @Test
    public void testJobClusterMigrationConfigUpdate() throws Exception {
        TestKit probe = new TestKit(system);
        String clusterName = "testJobClusterMigrationConfigUpdate";
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
        ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
        jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
        JobClusterProto.InitializeJobClusterResponse createResp =
            probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
        assertEquals(SUCCESS, createResp.responseCode);
        WorkerMigrationConfig newConfig = new WorkerMigrationConfig(MigrationStrategyEnum.ONE_WORKER, "{'name':'value'}");
        UpdateJobClusterWorkerMigrationStrategyRequest updateMigrationConfigReq =
            new UpdateJobClusterWorkerMigrationStrategyRequest(clusterName, newConfig, "user");
        jobClusterActor.tell(updateMigrationConfigReq, probe.getRef());
        UpdateJobClusterWorkerMigrationStrategyResponse resp =
            probe.expectMsgClass(UpdateJobClusterWorkerMigrationStrategyResponse.class);
        assertEquals(SUCCESS, resp.responseCode);
        assertEquals(jobClusterActor, probe.getLastSender());
        // Read the cluster back and confirm the strategy was persisted.
        jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
        GetJobClusterResponse resp3 = probe.expectMsgClass(GetJobClusterResponse.class);
        assertEquals(SUCCESS, resp3.responseCode);
        assertTrue(resp3.getJobCluster() != null);
        System.out.println("Job cluster " + resp3.getJobCluster());
        assertEquals(clusterName, resp3.getJobCluster().get().getName());
        System.out.println("Updated job cluster " + resp3.getJobCluster());
        assertEquals(MigrationStrategyEnum.ONE_WORKER, resp3.getJobCluster().get().getMigrationConfig().getStrategy());
        verify(jobStoreMock, times(1)).updateJobCluster(any());
        verify(jobStoreMock, times(1)).createJobCluster(any());
    }

    // A successful artifact update appends a jar and bumps the latest version.
    @Test
    public void testJobClusterArtifactUpdate() throws Exception {
        TestKit probe = new TestKit(system);
        String clusterName = "testJobClusterArtifactUpdate";
        MantisScheduler
        schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
        ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
        jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
        JobClusterProto.InitializeJobClusterResponse createResp =
            probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
        assertEquals(SUCCESS, createResp.responseCode);
        // Push a new artifact/version; the cluster should record a second jar.
        UpdateJobClusterArtifactRequest req = new UpdateJobClusterArtifactRequest(clusterName, "a1", "1.0.1", true, "user");
        jobClusterActor.tell(req, probe.getRef());
        UpdateJobClusterArtifactResponse resp = probe.expectMsgClass(UpdateJobClusterArtifactResponse.class);
        assertEquals(SUCCESS, resp.responseCode);
        assertEquals(jobClusterActor, probe.getLastSender());
        jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
        GetJobClusterResponse resp3 = probe.expectMsgClass(GetJobClusterResponse.class);
        assertEquals(SUCCESS, resp3.responseCode);
        assertTrue(resp3.getJobCluster() != null);
        System.out.println("Job cluster " + resp3.getJobCluster());
        assertEquals(clusterName, resp3.getJobCluster().get().getName());
        System.out.println("Updated job cluster " + resp3.getJobCluster());
        assertEquals(2, resp3.getJobCluster().get().getJars().size());
        //assertEquals("a1", resp3.getJobCluster().getJobClusterDefinition().getJobClusterConfig().getArtifactName());
        assertEquals("1.0.1", resp3.getJobCluster().get().getLatestVersion());
        List<NamedJob.Jar> jars = resp3.getJobCluster().get().getJars();
        // The newly appended jar must carry a real upload timestamp.
        assertTrue(jars.get(jars.size() - 1).getUploadedAt() != -1);
        verify(jobStoreMock, times(1)).updateJobCluster(any());
        verify(jobStoreMock, times(1)).createJobCluster(any());
    }

    // Re-using the creation version ("0.0.1") for an artifact update must be
    // rejected and leave the cluster unchanged.
    @Test
    public void testJobClusterArtifactUpdateNotUniqueFails() throws Exception {
        TestKit probe = new TestKit(system);
        String clusterName = "testJobClusterArtifactUpdateNotUniqueFails";
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
        ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
        jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
        JobClusterProto.InitializeJobClusterResponse createResp =
            probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
        assertEquals(SUCCESS, createResp.responseCode);
        // "0.0.1" is the version the cluster was created with -> not unique.
        UpdateJobClusterArtifactRequest req = new UpdateJobClusterArtifactRequest(clusterName, "a1", "0.0.1", true, "user");
        jobClusterActor.tell(req, probe.getRef());
        UpdateJobClusterArtifactResponse resp = probe.expectMsgClass(UpdateJobClusterArtifactResponse.class);
        assertEquals(CLIENT_ERROR, resp.responseCode);
        assertEquals(jobClusterActor, probe.getLastSender());
        // The cluster must be unchanged: still one jar, same latest version.
        jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
        GetJobClusterResponse resp3 = probe.expectMsgClass(GetJobClusterResponse.class);
        assertEquals(SUCCESS, resp3.responseCode);
        assertTrue(resp3.getJobCluster() != null);
        System.out.println("Job cluster " + resp3.getJobCluster());
        assertEquals(clusterName, resp3.getJobCluster().get().getName());
        System.out.println("job cluster " + resp3.getJobCluster());
        assertEquals(1, resp3.getJobCluster().get().getJars().size());
        //assertEquals("a1", resp3.getJobCluster().getJobClusterDefinition().getJobClusterConfig().getArtifactName());
        assertEquals("0.0.1", resp3.getJobCluster().get().getLatestVersion());
        verify(jobStoreMock, times(0)).updateJobCluster(any());
        verify(jobStoreMock, times(1)).createJobCluster(any());
    }

    // Each successful artifact update appends another jar and bumps the version.
    @Test
    public void testJobClusterArtifactUpdateMultipleTimes() throws Exception {
        TestKit probe = new TestKit(system);
        String clusterName =
            "testJobClusterArtifactUpdateMultipleTimes";
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
        ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
        jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
        JobClusterProto.InitializeJobClusterResponse createResp =
            probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
        assertEquals(SUCCESS, createResp.responseCode);
        // First artifact update: cluster should now have two jars.
        UpdateJobClusterArtifactRequest req = new UpdateJobClusterArtifactRequest(clusterName, "a1", "1.0.1", true, "user");
        jobClusterActor.tell(req, probe.getRef());
        UpdateJobClusterArtifactResponse resp = probe.expectMsgClass(UpdateJobClusterArtifactResponse.class);
        assertEquals(SUCCESS, resp.responseCode);
        assertEquals(jobClusterActor, probe.getLastSender());
        jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
        GetJobClusterResponse resp3 = probe.expectMsgClass(GetJobClusterResponse.class);
        assertEquals(SUCCESS, resp3.responseCode);
        assertTrue(resp3.getJobCluster() != null);
        System.out.println("Job cluster " + resp3.getJobCluster());
        assertEquals(clusterName, resp3.getJobCluster().get().getName());
        System.out.println("Updated job cluster " + resp3.getJobCluster());
        //assertEquals("a1", resp3.getJobCluster().getJobClusterDefinition().getJobClusterConfig().getArtifactName());
        assertEquals("1.0.1", resp3.getJobCluster().get().getLatestVersion());
        List<NamedJob.Jar> jars = resp3.getJobCluster().get().getJars();
        System.out.println("jars --> " + jars);
        assertEquals(2, jars.size());
        // Update again
        req = new UpdateJobClusterArtifactRequest(clusterName, "a2", "1.0.3", true, "user");
        jobClusterActor.tell(req, probe.getRef());
        resp = probe.expectMsgClass(UpdateJobClusterArtifactResponse.class);
        assertEquals(SUCCESS, resp.responseCode);
        assertEquals(jobClusterActor, probe.getLastSender());
        jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
        resp3 = probe.expectMsgClass(GetJobClusterResponse.class);
        assertEquals(SUCCESS, resp3.responseCode);
        assertTrue(resp3.getJobCluster() != null);
        System.out.println("Job cluster " + resp3.getJobCluster());
        assertEquals(clusterName, resp3.getJobCluster().get().getName());
        System.out.println("Updated job cluster " + resp3.getJobCluster());
        //assertEquals("a1", resp3.getJobCluster().getJobClusterDefinition().getJobClusterConfig().getArtifactName());
        assertEquals("1.0.3", resp3.getJobCluster().get().getLatestVersion());
        jars = resp3.getJobCluster().get().getJars();
        System.out.println("jars --> " + jars);
        assertEquals(3, jars.size());
        verify(jobStoreMock, times(2)).updateJobCluster(any());
        verify(jobStoreMock, times(1)).createJobCluster(any());
    }

    // An SLA with min > max is invalid: the update is rejected and the cluster
    // keeps its previous SLA.
    @Test
    public void testJobClusterInvalidSLAUpdateIgnored() throws Exception {
        TestKit probe = new TestKit(system);
        String clusterName = "testJobClusterInvalidSLAUpdateIgnored";
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
        ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
        jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
        JobClusterProto.InitializeJobClusterResponse createResp =
            probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
        assertEquals(SUCCESS, createResp.responseCode);
        // min (2) > max (1) -> invalid request.
        UpdateJobClusterSLARequest updateSlaReq = new UpdateJobClusterSLARequest(clusterName, 2, 1, "user");
        jobClusterActor.tell(updateSlaReq, probe.getRef());
        UpdateJobClusterSLAResponse resp = probe.expectMsgClass(UpdateJobClusterSLAResponse.class);
        assertEquals(CLIENT_ERROR, resp.responseCode);
        assertEquals(jobClusterActor, probe.getLastSender());
        jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
        GetJobClusterResponse resp3 = probe.expectMsgClass(GetJobClusterResponse.class);
        assertEquals(SUCCESS, resp3.responseCode);
        assertTrue(resp3.getJobCluster() != null);
        System.out.println("Job cluster " + resp3.getJobCluster());
        assertEquals(clusterName, resp3.getJobCluster().get().getName());
        // No changes to original SLA
        assertEquals(0, resp3.getJobCluster().get().getSla().getMin());
        assertEquals(0, resp3.getJobCluster().get().getSla().getMax());
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(0)).updateJobCluster(any());
    }

    // Labels can be replaced wholesale via UpdateJobClusterLabelsRequest.
    @Test
    public void testJobClusterLabelsUpdate() throws Exception {
        TestKit probe = new TestKit(system);
        String clusterName = "testJobClusterLabelsUpdate";
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
        ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
        jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
        JobClusterProto.InitializeJobClusterResponse createResp =
            probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
        assertEquals(SUCCESS, createResp.responseCode);
        // assert initially no labels
        jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
        GetJobClusterResponse resp3 = probe.expectMsgClass(GetJobClusterResponse.class);
        assertEquals(SUCCESS, resp3.responseCode);
        assertTrue(resp3.getJobCluster() != null);
        System.out.println("Job cluster " + resp3.getJobCluster());
        assertEquals(clusterName, resp3.getJobCluster().get().getName());
        System.out.println("Updated job cluster " + resp3.getJobCluster());
        assertEquals(0,
resp3.getJobCluster().get().getLabels().size()); // new labels List<Label> labels = Lists.newLinkedList(); Label l = new Label("labelname","labelvalue"); labels.add(l); UpdateJobClusterLabelsRequest updateLabelsReq = new UpdateJobClusterLabelsRequest(clusterName, labels, "user"); jobClusterActor.tell(updateLabelsReq, probe.getRef()); UpdateJobClusterLabelsResponse resp = probe.expectMsgClass(UpdateJobClusterLabelsResponse.class); assertEquals(SUCCESS, resp.responseCode); assertEquals(jobClusterActor, probe.getLastSender()); // get job cluster details jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef()); resp3 = probe.expectMsgClass(GetJobClusterResponse.class); assertEquals(SUCCESS, resp3.responseCode); assertTrue(resp3.getJobCluster() != null); assertEquals(clusterName, resp3.getJobCluster().get().getName()); //assert label list is of size 1 assertEquals(1, resp3.getJobCluster().get().getLabels().size()); assertEquals(l, resp3.getJobCluster().get().getLabels().get(0)); verify(jobStoreMock, times(1)).createJobCluster(any()); verify(jobStoreMock, times(1)).updateJobCluster(any()); } ////////////////////////////////////// CLUSTER UPDATE FLAVORS END //////////////////////////////////////////////////// ////////////////////////////////////// JOB SUBMIT OPERATIONS ///////////////////////////////////////////////////////////// @Test public void testJobSubmit() { TestKit probe = new TestKit(system); String clusterName = "testJobSubmit"; MantisScheduler schedulerMock = mock(MantisScheduler.class); MantisJobStore jobStoreMock = mock(MantisJobStore.class); final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName); ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher)); jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef()); JobClusterProto.InitializeJobClusterResponse createResp = 
probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        JobTestHelper.killJobSendWorkerTerminatedAndVerify(probe, clusterName, new JobId(clusterName, 1), jobClusterActor, new WorkerId(jobId, 0, 1));
        Thread.sleep(500);
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(1)).updateJobCluster(any());
        // archiving happens asynchronously after the kill, hence the Mockito timeout
        verify(jobStoreMock, timeout(2000).times(1)).archiveJob(any());
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        fail();
    }
    //Mockito.doThrow(IOException.class).when(jobStoreMock).storeNewJob(any());
}

/**
 * Submitting a job definition that carries neither an artifact (jar) nor
 * scheduling info must inherit both from the job cluster: artifact "myart" and
 * SINGLE_WORKER_SCHED_INFO.
 */
@Test
public void testJobSubmitWithNoJarAndSchedInfo() {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobSubmitWithNoJarAndSchedInfo";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        // deliberately no artifact/version and no scheduling info on the job definition
        final JobDefinition jobDefn = new JobDefinition.Builder()
                .withName(clusterName)
                .withParameters(Lists.newArrayList())
                .withUser("njoshi")
                .withSubscriptionTimeoutSecs(300)
                .withJobSla(new JobSla(0, 0, JobSla.StreamSLAType.Lossy, MantisJobDurationType.Transient, ""))
                .build();;
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);

        jobClusterActor.tell(new GetJobDetailsRequest("nj", JobId.fromId(jobId).get()), probe.getRef());
        GetJobDetailsResponse detailsResp = probe.expectMsgClass(GetJobDetailsResponse.class);
        // make sure it inherits from cluster
        assertEquals("myart", detailsResp.getJobMetadata().get().getArtifactName());
        // inherits cluster scheduling Info
        assertEquals(SINGLE_WORKER_SCHED_INFO, detailsResp.getJobMetadata().get().getSchedulingInfo());
        //JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 1), jobClusterActor);
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(1)).updateJobCluster(any());
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        fail();
    }
    //Mockito.doThrow(IOException.class).when(jobStoreMock).storeNewJob(any());
}

/**
 * When a submit names a version but no scheduling info, the job must inherit
 * the artifact and scheduling info registered for THAT version: version 0.0.2
 * maps to "myart2"/TWO_WORKER_SCHED_INFO, version 0.0.1 to the original
 * "myart"/SINGLE_WORKER_SCHED_INFO.
 */
@Test
public void testJobSubmitWithVersionAndNoSchedInfo() {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobSubmitWithVersionAndNoSchedInfo";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);

    // register a second artifact/version (0.0.2 -> myart2, two-worker scheduling info)
    JobClusterConfig clusterConfig = new JobClusterConfig.Builder()
            .withArtifactName("myart2")
            .withSchedulingInfo(TWO_WORKER_SCHED_INFO)
            .withVersion("0.0.2")
            .build();
    final JobClusterDefinitionImpl updatedFakeJobCluster = new JobClusterDefinitionImpl.Builder()
            .withJobClusterConfig(clusterConfig)
            .withName(clusterName)
            .withParameters(Lists.newArrayList())
            .withUser(user)
            .withIsReadyForJobMaster(true)
            .withOwner(DEFAULT_JOB_OWNER)
            .withMigrationConfig(WorkerMigrationConfig.DEFAULT)
            .withSla(NO_OP_SLA)
            .build();
    jobClusterActor.tell(new UpdateJobClusterRequest(updatedFakeJobCluster, "user"), probe.getRef());
    UpdateJobClusterResponse resp = probe.expectMsgClass(UpdateJobClusterResponse.class);

    jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
    GetJobClusterResponse getJobClusterResponse = probe.expectMsgClass(GetJobClusterResponse.class);
    assertEquals(2, getJobClusterResponse.getJobCluster().get().getJars().size());
    try {
        final JobDefinition jobDefn = new JobDefinition.Builder()
                .withName(clusterName)
                .withParameters(Lists.newArrayList())
                .withUser("njoshi")
                .withVersion("0.0.2")
                .withSubscriptionTimeoutSecs(300)
                .withJobSla(new JobSla(0, 0, JobSla.StreamSLAType.Lossy, MantisJobDurationType.Transient, ""))
                .build();;
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);

        jobClusterActor.tell(new GetJobDetailsRequest("nj", JobId.fromId(jobId).get()), probe.getRef());
        GetJobDetailsResponse detailsResp = probe.expectMsgClass(GetJobDetailsResponse.class);
        // make sure it inherits from cluster
        assertEquals("myart2", detailsResp.getJobMetadata().get().getArtifactName());
        // inherits cluster scheduling Info corresponding to the given artifact
        assertEquals(TWO_WORKER_SCHED_INFO, detailsResp.getJobMetadata().get().getSchedulingInfo());

        // Now submit with a different artifact and no scheduling Info
        final JobDefinition jobDefn2 = new JobDefinition.Builder()
                .withName(clusterName)
                .withParameters(Lists.newArrayList())
                .withUser("njoshi")
                .withVersion("0.0.1")
                .withSubscriptionTimeoutSecs(300)
                .withJobSla(new JobSla(0, 0, JobSla.StreamSLAType.Lossy,
MantisJobDurationType.Transient, ""))
                .build();;
        String jobId2 = clusterName + "-2";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn2, jobId2);

        jobClusterActor.tell(new GetJobDetailsRequest("nj", JobId.fromId(jobId2).get()), probe.getRef());
        GetJobDetailsResponse detailsResp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
        // make sure it inherits from cluster
        assertEquals("myart", detailsResp2.getJobMetadata().get().getArtifactName());
        // inherits cluster scheduling Info corresponding to the given artifact
        assertEquals(SINGLE_WORKER_SCHED_INFO, detailsResp2.getJobMetadata().get().getSchedulingInfo());

        JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 2), jobClusterActor);
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(3)).updateJobCluster(any());
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        fail();
    }
    //Mockito.doThrow(IOException.class).when(jobStoreMock).storeNewJob(any());
}

/**
 * Full worker lifecycle: Accepted -> (launched/initiated/started events) ->
 * Launched -> worker completes -> job reaches Completed and gets archived.
 */
@Test
public void testJobComplete() {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobComplete";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, 1, new WorkerId(jobId, 0, 1));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Launched);
        JobTestHelper.sendWorkerCompletedEvent(probe, jobClusterActor, jobId, new WorkerId(jobId, 0, 1));
        JobTestHelper.verifyJobStatusWithPolling(probe, jobClusterActor, jobId, JobState.Completed);
        // archive is asynchronous, so verify with a timeout
        verify(jobStoreMock, timeout(2000).times(1)).archiveJob(any());
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        fail();
    }
}

/**
 * With SLA min=1/max=1, killing the only running job must cause the cluster to
 * automatically launch a replacement job ({clusterName}-2 in Accepted state).
 */
@Test
public void testJobKillTriggersSLAToLaunchNew() {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobKillTriggersSLAToLaunchNew";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    SLA sla = new SLA(1, 1, null, null);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, Lists.newArrayList(), sla);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);

    String jobId = clusterName + "-1";
    WorkerId workerId1 = new WorkerId(clusterName, jobId, 0, 1);
    // when the scheduler is asked to unschedule the worker, feed a terminate
    // event back to the actor so the kill can complete
    doAnswer(invocation -> {
        WorkerEvent terminate = new WorkerTerminate(workerId1, WorkerState.Completed, JobCompletedReason.Killed, System.currentTimeMillis());
        jobClusterActor.tell(terminate, probe.getRef());
        return null;
    }).when(schedulerMock).unscheduleWorker(any(), any());
    try {
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient);
        JobId jId = new JobId(clusterName, 1);
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, 1, new WorkerId(clusterName, jobId, 0, 1));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Launched);
        JobTestHelper.killJobAndVerify(probe, clusterName, jId, jobClusterActor);
        Thread.sleep(500);
        // a new job should have been submitted
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, clusterName + "-2", SUCCESS, JobState.Accepted);
        //JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 2), jobClusterActor);
        // verify(jobStoreMock, times(1)).createJobCluster(any());
        // verify(jobStoreMock, times(1)).updateJobCluster(any());
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        fail();
    }
    //Mockito.doThrow(IOException.class).when(jobStoreMock).storeNewJob(any());
}
// TODO
// TODO

/**
 * With SLA min=1/max=1, submitting a second job must cause the cluster to kill
 * the older job: after job-2 launches, job-1 transitions to Completed.
 */
@Test
public void testJobSubmitTriggersSLAToKillOld() {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobSubmitTriggersSLAToKillOld";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    SLA sla = new SLA(1, 1, null, null);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, Lists.newArrayList(), sla);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);

    String jobId = clusterName + "-1";
    WorkerId workerId1 = new WorkerId(clusterName, jobId, 0, 1);
    doAnswer(invocation
-> {
        // reflect the unschedule back as a worker-terminate so the old job can complete
        WorkerEvent terminate = new WorkerTerminate(workerId1, WorkerState.Completed, JobCompletedReason.Killed, System.currentTimeMillis());
        jobClusterActor.tell(terminate, probe.getRef());
        return null;
    }).when(schedulerMock).unscheduleWorker(any(), any());
    try {
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient);
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, 1, new WorkerId(clusterName, jobId, 0, 1));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Launched);

        // submit 2nd job
        String jobId2 = clusterName + "-2";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId2);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId2, SUCCESS, JobState.Accepted);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId2, 1, new WorkerId(clusterName, jobId2, 0, 1));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId2, SUCCESS, JobState.Launched);

        // leftover flag from the commented-out polling loop below; currently unused
        boolean completed = false;
        // SLA (max=1) should have killed the older job
        assertTrue(JobTestHelper.verifyJobStatusWithPolling(probe, jobClusterActor, jobId, JobState.Completed));
        // jobClusterActor.tell(new ListJobIdsRequest(), probe.getRef());
        // ListJobIdsResponse listJobIdsResponse = probe.expectMsgClass(ListJobIdsResponse.class);
        // assertEquals(SUCCESS, listJobIdsResponse.responseCode);
        // assertEquals(1,listJobIdsResponse.getJobIds().size());
        // assertEquals(jobId2, listJobIdsResponse.getJobIds().get(0).getId());
        // // try a few times for timing issue
        // for(int i=0; i<10; i++) {
        // jobClusterActor.tell(new GetJobDetailsRequest("nj", JobId.fromId(jobId).get()), probe.getRef());
        // GetJobDetailsResponse detailsResp = probe.expectMsgClass(GetJobDetailsResponse.class);
        // if(JobState.Completed.equals(detailsResp.getJobMetadata().get().getState())) {
        // completed = true;
        // break;
        // }
        // }
        // assertTrue(completed);
        // JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Completed);
        JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 2), jobClusterActor);
        // verify(jobStoreMock, times(1)).createJobCluster(any());
        // verify(jobStoreMock, times(1)).updateJobCluster(any());
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        fail();
    }
    //Mockito.doThrow(IOException.class).when(jobStoreMock).storeNewJob(any());
}
//TODO

/**
 * Same SLA-enforced kill-old flow as above, but the store throws while
 * archiving a worker; the cluster must still complete the old job and end up
 * with only the new job listed.
 */
@Test
public void testJobSubmitTriggersSLAToKillOldHandlesErrors() {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobSubmitTriggersSLAToKillOldHandlesErrors";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    SLA sla = new SLA(1, 1, null, null);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, Lists.newArrayList(), sla);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        // simulate a store failure during worker archival
        doThrow(new NullPointerException("NPE archiving worker")).when(jobStoreMock).archiveWorker(any());
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, 1, new WorkerId(clusterName, jobId, 0, 1));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Launched);

        // submit 2nd job
        String jobId2 = clusterName + "-2";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId2);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId2, SUCCESS, JobState.Accepted);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId2, 1, new WorkerId(clusterName, jobId2, 0, 1));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId2, SUCCESS, JobState.Launched);

        // leftover flag from the commented-out polling loop below; currently unused
        boolean completed = false;
        assertTrue(JobTestHelper.verifyJobStatusWithPolling(probe, jobClusterActor, jobId, JobState.Completed));
        // try a few times for timing issue
        // for(int i=0; i<10; i++) {
        // jobClusterActor.tell(new GetJobDetailsRequest("nj", JobId.fromId(jobId).get()), probe.getRef());
        // GetJobDetailsResponse detailsResp = probe.expectMsgClass(GetJobDetailsResponse.class);
        // if(JobState.Completed.equals(detailsResp.getJobMetadata().get().getState())) {
        // completed = true;
        // break;
        // }
        // }
        // assertTrue(completed);

        // only the new job should remain listed despite the archival error
        jobClusterActor.tell(new ListJobIdsRequest(), probe.getRef());
        ListJobIdsResponse listResp = probe.expectMsgClass(ListJobIdsResponse.class);
        assertEquals(1, listResp.getJobIds().size());
        assertEquals(jobId2, listResp.getJobIds().get(0).getJobId());
        // JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Completed);
        //JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 2), jobClusterActor);
        // verify(jobStoreMock, times(1)).createJobCluster(any());
        // verify(jobStoreMock, times(1)).updateJobCluster(any());
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        fail();
    }
    //Mockito.doThrow(IOException.class).when(jobStoreMock).storeNewJob(any());
}

@Test
public void
testCronTriggersSLAToKillOld() {
    TestKit probe = new TestKit(system);
    // NOTE(review): clusterName reuses "testJobSubmitTriggersSLAToKillOld" rather
    // than this test's own name — looks like a copy-paste slip; confirm intent.
    String clusterName = "testJobSubmitTriggersSLAToKillOld";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    // cron fires every second; KEEP_NEW means the newly cron-launched job survives
    SLA sla = new SLA(1, 1, "0/1 * * * * ?", IJobClusterDefinition.CronPolicy.KEEP_NEW);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, Lists.newArrayList(), sla);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, 1, new WorkerId(clusterName, jobId, 0, 1));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Launched);
        // try a few times for timing issue
        String jobId2 = clusterName + "-2";
        // the cron trigger should have submitted job-2 ...
        assertTrue(JobTestHelper.verifyJobStatusWithPolling(probe, jobClusterActor, jobId2, JobState.Accepted));
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId2, 1, new WorkerId(clusterName, jobId2, 0, 1));
        // ... and, per KEEP_NEW, the original job gets terminated
        assertTrue(JobTestHelper.verifyJobStatusWithPolling(probe, jobClusterActor, jobId, JobState.Completed));
        // verify(jobStoreMock, times(1)).createJobCluster(any());
        // verify(jobStoreMock, times(1)).updateJobCluster(any());
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        fail();
    }
    //Mockito.doThrow(IOException.class).when(jobStoreMock).storeNewJob(any());
}

/**
 * Submitting the exact same "unique"-typed job definition twice must not start
 * a second job: the second submit returns the existing job's id, and the store
 * sees exactly one storeNewJob.
 */
@Test
public void testJobSubmitWithUnique() {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobSubmitWithUnique";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        // "mytype" marks this submission as unique
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient, "mytype");
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, 1, new WorkerId(jobId, 0, 1));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Launched);

        jobClusterActor.tell(new SubmitJobRequest(clusterName, "user", Optional.ofNullable(jobDefn)), probe.getRef());
        SubmitJobResponse submitResponse = probe.expectMsgClass(SubmitJobResponse.class);
        // Get the same job id back
        assertTrue(submitResponse.getJobId().isPresent());
        assertEquals(jobId, submitResponse.getJobId().get().getId());

        JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 1), jobClusterActor);
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(1)).updateJobCluster(any());
        verify(jobStoreMock, times(1)).storeNewJob(any());
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        fail();
    }
}

/**
 * "Quick" submit: after one normal submit, a submit with no job definition must
 * succeed by reusing the previous job's definition.
 */
@Test
public void testQuickJobSubmit() {
    TestKit probe = new TestKit(system);
    String clusterName = "testQuickJobSubmit";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        // submit another job this time with no job definition
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, null, clusterName + "-2");
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, clusterName + "-2", SUCCESS, JobState.Accepted);
        JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 1), jobClusterActor);
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(2)).updateJobCluster(any());
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        fail();
    }
    //Mockito.doThrow(IOException.class).when(jobStoreMock).storeNewJob(any());
}

/**
 * Quick submit must also work when the previous job carried no scheduling info
 * of its own (falling back to cluster values).
 */
@Test
public void testQuickJobSubmitWithNoSchedInfoInPreviousJob() {
    TestKit probe = new TestKit(system);
    String clusterName = "testQuickJobSubmitWithNoSchedInfoInPreviousJob";
    MantisScheduler schedulerMock =
mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        // job defn with scheduling info
        final JobDefinition jobDefn = new JobDefinition.Builder()
                .withName(clusterName)
                .withParameters(Lists.newArrayList())
                .withLabels(Lists.newArrayList())
                .withVersion("0.0.1")
                .withSubscriptionTimeoutSecs(300)
                .withUser("njoshi")
                .withJobSla(new JobSla(0, 0, JobSla.StreamSLAType.Lossy, MantisJobDurationType.Transient, "abc"))
                .build();
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        // submit another job this time with no job definition
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, null, clusterName + "-2");
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, clusterName + "-2", SUCCESS, JobState.Accepted);
        JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 1), jobClusterActor);
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(2)).updateJobCluster(any());
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        fail();
    }
    //Mockito.doThrow(IOException.class).when(jobStoreMock).storeNewJob(any());
}

/**
 * A submit with no labels inherits the cluster's labels; a submit that supplies
 * its own labels gets those (plus the mandatory system-added labels) instead.
 */
@Test
public void testJobSubmitWithNoSchedInfoUsesJobClusterValues() {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobSubmitWithNoSchedInfoUsesJobClusterValues";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    List<Label> clusterLabels = new ArrayList<>();
    Label label = new Label("clabelName", "cLabelValue");
    clusterLabels.add(label);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, clusterLabels);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        // no labels on this definition — should inherit the cluster's
        final JobDefinition jobDefn = new JobDefinition.Builder()
                .withName(clusterName)
                .withVersion("0.0.1")
                .withSubscriptionTimeoutSecs(0)
                .withUser("njoshi")
                .build();
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);

        jobClusterActor.tell(new GetJobDetailsRequest("nj", JobId.fromId(jobId).get()), probe.getRef());
        GetJobDetailsResponse detailsResp = probe.expectMsgClass(GetJobDetailsResponse.class);
        assertEquals(SUCCESS, detailsResp.responseCode);
        assertEquals(JobState.Accepted, detailsResp.getJobMetadata().get().getState());
        // assertEquals(clusterLabels.size() + LabelManager.numberOfMandatoryLabels(),detailsResp.getJobMetadata().get().getLabels().size());
        // confirm that the clusters labels got inherited
        assertEquals(1, detailsResp.getJobMetadata().get()
                .getLabels().stream().filter(l -> l.getName().equals("clabelName")).count());
        //assertEquals(label, detailsResp.getJobMetadata().get().getLabels().get(0));

        // Now submit another one with labels, it should not inherit cluster labels
        Label jobLabel = new Label("jobLabel", "jobValue");
        List<Label> jobLabelList = new ArrayList<>();
        jobLabelList.add(jobLabel);
        final JobDefinition jobDefn2 = new JobDefinition.Builder()
                .withName(clusterName)
                .withVersion("0.0.1")
                .withLabels(jobLabelList)
                .withSubscriptionTimeoutSecs(0)
                .withUser("njoshi")
                .build();
        String jobId2 = clusterName + "-2";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn2, jobId2);

        jobClusterActor.tell(new GetJobDetailsRequest("nj", JobId.fromId(jobId2).get()), probe.getRef());
        GetJobDetailsResponse detailsResp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
        assertEquals(SUCCESS, detailsResp2.responseCode);
        assertEquals(JobState.Accepted, detailsResp2.getJobMetadata().get().getState());
        // job's own label plus the system-added labels (cluster size used as baseline)
        assertEquals(clusterLabels.size() + 2, detailsResp2.getJobMetadata().get().getLabels().size());
        // confirm that the clusters labels got inherited
        //assertEquals(jobLabel, detailsResp2.getJobMetadata().get().getLabels().get(0));
        assertEquals(1, detailsResp2.getJobMetadata().get()
                .getLabels().stream().filter(l -> l.getName().equals(jobLabel.getName())).count());
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        fail();
    }
    //Mockito.doThrow(IOException.class).when(jobStoreMock).storeNewJob(any());
}

/**
 * A quick submit (no job definition) on a cluster with no prior job history
 * must fail with CLIENT_ERROR, and no job must exist afterwards.
 */
@Test
public void testQuickJobSubmitWithNoPreviousHistoryFails() {
    TestKit probe = new TestKit(system);
    String clusterName = "testQuickJobSubmitWithNoPreviousHistoryFails";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
final JobDefinition jobDefn = null; String jobId = clusterName + "-1"; JobTestHelper.submitJobAndVerifyStatus(probe, clusterName, jobClusterActor, jobDefn, null, CLIENT_ERROR); JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, CLIENT_ERROR_NOT_FOUND, JobState.Noop); verify(jobStoreMock, times(1)).createJobCluster(any()); verify(jobStoreMock, times(0)).updateJobCluster(any()); } catch (Exception e) { e.printStackTrace(); fail(); } } @Test @Ignore public void testUpdateJobClusterArtifactWithAutoSubmit() { TestKit probe = new TestKit(system); try { String clusterName = "testUpdateJobClusterArtifactWithAutoSubmit"; MantisScheduler schedulerMock = mock(MantisScheduler.class); MantisJobStore jobStoreMock = mock(MantisJobStore.class); SLA sla = new SLA(1,1,null,null); final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, Lists.newArrayList(),sla); ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher)); jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef()); JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class); assertEquals(SUCCESS, createResp.responseCode); final JobDefinition jobDefn = createJob(clusterName,1, MantisJobDurationType.Transient); String jobId = clusterName + "-1"; jobClusterActor.tell(new SubmitJobRequest(clusterName,"user", Optional.ofNullable(jobDefn)), probe.getRef()); SubmitJobResponse submitResponse = probe.expectMsgClass(SubmitJobResponse.class); JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe,jobClusterActor,jobId,1,new WorkerId(clusterName,jobId,0,1)); JobTestHelper.getJobDetailsAndVerify(probe,jobClusterActor,jobId, BaseResponse.ResponseCode.SUCCESS,JobState.Launched); // Update artifact with skip submit = false String artifact = "newartifact.zip"; String version = "0.0.2"; 
jobClusterActor.tell(new UpdateJobClusterArtifactRequest(clusterName, artifact, version,false, user), probe.getRef()); UpdateJobClusterArtifactResponse resp = probe.expectMsgClass(UpdateJobClusterArtifactResponse.class); // ensure new job was launched String jobId2 = clusterName + "-2"; assertTrue(JobTestHelper.verifyJobStatusWithPolling(probe, jobClusterActor,jobId2,JobState.Accepted)); // send it worker events to move it to started state JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe,jobClusterActor,jobId2,1,new WorkerId(clusterName,jobId2,0,1)); jobClusterActor.tell(new GetJobDetailsRequest("nj", JobId.fromId(jobId2).get()), probe.getRef()); GetJobDetailsResponse detailsResp = probe.expectMsgClass(Duration.ofSeconds(5), GetJobDetailsResponse.class); assertTrue((JobState.Launched.equals(detailsResp.getJobMetadata().get().getState()))); assertEquals(artifact,detailsResp.getJobMetadata().get().getArtifactName()); assertTrue(JobTestHelper.verifyJobStatusWithPolling(probe, jobClusterActor,jobId,JobState.Completed)); } catch (InvalidJobException e) { e.printStackTrace(); } } @Test public void testJobSubmitFails() { TestKit probe = new TestKit(system); try { String clusterName = "testJobSubmitFails"; MantisScheduler schedulerMock = mock(MantisScheduler.class); MantisJobStore jobStoreMock = mock(MantisJobStore.class); final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName); Mockito.doThrow(Exception.class).when(jobStoreMock).storeNewJob(any()); ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher)); jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef()); JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class); assertEquals(SUCCESS, createResp.responseCode); final JobDefinition jobDefn = createJob(clusterName,1, 
MantisJobDurationType.Transient);
        String jobId = clusterName + "-1";
        jobClusterActor.tell(new SubmitJobRequest(clusterName,"user", Optional.ofNullable(jobDefn)), probe.getRef());
        SubmitJobResponse submitResponse = probe.expectMsgClass(SubmitJobResponse.class);
        // store failure is reported to the caller as a server-side error
        assertEquals(SERVER_ERROR, submitResponse.responseCode);
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(1)).updateJobCluster(any());
        // no worker state may have been persisted for the failed submission
        verify(jobStoreMock, times(0)).storeNewWorker(any());
        verify(jobStoreMock, times(0)).storeNewWorkers(any(),any());
    } catch (Exception e) {
        fail();
    }
}

////////////////////////////////// JOB SUBMIT OPERATIONS END/////////////////////////////////////////////////////////////

////////////////////////////////// OTHER JOB OPERATIONS //////////////////////////////////////////////////////////////

/**
 * The cluster actor exposes a BehaviorSubject streaming the last submitted
 * JobId; a subscriber should observe the id of a subsequently submitted job.
 */
@Test
public void testGetLastSubmittedJobSubject() {
    TestKit probe = new TestKit(system);
    String clusterName = "testGetLastSubmittedJobSubject";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        jobClusterActor.tell(new GetLastSubmittedJobIdStreamRequest(clusterName), probe.getRef());
        GetLastSubmittedJobIdStreamResponse getLastSubmittedJobIdStreamResponse = probe.expectMsgClass(GetLastSubmittedJobIdStreamResponse.class);
        assertEquals(SUCCESS, getLastSubmittedJobIdStreamResponse.responseCode);
        // latch released when the subject emits the expected job id
        CountDownLatch jobIdLatch = new CountDownLatch(1);
        assertTrue(getLastSubmittedJobIdStreamResponse.getjobIdBehaviorSubject().isPresent());
        BehaviorSubject<JobId> jobIdBehaviorSubject = getLastSubmittedJobIdStreamResponse.getjobIdBehaviorSubject().get();
        jobIdBehaviorSubject.subscribeOn(Schedulers.io()).subscribe((jId) -> {
            System.out.println("Got Jid ------> " + jId);
            String jIdStr = jId.getId();
            assertEquals(clusterName + "-1",jIdStr);
            jobIdLatch.countDown();
        });
        final JobDefinition jobDefn = createJob(clusterName,1, MantisJobDurationType.Transient);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        jobIdLatch.await(1000,TimeUnit.SECONDS);
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        fail();
    }
    //Mockito.doThrow(IOException.class).when(jobStoreMock).storeNewJob(any());
}

/**
 * Requesting the last-submitted-JobId stream with a cluster name that does not
 * match this actor must fail with CLIENT_ERROR and return no subject.
 */
@Test
public void testGetLastSubmittedJobSubjectWithWrongClusterNameFails() {
    TestKit probe = new TestKit(system);
    String clusterName = "testGetLastSubmittedJobSubjectWithWrongClusterNameFails";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        // deliberately use a cluster name this actor does not own
        jobClusterActor.tell(new GetLastSubmittedJobIdStreamRequest("randomCluster"), probe.getRef());
        GetLastSubmittedJobIdStreamResponse getLastSubmittedJobIdStreamResponse = probe.expectMsgClass(GetLastSubmittedJobIdStreamResponse.class);
        assertEquals(CLIENT_ERROR, getLastSubmittedJobIdStreamResponse.responseCode);
        assertTrue(!getLastSubmittedJobIdStreamResponse.getjobIdBehaviorSubject().isPresent());
        // normal submission still works afterwards
        final JobDefinition jobDefn = createJob(clusterName,1, MantisJobDurationType.Transient);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        fail();
    }
    //Mockito.doThrow(IOException.class).when(jobStoreMock).storeNewJob(any());
}

/**
 * After a worker resubmit, the replaced worker should show up in the job's
 * archived worker list with its original worker number/index.
 */
@Test
public void testListArchivedWorkers() {
    TestKit probe = new TestKit(system);
    String clusterName = "testListArchivedWorkers";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    // uses the real jobStore (not a mock) so worker archival actually happens
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStore, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    String jobId = clusterName + "-1";
    try {
        final JobDefinition jobDefn = createJob(clusterName,1, MantisJobDurationType.Transient);
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe,jobClusterActor,jobId,1,new WorkerId(jobId,0,1));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Launched);
        // resubmit worker number 1; the old worker should be archived
        jobClusterActor.tell(new ResubmitWorkerRequest(jobId,1,user,of("justbecause")),probe.getRef());
        ResubmitWorkerResponse resp =
probe.expectMsgClass(ResubmitWorkerResponse.class);
        assertTrue(BaseResponse.ResponseCode.SUCCESS.equals(resp.responseCode));
        jobClusterActor.tell(new ListArchivedWorkersRequest(new JobId(clusterName, 1)),probe.getRef());
        ListArchivedWorkersResponse archivedWorkersResponse = probe.expectMsgClass(ListArchivedWorkersResponse.class);
        assertEquals(SUCCESS, archivedWorkersResponse.responseCode);
        // exactly the one replaced worker is archived, with its original identity
        assertEquals(1,archivedWorkersResponse.getWorkerMetadata().size());
        IMantisWorkerMetadata archivedWorker = archivedWorkersResponse.getWorkerMetadata().get(0);
        assertEquals(1,archivedWorker.getWorkerNumber());
        assertEquals(0,archivedWorker.getWorkerIndex());
        assertEquals(0, archivedWorker.getResubmitOf());
        JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 1), jobClusterActor);
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        fail();
    }
}

/**
 * A heartbeat from a worker of a job this cluster never launched (a zombie)
 * should trigger a terminate call to the scheduler for that worker.
 */
@Test
public void testZombieWorkerKilledOnMessage() {
    String clusterName = "testZombieWorkerKilledOnMessage";
    TestKit probe = new TestKit(system);
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        String jobId = clusterName + "-1";
        WorkerId workerId = new WorkerId(clusterName, jobId,0,1);
        // heartbeat for a job that was never submitted to this cluster
        WorkerEvent heartBeat2 = new WorkerHeartbeat(new Status(jobId, 1, workerId.getWorkerIndex(), workerId.getWorkerNum(), TYPE.HEARTBEAT, "", MantisJobState.Started, System.currentTimeMillis()));
        jobClusterActor.tell(heartBeat2, probe.getRef());
        // round-trip request/response to make sure the heartbeat was processed
        jobClusterActor.tell(new GetJobClusterRequest(clusterName),probe.getRef());
        GetJobClusterResponse resp = probe.expectMsgClass(GetJobClusterResponse.class);
        assertEquals(clusterName,resp.getJobCluster().get().getName());
        verify(schedulerMock,times(1)).unscheduleAndTerminateWorker(workerId,empty());
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}

/**
 * A terminated event for an unknown (zombie) worker is ignored — no extra
 * terminate call should be issued to the scheduler.
 */
@Test
public void testZombieWorkerTerminateEventIgnored() {
    TestKit probe = new TestKit(system);
    String clusterName = "testZombieWorkerTerminateEventIgnored";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        String jobId = clusterName + "-1";
        WorkerId workerId = new WorkerId(clusterName, jobId,0,1);
        JobTestHelper.sendWorkerTerminatedEvent(probe,jobClusterActor,jobId,workerId);
        // already-terminated zombies need no scheduler action
        verify(schedulerMock,times(0)).unscheduleAndTerminateWorker(workerId,empty());
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}

/**
 * Resubmitting a worker replaces it with a new worker number and bumps the
 * resubmitOf/totalResubmitCount bookkeeping on the replacement.
 */
@Test
public void testResubmitWorker() {
    TestKit probe = new TestKit(system);
    String clusterName = "testResubmitWorker";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()),
            probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        final JobDefinition jobDefn = createJob(clusterName,1, MantisJobDurationType.Transient);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe,jobClusterActor,jobId,1,new WorkerId(jobId,0,1));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Launched);
        jobClusterActor.tell(new ResubmitWorkerRequest(jobId,1,user,of("justbecause")),probe.getRef());
        ResubmitWorkerResponse resp = probe.expectMsgClass(ResubmitWorkerResponse.class);
        assertTrue(BaseResponse.ResponseCode.SUCCESS.equals(resp.responseCode));
        jobClusterActor.tell(new GetJobDetailsRequest("nj", JobId.fromId(jobId).get()), probe.getRef());
        GetJobDetailsResponse detailsResp = probe.expectMsgClass(GetJobDetailsResponse.class);
        IMantisWorkerMetadata workerMetadata = detailsResp.getJobMetadata().get().getWorkerByIndex(1,0).get().getMetadata();
        // replacement takes the next worker number and records the resubmit lineage
        assertEquals(2,workerMetadata.getWorkerNumber());
        assertEquals(1,workerMetadata.getResubmitOf());
        assertEquals(1, workerMetadata.getTotalResubmitCount());
        JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 1), jobClusterActor);
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(1)).updateJobCluster(any());
        verify(jobStoreMock,times(1)).replaceTerminatedWorker(any(),any());
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        fail();
    }
}

/**
 * Scaling a scalable stage from 1 to 2 workers should persist and schedule the
 * additional worker and report the new actual worker count.
 */
@Test
public void testScaleStage() {
    TestKit probe = new TestKit(system);
    try {
        String clusterName = "testScaleStage";
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore
jobStoreMock = mock(MantisJobStore.class);
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
        ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
        jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
        JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
        assertEquals(SUCCESS, createResp.responseCode);
        // scaling strategies on CPU and DataDrop make the stage scalable
        Map<StageScalingPolicy.ScalingReason, StageScalingPolicy.Strategy> smap = new HashMap<>();
        smap.put(StageScalingPolicy.ScalingReason.CPU, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.CPU, 0.5, 0.75, null));
        smap.put(StageScalingPolicy.ScalingReason.DataDrop, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.DataDrop, 0.0, 2.0, null));
        SchedulingInfo SINGLE_WORKER_SCHED_INFO = new SchedulingInfo.Builder().numberOfStages(1)
                .multiWorkerScalableStageWithConstraints(1,DEFAULT_MACHINE_DEFINITION,Lists.newArrayList(),Lists.newArrayList(),new StageScalingPolicy(1,1,10,1,1,1, smap)).build();
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient, "USER_TYPE", SINGLE_WORKER_SCHED_INFO, Lists.newArrayList());
        String jobId = clusterName + "-1";
        jobClusterActor.tell(new SubmitJobRequest(clusterName, "user", Optional.ofNullable(jobDefn)), probe.getRef());
        SubmitJobResponse submitResponse = probe.expectMsgClass(SubmitJobResponse.class);
        // drive workers in both stages to Started so the job reaches Launched
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, 0, new WorkerId(clusterName, jobId, 0, 1));
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, 1, new WorkerId(clusterName, jobId, 0, 2));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, BaseResponse.ResponseCode.SUCCESS, JobState.Launched);
        // scale stage 1 from 1 to 2 workers
        jobClusterActor.tell(new ScaleStageRequest(jobId,1,2,user,"No reason"), probe.getRef());
        ScaleStageResponse scaleResp = probe.expectMsgClass(ScaleStageResponse.class);
        System.out.println("scale Resp: " + scaleResp.message);
        assertEquals(SUCCESS, scaleResp.responseCode);
        assertEquals(2,scaleResp.getActualNumWorkers());
        verify(jobStoreMock, times(1)).storeNewJob(any());
        // initial worker
        verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
        //scale up worker
        verify(jobStoreMock, times(1)).storeNewWorker(any());
        verify(jobStoreMock, times(6)).updateWorker(any());
        verify(jobStoreMock, times(3)).updateJob(any());
        // initial worker and scale up worker
        verify(schedulerMock, times(3)).scheduleWorker(any());
    } catch(Exception e) {
        e.printStackTrace();
        fail();
    }
}

////////////////////////////////// OTHER JOB OPERATIONS //////////////////////////////////////////////////////////////

/////////////////////////// JOB LIST OPERATIONS /////////////////////////////////////////////////////////////////

/**
 * Details for a completed job should be served from the archived-job store
 * once the job is killed and its worker has terminated.
 */
@Test
public void testGetJobDetailsForArchivedJob() {
    TestKit probe = new TestKit(system);
    String clusterName = "testGetJobDetailsForArchivedJob";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    String jobId = clusterName + "-1";
    try {
        // archived-job store hands back a Completed job for this id
        when(jobStoreMock.getArchivedJob(jobId)).thenReturn(of(new MantisJobMetadataImpl.Builder()
                .withJobState(JobState.Completed)
                .withJobId(new JobId(clusterName, 1))
                .withSubmittedAt(1000)
                .withNextWorkerNumToUse(2)
                .build()));
        final JobDefinition jobDefn = createJob(clusterName,1, MantisJobDurationType.Transient);
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 1), jobClusterActor);
        jobClusterActor.tell(new WorkerTerminate(new WorkerId(clusterName + "-1",0,1),WorkerState.Completed,JobCompletedReason.Killed,System.currentTimeMillis()),probe.getRef());
        // allow time for the job to be archived before querying details
        Thread.sleep(1000);
        jobClusterActor.tell(new GetJobDetailsRequest(user,jobId),probe.getRef());
        GetJobDetailsResponse resp = probe.expectMsgClass(GetJobDetailsResponse.class);
        // assertEquals(SUCCESS,resp.responseCode);
        assertEquals(JobState.Completed, resp.getJobMetadata().get().getState());
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(1)).updateJobCluster(any());
        // verify(jobStoreMock, times(1)).getArchivedJob(jobId);
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        fail();
    }
}

/**
 * Listing job ids should reflect both submissions and honor the activeOnly
 * filter as jobs are killed one by one.
 */
@Test
public void testListJobIdsForCluster() throws InvalidJobException {
    TestKit probe = new TestKit(system);
    String clusterName = "testListJobsForCluster";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    final JobDefinition jobDefn1 = createJob(clusterName);
    String jobId = clusterName + "-1";
JobTestHelper.submitJobAndVerifySuccess(probe,clusterName, jobClusterActor, jobDefn1, jobId);
    String jobId2 = clusterName + "-2";
    JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn1, jobId2);
    // both jobs show up in an unfiltered listing
    jobClusterActor.tell(new ListJobIdsRequest(), probe.getRef());
    ListJobIdsResponse listResp = probe.expectMsgClass(ListJobIdsResponse.class);
    assertEquals(SUCCESS, listResp.responseCode);
    assertEquals(2, listResp.getJobIds().size());
    boolean foundJob1 = false;
    boolean foundJob2 = false;
    for(JobClusterProtoAdapter.JobIdInfo jobIdInfo : listResp.getJobIds()) {
        if(jobIdInfo.getJobId().equals(jobId)) { foundJob1 = true; }
        else if(jobIdInfo.getJobId().equals(jobId2)) { foundJob2 = true; }
    }
    assertTrue(foundJob1);
    assertTrue(foundJob2);
    JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 1), jobClusterActor);
    // activeOnly=true excludes the killed job
    jobClusterActor.tell(new ListJobIdsRequest(empty(), empty(), of(true), empty(), empty(), empty()), probe.getRef());
    ListJobIdsResponse listResp2 = probe.expectMsgClass(ListJobIdsResponse.class);
    assertEquals(SUCCESS, listResp2.responseCode);
    assertEquals(1, listResp2.getJobIds().size());
    // assertFalse(listResp2.getJobIds().contains(JobId.fromId(jobId).get()));
    // assertTrue(listResp2.getJobIds().contains(JobId.fromId(jobId2).get()));
    foundJob1 = false;
    foundJob2 = false;
    for(JobClusterProtoAdapter.JobIdInfo jobIdInfo : listResp2.getJobIds()) {
        if(jobIdInfo.getJobId().equals(jobId)) { foundJob1 = true; }
        else if(jobIdInfo.getJobId().equals(jobId2)) { foundJob2 = true; }
    }
    assertFalse(foundJob1);
    assertTrue(foundJob2);
    JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 2), jobClusterActor);
    // with both jobs killed the unfiltered listing is empty
    jobClusterActor.tell(new ListJobIdsRequest(), probe.getRef());
    ListJobIdsResponse listResp3 = probe.expectMsgClass(ListJobIdsResponse.class);
    assertEquals(SUCCESS, listResp3.responseCode);
    assertEquals(0, listResp3.getJobIds().size());
    // assertFalse(listResp3.getJobIds().contains(JobId.fromId(jobId).get()));
    // assertFalse(listResp3.getJobIds().contains(JobId.fromId(jobId2).get()));
    foundJob1 = false;
    foundJob2 = false;
    for(JobClusterProtoAdapter.JobIdInfo jobIdInfo : listResp3.getJobIds()) {
        if(jobIdInfo.getJobId().equals(jobId)) { foundJob1 = true; }
        else if(jobIdInfo.getJobId().equals(jobId2)) { foundJob2 = true; }
    }
    assertFalse(foundJob1);
    assertFalse(foundJob2);
}

/**
 * Listing full job metadata reflects submissions and honors the activeOnly
 * filter as jobs are killed.
 */
@Test
public void testListJobsForCluster() throws InvalidJobException, InterruptedException {
    TestKit probe = new TestKit(system);
    String clusterName = "testListJobsForCluster";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    final JobDefinition jobDefn1 = createJob(clusterName);
    String jobId = clusterName + "-1";
    JobTestHelper.submitJobAndVerifySuccess(probe,clusterName, jobClusterActor, jobDefn1, jobId);
    JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe,jobClusterActor,jobId,1,new WorkerId(clusterName,jobId,0,1));
    String jobId2 = clusterName + "-2";
    JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn1, jobId2);
    JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe,jobClusterActor,jobId2,1,new WorkerId(clusterName,jobId2,0,1));
    jobClusterActor.tell(new ListJobsRequest(), probe.getRef());
    // Thread.sleep(1000);
    ListJobsResponse listResp = probe.expectMsgClass(ListJobsResponse.class);
    assertEquals(SUCCESS, listResp.responseCode);
    assertEquals(2, listResp.getJobList().size());
    // assertTrue(listResp.getJobIds().contains(JobId.fromId(jobId).get()));
    // assertTrue(listResp.getJobIds().contains(JobId.fromId(jobId2).get()));
    JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 1), jobClusterActor);
    // activeOnly=true (7th criteria argument) excludes the killed job
    jobClusterActor.tell(new ListJobsRequest(new ListJobCriteria(empty(), empty(), Lists.newArrayList(), Lists.newArrayList(), Lists.newArrayList(), Lists.newArrayList(), of(true), empty(), empty(), empty())), probe.getRef());
    ListJobsResponse listResp2 = probe.expectMsgClass(ListJobsResponse.class);
    assertEquals(SUCCESS, listResp2.responseCode);
    assertEquals(1, listResp2.getJobList().size());
    // assertFalse(listResp2.getJobIds().contains(JobId.fromId(jobId).get()));
    // assertTrue(listResp2.getJobIds().contains(JobId.fromId(jobId2).get()));
    JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 2), jobClusterActor);
    jobClusterActor.tell(new ListJobsRequest(new ListJobCriteria(empty(), empty(), Lists.newArrayList(), Lists.newArrayList(), Lists.newArrayList(), Lists.newArrayList(), of(true), empty(), empty(), empty())), probe.getRef());
    ListJobsResponse listResp3 = probe.expectMsgClass(ListJobsResponse.class);
    assertEquals(SUCCESS, listResp3.responseCode);
    assertEquals(0, listResp3.getJobList().size());
    // assertFalse(listResp3.getJobIds().contains(JobId.fromId(jobId).get()));
    // assertFalse(listResp3.getJobIds().contains(JobId.fromId(jobId2).get()));
}

/**
 * JobListHelper.getLastSubmittedJobId picks the most recently submitted job
 * across active and completed jobs (here the active job with submit time 2000L).
 */
@Test
public void testGetLastSubmittedJob() throws Exception {
    TestKit probe = new TestKit(system);
    String clusterName = "testGetLastSubmittedJob";
    final JobDefinition jobDefn1 = createJob(clusterName);
    JobId jobId3 = new JobId(clusterName, 3);
    JobInfo jInfo3 = new JobInfo(jobId3,jobDefn1, 1000L, null, JobState.Launched, "user1");
    JobId jobId4 = new JobId(clusterName, 4);
    JobInfo jInfo4 = new JobInfo(jobId4,jobDefn1, 2000L, null, JobState.Launched, "user1");
    JobId jobId1 = new JobId(clusterName, 1);
    JobClusterDefinitionImpl.CompletedJob cJob1 = new
JobClusterDefinitionImpl.CompletedJob(clusterName, jobId1.getId(), "0.0.1", JobState.Completed, 800L, 900L, "user1", new ArrayList<>());
    JobId jobId2 = new JobId(clusterName, 2);
    JobClusterDefinitionImpl.CompletedJob cJob2 = new JobClusterDefinitionImpl.CompletedJob(clusterName, jobId2.getId(), "0.0.1", JobState.Completed, 900L, 1000L, "user1", new ArrayList<>());
    List<JobClusterDefinitionImpl.CompletedJob> completedJobs = new ArrayList<>();
    completedJobs.add(cJob1);
    completedJobs.add(cJob2);
    List<JobInfo> activeList = new ArrayList<>();
    activeList.add(jInfo3);
    activeList.add(jInfo4);
    Optional<JobId> lastJobIdOp = JobListHelper.getLastSubmittedJobId(activeList,completedJobs);
    assertTrue(lastJobIdOp.isPresent());
    // active job 4 (submitted at 2000L) is the most recent overall
    assertEquals(jobId4, lastJobIdOp.get());
}

/**
 * With only completed jobs getlastSubmitted should return the completed job with highest job number
 * @throws Exception
 */
@Test
public void testGetLastSubmittedJobWithCompletedOnly() throws Exception {
    TestKit probe = new TestKit(system);
    String clusterName = "testGetLastSubmittedJobWithCompletedOnly";
    final JobDefinition jobDefn1 = createJob(clusterName);
    JobId jobId1 = new JobId(clusterName, 1);
    JobClusterDefinitionImpl.CompletedJob cJob1 = new JobClusterDefinitionImpl.CompletedJob(clusterName, jobId1.getId(), "0.0.1", JobState.Completed, 800L, 900L, "user1", new ArrayList<>());
    JobId jobId2 = new JobId(clusterName, 2);
    JobClusterDefinitionImpl.CompletedJob cJob2 = new JobClusterDefinitionImpl.CompletedJob(clusterName, jobId2.getId(), "0.0.1", JobState.Completed, 900L, 1000L, "user1", new ArrayList<>());
    List<JobClusterDefinitionImpl.CompletedJob> completedJobs = new ArrayList<>();
    completedJobs.add(cJob1);
    completedJobs.add(cJob2);
    List<JobInfo> activeList = new ArrayList<>();
    Optional<JobId> lastJobIdOp = JobListHelper.getLastSubmittedJobId(activeList,completedJobs);
    assertTrue(lastJobIdOp.isPresent());
    assertEquals(jobId2, lastJobIdOp.get());
}

/**
 * No Active or completed jobs should return an empty Optional
 * @throws Exception
 */
@Test
public void testGetLastSubmittedJobWithNoJobs() throws Exception {
    TestKit probe = new TestKit(system);
    String clusterName = "testGetLastSubmittedJobWithNoJobs";
    final JobDefinition jobDefn1 = createJob(clusterName);
    List<JobClusterDefinitionImpl.CompletedJob> completedJobs = new ArrayList<>();
    List<JobInfo> activeList = new ArrayList<>();
    Optional<JobId> lastJobIdOp = JobListHelper.getLastSubmittedJobId(activeList,completedJobs);
    assertFalse(lastJobIdOp.isPresent());
}

/**
 * Label filtering on list-jobs: a single-label query matches one job, an OR of
 * two labels matches both jobs, and an AND of the two labels matches neither
 * (no job carries both).
 */
@Test
public void testListJobWithLabelMatch() {
    TestKit probe = new TestKit(system);
    String clusterName = "testListJobWithLabelMatch";
    try {
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
        ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMock, eventPublisher));
        jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
        JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
        assertEquals(SUCCESS, createResp.responseCode);
        // job 1 carries label l1=l1v1
        final JobDefinition jobDefn1;
        List<Label> labelList1 = new ArrayList<>();
        labelList1.add(new Label("l1","l1v1"));
        jobDefn1 = createJob(clusterName, labelList1);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe,clusterName, jobClusterActor, jobDefn1, jobId);
        // job 2 carries label l2=l2v2
        List<Label> labelList2 = new ArrayList<>();
        labelList2.add(new Label("l2","l2v2"));
        String jobId2 = clusterName + "-2";
        JobDefinition jobDefn2 = createJob(clusterName, labelList2);
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn2, jobId2);
        // Query for Label1
        List<Integer> emptyIntList = Lists.newArrayList();
        List<WorkerState.MetaState> workerState = Lists.newArrayList();
        ListJobCriteria criteria1 = new ListJobCriteria(Optional.empty(), Optional.empty(), emptyIntList, emptyIntList, emptyIntList,workerState,Optional.empty(),Optional.empty(),of("l1=l1v1"), Optional.empty());
        jobClusterActor.tell(new ListJobsRequest(criteria1), probe.getRef());
        ListJobsResponse listResp = probe.expectMsgClass(ListJobsResponse.class);
        assertEquals(SUCCESS, listResp.responseCode);
        // Only job1 should be returned
        assertEquals(1, listResp.getJobList().size());
        assertEquals(jobId, listResp.getJobList().get(0).getJobMetadata().getJobId());
        assertTrue(listResp.getJobList().get(0).getStageMetadataList().size() == 1);
        System.out.println("Workers returned : " + listResp.getJobList().get(0).getWorkerMetadataList());
        assertTrue(listResp.getJobList().get(0).getWorkerMetadataList().size() == 1);
        // Query with an OR query for both labels
        ListJobCriteria criteria2 = new ListJobCriteria(Optional.empty(), Optional.empty(), emptyIntList, emptyIntList, emptyIntList,workerState,Optional.empty(),Optional.empty(),of("l1=l1v1,l2=l2v2"), Optional.empty());
        jobClusterActor.tell(new ListJobsRequest(criteria2), probe.getRef());
        ListJobsResponse listRes2 = probe.expectMsgClass(ListJobsResponse.class);
        assertEquals(SUCCESS, listRes2.responseCode);
        // Both jobs should be returned
        assertEquals(2, listRes2.getJobList().size());
        assertTrue(jobId.equals(listRes2.getJobList().get(0).getJobMetadata().getJobId()) || jobId.equals(listRes2.getJobList().get(1).getJobMetadata().getJobId()));
        assertTrue(jobId2.equals(listRes2.getJobList().get(0).getJobMetadata().getJobId()) || jobId2.equals(listRes2.getJobList().get(1).getJobMetadata().getJobId()));
        // Query with an AND query for both labels
        ListJobCriteria criteria3 = new ListJobCriteria(Optional.empty(), Optional.empty(), emptyIntList, emptyIntList, emptyIntList,workerState,Optional.empty(),Optional.empty(),of("l1=l1v1,l2=l2v2"), of("and"));
        jobClusterActor.tell(new ListJobsRequest(criteria3), probe.getRef());
        ListJobsResponse listRes3 =
probe.expectMsgClass(ListJobsResponse.class);
        assertEquals(SUCCESS, listRes3.responseCode);
        // No jobs should be returned
        assertEquals(0, listRes3.getJobList().size());
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}

/**
 * A worker that terminates abnormally should be replaced with a new worker
 * number, and the dead worker should be archived in the store.
 */
@Test
public void testLostWorkerGetsReplaced() {
    TestKit probe = new TestKit(system);
    String clusterName = "testLostWorkerGetsReplaced";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    //MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    // spy on the real store so archival behavior can be verified while keeping real persistence
    MantisJobStore jobStoreSpied = Mockito.spy(jobStore);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreSpied, schedulerMock, eventPublisher));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        final JobDefinition jobDefn = createJob(clusterName,1, MantisJobDurationType.Transient);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        // JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        // JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 1), jobClusterActor);
        verify(jobStoreSpied, times(1)).createJobCluster(any());
        verify(jobStoreSpied, times(1)).updateJobCluster(any());
        int stageNo = 1;
        // send launched event
        WorkerId workerId = new WorkerId(jobId, 0, 1);
        // send heartbeat
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, stageNo, workerId);
        // check job status again
        jobClusterActor.tell(new GetJobDetailsRequest("nj", jobId), probe.getRef());
        //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
        GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
        System.out.println("resp " + resp2 + " msg " + resp2.message);
        assertEquals(SUCCESS, resp2.responseCode);
        // Job started
        assertEquals(JobState.Launched, resp2.getJobMetadata().get().getState());
        // send launched event
        // worker 2 gets terminated abnormally
        JobTestHelper.sendWorkerTerminatedEvent(probe, jobClusterActor, jobId, workerId);
        // replaced worker comes up and sends events
        WorkerId workerId2_replaced = new WorkerId(jobId, 0, 2);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, stageNo, workerId2_replaced);
        jobClusterActor.tell(new GetJobDetailsRequest("nj", jobId), probe.getRef());
        GetJobDetailsResponse resp4 = probe.expectMsgClass(GetJobDetailsResponse.class);
        IMantisJobMetadata jobMeta = resp4.getJobMetadata().get();
        Map<Integer, ? extends IMantisStageMetadata> stageMetadata = jobMeta.getStageMetadata();
        IMantisStageMetadata stage = stageMetadata.get(1);
        for (JobWorker worker : stage.getAllWorkers()) {
            System.out.println("worker -> " + worker.getMetadata());
        }
        // 2 initial schedules and 1 replacement
        verify(schedulerMock, timeout(1_000).times(2)).scheduleWorker(any());
        // archive worker should get called once for the dead worker
        // verify(jobStoreMock, timeout(1_000).times(1)).archiveWorker(any());
        Mockito.verify(jobStoreSpied).archiveWorker(any());
        jobClusterActor.tell(new ListJobsRequest(), probe.getRef());
        ListJobsResponse listResp2 = probe.expectMsgClass(ListJobsResponse.class);
        assertEquals(SUCCESS, listResp2.responseCode);
        assertEquals(1, listResp2.getJobList().size());
        for(MantisJobMetadataView jb : listResp2.getJobList() ) {
            System.out.println("Jb -> " + jb);
        }
        //assertEquals(jobActor, probe.getLastSender());
    } catch (InvalidJobException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        fail();
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    } finally {
        // always tear the actor down, even when an assertion above fails
        system.stop(jobClusterActor);
    }
}

@Test
public void testExpireOldJobs() {
    //TODO
}
}
4,218
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/jobcluster/LabelManagerTest.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.jobcluster; import static io.mantisrx.master.jobcluster.LabelManager.SystemLabels.*; import static org.junit.Assert.assertEquals; import java.util.ArrayList; import java.util.List; import java.util.stream.Collectors; import io.mantisrx.shaded.com.google.common.collect.Lists; import io.mantisrx.common.Label; import io.mantisrx.runtime.JobSla; import io.mantisrx.runtime.MantisJobDurationType; import io.mantisrx.runtime.command.InvalidJobException; import io.mantisrx.server.master.domain.JobDefinition; import org.junit.Test; public class LabelManagerTest { @Test public void insertResubmitLabelTest() throws InvalidJobException { JobDefinition jobDefinition = generateJobDefinition("insertResubmitLabelTest", new ArrayList<>(), "art.zip", "1.0"); JobDefinition updatedJobDefn = LabelManager.insertAutoResubmitLabel(jobDefinition); assertEquals(1, updatedJobDefn.getLabels().size()); Label label = updatedJobDefn.getLabels().get(0); assertEquals(MANTIS_IS_RESUBMIT_LABEL.label, label.getName()); } @Test public void doNotinsertResubmitLabelIfAlreadyExistsTest() throws InvalidJobException { List<Label> labels = new ArrayList<>(); labels.add(new Label(MANTIS_IS_RESUBMIT_LABEL.label, "true")); JobDefinition jobDefinition = generateJobDefinition("DoNotinsertResubmitLabelIfAlreadyExistsTest", labels, "art.zip", "1.0"); JobDefinition updatedJobDefn = 
LabelManager.insertAutoResubmitLabel(jobDefinition); assertEquals(1, updatedJobDefn.getLabels().size()); Label label = updatedJobDefn.getLabels().get(0); assertEquals(MANTIS_IS_RESUBMIT_LABEL.label, label.getName()); } @Test public void insertArtifactLabelTest() throws InvalidJobException { String artifactName = "art.zip"; JobDefinition jobDefinition = generateJobDefinition("insertResubmitLabelTest", new ArrayList<>(), artifactName, "1.0"); JobDefinition updatedJobDefn = LabelManager.insertSystemLabels(jobDefinition, false); assertEquals(2, updatedJobDefn.getLabels().size()); List<Label> labels = updatedJobDefn.getLabels().stream().filter( label -> label.getName().equals(MANTIS_ARTIFACT_LABEL.label)) .collect(Collectors.toList()); Label label = labels.get(0); assertEquals(MANTIS_ARTIFACT_LABEL.label, label.getName()); assertEquals(artifactName, label.getValue()); } @Test public void replaceArtifactLabelTest() throws InvalidJobException { String artifactName = "art1.zip"; List<Label> labels = new ArrayList<>(); labels.add(new Label(MANTIS_ARTIFACT_LABEL.label, "art0.zip")); JobDefinition jobDefinition = generateJobDefinition("replaceArtifactLabelTest", labels, artifactName, "1.0"); JobDefinition updatedJobDefn = LabelManager.insertSystemLabels(jobDefinition, false); assertEquals(2, updatedJobDefn.getLabels().size()); labels = updatedJobDefn.getLabels().stream().filter( label -> label.getName().equals(MANTIS_ARTIFACT_LABEL.label)) .collect(Collectors.toList()); Label label = labels.get(0); assertEquals(MANTIS_ARTIFACT_LABEL.label, label.getName()); assertEquals(artifactName, label.getValue()); } @Test public void insertVersionLabelTest() throws InvalidJobException { String artifactName = "art.zip"; JobDefinition jobDefinition = generateJobDefinition("insertVersionLabelTest", new ArrayList<>(), artifactName, "1.0"); JobDefinition updatedJobDefn = LabelManager.insertSystemLabels(jobDefinition, false); assertEquals(2, updatedJobDefn.getLabels().size()); List<Label> 
labels = updatedJobDefn.getLabels().stream().filter( label -> label.getName().equals(MANTIS_VERSION_LABEL.label)) .collect(Collectors.toList()); Label label = labels.get(0); assertEquals(MANTIS_VERSION_LABEL.label, label.getName()); assertEquals("1.0", label.getValue()); } @Test public void replaceVersionLabelTest() throws InvalidJobException { String artifactName = "art1.zip"; String v0 = "1.0"; String v1 = "2.0"; List<Label> labels = new ArrayList<>(); labels.add(new Label(MANTIS_VERSION_LABEL.label, v0)); JobDefinition jobDefinition = generateJobDefinition("replaceVersionLabelTest", labels, artifactName, "2.0"); JobDefinition updatedJobDefn = LabelManager.insertSystemLabels(jobDefinition, false); assertEquals(2, updatedJobDefn.getLabels().size()); labels = updatedJobDefn.getLabels().stream().filter( label -> label.getName().equals(MANTIS_VERSION_LABEL.label)) .collect(Collectors.toList()); Label label = labels.get(0); assertEquals(MANTIS_VERSION_LABEL.label, label.getName()); assertEquals(v1, label.getValue()); } @Test public void systemLabelTest() throws InvalidJobException { String artifactName = "art1.zip"; List<Label> labels = new ArrayList<>(); labels.add(new Label(MANTIS_ARTIFACT_LABEL.label, "art0.zip")); JobDefinition jobDefinition = generateJobDefinition("systemLabelTest", labels, artifactName,"1.0"); JobDefinition updatedJobDefn = LabelManager.insertSystemLabels(jobDefinition, true); assertEquals(3, updatedJobDefn.getLabels().size()); for(Label l : updatedJobDefn.getLabels()) { if(l.getName().equals(MANTIS_ARTIFACT_LABEL.label)) { assertEquals(artifactName, l.getValue()); } else if (l.getName().equals(MANTIS_IS_RESUBMIT_LABEL.label)){ assertEquals("true", l.getValue()); } else { assertEquals("1.0", l.getValue()); } } } JobDefinition generateJobDefinition(String name, List<Label> labelList, String artifactName, String version) throws InvalidJobException { return new JobDefinition.Builder() .withName(name) .withParameters(Lists.newArrayList()) 
.withLabels(labelList) .withSchedulingInfo(JobClusterTest.SINGLE_WORKER_SCHED_INFO) .withArtifactName(artifactName) .withVersion(version) .withSubscriptionTimeoutSecs(1) .withUser("njoshi") .withJobSla(new JobSla(0, 0, JobSla.StreamSLAType.Lossy, MantisJobDurationType.Transient, "userType")) .build(); } }
4,219
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/jobcluster/JobManagerTest.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.jobcluster; import akka.actor.AbstractActor; import akka.actor.ActorContext; import com.netflix.mantis.master.scheduler.TestHelpers; import io.mantisrx.master.events.LifecycleEventPublisher; import io.mantisrx.master.jobcluster.job.JobTestHelper; import io.mantisrx.server.master.domain.JobClusterDefinitionImpl; import io.mantisrx.server.master.scheduler.MantisScheduler; import org.junit.BeforeClass; import org.junit.Test; import akka.actor.ActorRef; import akka.actor.ActorSystem; import akka.testkit.javadsl.TestKit; import io.mantisrx.master.jobcluster.JobClusterActor.JobInfo; import io.mantisrx.master.jobcluster.JobClusterActor.JobManager; import io.mantisrx.master.jobcluster.job.JobState; import io.mantisrx.server.master.domain.JobDefinition; import io.mantisrx.server.master.domain.JobId; import io.mantisrx.server.master.persistence.IMantisStorageProvider; import io.mantisrx.server.master.persistence.MantisJobStore; import io.mantisrx.server.master.persistence.SimpleCachedFileStorageProvider; import static java.util.Optional.empty; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Mockito.*; import java.io.IOException; import java.time.Instant; import java.util.List; import java.util.Optional; public class JobManagerTest { 
private static MantisJobStore jobStore; private static AbstractActor.ActorContext context; private static MantisScheduler scheduler; private static LifecycleEventPublisher publisher; @BeforeClass public static void setup() { jobStore = mock(MantisJobStore.class); context = mock(AbstractActor.ActorContext.class); scheduler = mock(MantisScheduler.class); publisher = mock(LifecycleEventPublisher.class); JobTestHelper.createDirsIfRequired(); TestHelpers.setupMasterConfig(); } @Test public void acceptedToActive() { JobClusterActor.JobManager jm = new JobManager("name", context, scheduler, publisher, jobStore); JobId jId1 = new JobId("name",1); JobInfo jInfo1 = new JobInfo(jId1, null, 0, null, JobState.Accepted, "nj"); assertTrue(jm.markJobAccepted(jInfo1)); assertEquals(1, jm.acceptedJobsCount()); assertTrue(jm.markJobStarted(jInfo1)); assertEquals(0, jm.acceptedJobsCount()); assertEquals(1, jm.activeJobsCount()); assertTrue(jm.getAllNonTerminalJobsList().contains(jInfo1)); } @Test public void acceptedToCompleted() { JobClusterActor.JobManager jm = new JobManager("name", context, scheduler, publisher, jobStore); JobId jId1 = new JobId("name",1); JobInfo jInfo1 = new JobInfo(jId1, null, 0, null, JobState.Accepted, "nj"); assertTrue(jm.markJobAccepted(jInfo1)); assertEquals(1, jm.acceptedJobsCount()); assertTrue(jm.getCompletedJobsList().size() == 0); assertTrue(jm.markCompleted(jId1,empty(),JobState.Completed).isPresent()); assertEquals(0, jm.acceptedJobsCount()); assertEquals(1, jm.getCompletedJobsList().size()); assertEquals(0, jm.activeJobsCount()); assertFalse(jm.getAllNonTerminalJobsList().contains(jInfo1)); assertTrue(jm.getCompletedJobsList().size() == 1); JobClusterDefinitionImpl.CompletedJob completedJob = jm.getCompletedJobsList().get(0); assertEquals(jId1.getId(), completedJob.getJobId()); } @Test public void acceptedToTerminating() { JobClusterActor.JobManager jm = new JobManager("name", context, scheduler, publisher, jobStore); JobId jId1 = new 
JobId("name",1); JobInfo jInfo1 = new JobInfo(jId1, null, 0, null, JobState.Accepted, "nj"); assertTrue(jm.markJobAccepted(jInfo1)); assertTrue(jm.getAllNonTerminalJobsList().contains(jInfo1)); assertEquals(1, jm.acceptedJobsCount()); assertTrue(jm.markJobTerminating(jInfo1, JobState.Terminating_abnormal)); assertTrue(jm.getAllNonTerminalJobsList().contains(jInfo1)); assertEquals(0, jm.acceptedJobsCount()); assertEquals(0 , jm.activeJobsCount()); Optional<JobInfo> j1 = jm.getJobInfoForNonTerminalJob(jId1); assertTrue(j1.isPresent()); assertEquals(jId1, j1.get().jobId); } @Test public void terminatingToActiveIsIgnored() { JobClusterActor.JobManager jm = new JobManager("name", context, scheduler, publisher, jobStore); JobId jId1 = new JobId("name",1); JobDefinition jdMock = mock(JobDefinition.class); JobInfo jInfo1 = new JobInfo(jId1, jdMock, 0, null, JobState.Accepted, "nj"); jm.markJobAccepted(jInfo1); assertEquals(1, jm.acceptedJobsCount()); Optional<JobInfo> jInfo1Op = jm.getJobInfoForNonTerminalJob(jId1); assertTrue(jInfo1Op.isPresent()); assertTrue(jm.markJobTerminating(jInfo1Op.get(), JobState.Terminating_abnormal)); jInfo1Op = jm.getJobInfoForNonTerminalJob(jId1); assertTrue(jInfo1Op.isPresent()); assertFalse(jm.markJobStarted(jInfo1Op.get())); } @Test public void activeToAcceptedFails() { JobClusterActor.JobManager jm = new JobManager("name", context, scheduler, publisher, jobStore); JobId jId1 = new JobId("name",1); JobInfo jInfo1 = new JobInfo(jId1, null, 0, null, JobState.Accepted, "nj"); assertTrue(jm.markJobAccepted(jInfo1)); assertEquals(1, jm.acceptedJobsCount()); assertTrue(jm.markJobTerminating(jInfo1, JobState.Terminating_abnormal)); assertFalse(jm.markJobAccepted(jInfo1)); } @Test public void testGetAcceptedJobList() { JobClusterActor.JobManager jm = new JobManager("name", context, scheduler, publisher, jobStore); JobId jId1 = new JobId("name",1); JobInfo jInfo1 = new JobInfo(jId1, null, 0, null, JobState.Accepted, "nj"); 
assertTrue(jm.markJobAccepted(jInfo1)); JobId jId2 = new JobId("name",2); JobInfo jInfo2 = new JobInfo(jId2, null, 0, null, JobState.Accepted, "nj"); assertTrue(jm.markJobAccepted(jInfo2)); List<JobInfo> acceptedJobList = jm.getAcceptedJobsList(); assertEquals(2, acceptedJobList.size()); assertTrue(jId1.equals(acceptedJobList.get(0).jobId) || jId1.equals(acceptedJobList.get(1).jobId)); assertTrue(jId2.equals(acceptedJobList.get(0).jobId) || jId2.equals(acceptedJobList.get(1).jobId)); try { acceptedJobList.remove(0); fail(); } catch (Exception e) { } } @Test public void testGetActiveJobList() { JobClusterActor.JobManager jm = new JobManager("name", context, scheduler, publisher, jobStore); JobId jId1 = new JobId("name",1); JobInfo jInfo1 = new JobInfo(jId1, null, 0, null, JobState.Accepted, "nj"); assertTrue(jm.markJobAccepted(jInfo1)); assertTrue(jm.markJobStarted(jInfo1)); JobId jId2 = new JobId("name",2); JobInfo jInfo2 = new JobInfo(jId2, null, 0, null, JobState.Accepted, "nj"); assertTrue(jm.markJobAccepted(jInfo2)); assertTrue(jm.markJobStarted(jInfo2)); List<JobInfo> acceptedJobList = jm.getAcceptedJobsList(); assertEquals(0, acceptedJobList.size()); List<JobInfo> activeJobList = jm.getActiveJobsList(); assertEquals(2, jm.getActiveJobsList().size()); assertTrue(jId1.equals(activeJobList.get(0).jobId) || jId1.equals(activeJobList.get(1).jobId)); assertTrue(jId2.equals(activeJobList.get(0).jobId) || jId2.equals(activeJobList.get(1).jobId)); try { activeJobList.remove(0); fail(); } catch (Exception e) { } } @Test public void testPurgeOldJobs() { String clusterName = "testPurgeOldJobs"; MantisJobStore jobStoreMock = mock(MantisJobStore.class); JobClusterActor.JobManager jm = new JobManager(clusterName, context, scheduler, publisher, jobStoreMock); JobId jId1 = new JobId(clusterName,1); JobInfo jInfo1 = new JobInfo(jId1, null, 0, null, JobState.Accepted, "nj"); assertTrue(jm.markJobAccepted(jInfo1)); assertTrue(jm.getAllNonTerminalJobsList().contains(jInfo1)); 
JobId jId2 = new JobId(clusterName,2); JobInfo jInfo2 = new JobInfo(jId2, null, 1, null, JobState.Accepted, "nj"); assertTrue(jm.markJobAccepted(jInfo2)); assertTrue(jm.getAllNonTerminalJobsList().contains(jInfo2)); assertTrue(jm.getAllNonTerminalJobsList().size() == 2); assertEquals(jInfo1 ,jm.getAllNonTerminalJobsList().get(1)); assertEquals(jInfo2 ,jm.getAllNonTerminalJobsList().get(0)); jm.markJobTerminating(jInfo1, JobState.Terminating_abnormal); Instant completionInstant = Instant.now().minusSeconds(5); jm.markCompleted(jId1,completionInstant.toEpochMilli(),empty(),JobState.Completed); assertEquals(1,jm.getCompletedJobsList().size()); assertEquals(jId1.getId(), jm.getCompletedJobsList().get(0).getJobId()); jm.purgeOldCompletedJobs(Instant.now().minusSeconds(3).toEpochMilli()); assertEquals(0,jm.getCompletedJobsList().size()); try { verify(jobStoreMock,times(1)).deleteCompletedJob(clusterName,jId1.getId()); verify(jobStoreMock,times(1)).deleteJob(jId1.getId()); } catch (IOException e) { e.printStackTrace(); fail(); } catch (Exception e) { e.printStackTrace(); fail(); } } @Test public void testJobListSortedCorrectly() { String clusterName = "testJobListSortedCorrectly"; MantisJobStore jobStoreMock = mock(MantisJobStore.class); JobClusterActor.JobManager jm = new JobManager(clusterName, context, scheduler, publisher, jobStoreMock); JobId jId1 = new JobId(clusterName,1); JobInfo jInfo1 = new JobInfo(jId1, null, 0, null, JobState.Accepted, "nj"); assertTrue(jm.markJobAccepted(jInfo1)); assertTrue(jm.getAllNonTerminalJobsList().contains(jInfo1)); JobId jId2 = new JobId(clusterName,2); JobInfo jInfo2 = new JobInfo(jId2, null, 1, null, JobState.Accepted, "nj"); assertTrue(jm.markJobAccepted(jInfo2)); assertTrue(jm.getAllNonTerminalJobsList().contains(jInfo2)); assertTrue(jm.getAllNonTerminalJobsList().size() == 2); assertEquals(jInfo1 ,jm.getAllNonTerminalJobsList().get(1)); assertEquals(jInfo2 ,jm.getAllNonTerminalJobsList().get(0)); jm.markJobTerminating(jInfo1, 
JobState.Terminating_abnormal); Instant completionInstant = Instant.now().minusSeconds(5); jm.markCompleted(jId1,completionInstant.toEpochMilli(),empty(),JobState.Completed); assertEquals(1,jm.getCompletedJobsList().size()); assertEquals(jId1.getId(), jm.getCompletedJobsList().get(0).getJobId()); jm.markJobTerminating(jInfo1, JobState.Terminating_abnormal); completionInstant = Instant.now().minusSeconds(2); jm.markCompleted(jId2,completionInstant.toEpochMilli(),empty(),JobState.Completed); assertEquals(2,jm.getCompletedJobsList().size()); assertEquals(jId2.getId(), jm.getCompletedJobsList().get(0).getJobId()); assertEquals(jId1.getId(), jm.getCompletedJobsList().get(1).getJobId()); } }
4,220
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/jobcluster/job/JobTestMigrationTests.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.master.jobcluster.job;

import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;

import java.time.Instant;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.testkit.javadsl.TestKit;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import com.netflix.fenzo.VirtualMachineCurrentState;
import com.netflix.fenzo.VirtualMachineLease;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventPublisherImpl;
import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl;
import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.jobcluster.proto.JobProto;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.WorkerMigrationConfig.MigrationStrategyEnum;
import io.mantisrx.runtime.command.InvalidJobException;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.domain.IJobClusterDefinition;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.ScheduleRequest;
import io.mantisrx.server.master.scheduler.WorkerOnDisabledVM;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;

/**
 * Tests worker migration off a disabled VM: a JobActor configured with a
 * ONE_WORKER migration strategy should reschedule a worker that was flagged
 * via {@link WorkerOnDisabledVM} when the next heartbeat check fires.
 * Scheduling activity is observed through latch counts in {@link DummyScheduler}
 * rather than Mockito verification.
 */
public class JobTestMigrationTests {

    static ActorSystem system;
    private static final String user = "mantis";
    // Real (non-mock) event publisher wired to logging subscribers.
    final LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(
            new AuditEventSubscriberLoggingImpl(),
            new StatusEventSubscriberLoggingImpl(),
            new WorkerEventSubscriberLoggingImpl());

    @BeforeClass
    public static void setup() {
        system = ActorSystem.create();
        TestHelpers.setupMasterConfig();
    }

    @AfterClass
    public static void tearDown() {
        //((SimpleCachedFileStorageProvider)storageProvider).deleteAllFiles();
        TestKit.shutdownActorSystem(system);
        system = null;
    }

    /**
     * Drives a single-worker job through: init -> worker started -> worker's VM
     * disabled -> heartbeat check triggers migration -> replacement worker starts.
     * Expects 2 scheduleWorker calls (original + migrated replacement) and
     * 1 unscheduleWorker call (the migrated-away worker), counted via latches.
     */
    @Test
    public void testWorkerMigration() {
        String clusterName= "testWorkerMigration";
        TestKit probe = new TestKit(system);
        SchedulingInfo sInfo = new SchedulingInfo.Builder()
                .numberOfStages(1)
                .singleWorkerStageWithConstraints(
                        new MachineDefinition(1.0,1.0,1.0,3),
                        Lists.newArrayList(), Lists.newArrayList())
                .build();
        // ONE_WORKER strategy: migrate a single worker per check cycle.
        IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(
                clusterName, sInfo,
                new WorkerMigrationConfig(MigrationStrategyEnum.ONE_WORKER, "{}"));
        // Expected scheduler activity: 2 schedules (initial + replacement), 1 unschedule.
        CountDownLatch scheduleCDL = new CountDownLatch(2);
        CountDownLatch unscheduleCDL = new CountDownLatch(1);
        JobDefinition jobDefn;
        try {
            jobDefn = JobTestHelper.generateJobDefinition(clusterName, sInfo);
            // Latch-based scheduler stand-in instead of a Mockito mock.
            MantisScheduler schedulerMock = new DummyScheduler(scheduleCDL, unscheduleCDL); //mock(MantisScheduler.class);
            //
            MantisJobStore jobStoreMock = mock(MantisJobStore.class);
            MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
                    .withJobId(new JobId(clusterName,2))
                    .withSubmittedAt(Instant.now())
                    .withJobState(JobState.Accepted)
                    .withNextWorkerNumToUse(1)
                    .withJobDefinition(jobDefn)
                    .build();
            final ActorRef jobActor = system.actorOf(
                    JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreMock,
                            schedulerMock, eventPublisher));

            jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            JobProto.JobInitialized initMsg =
                    probe.expectMsgClass(JobProto.JobInitialized.class);
            assertEquals(SUCCESS, initMsg.responseCode);

            String jobId = clusterName + "-2";
            int stageNo = 1;
            WorkerId workerId = new WorkerId(jobId, 0, 1);
            // send Launched, Initiated and heartbeat
            JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(
                    probe, jobActor, jobId, stageNo, workerId);

            // check job status again
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId),
                    probe.getRef());
            JobClusterManagerProto.GetJobDetailsResponse resp3 =
                    probe.expectMsgClass(JobClusterManagerProto.GetJobDetailsResponse.class);
            assertEquals(SUCCESS, resp3.responseCode);
            // worker has started so job should be started.
            assertEquals(JobState.Launched,resp3.getJobMetadata().get().getState());

            // Send migrate worker message (marks the worker's VM as disabled).
            jobActor.tell(new WorkerOnDisabledVM(workerId), probe.getRef());

            // Trigger check hb status and that should start the migration. And migrate first worker
            Instant now = Instant.now();
            jobActor.tell(new JobProto.CheckHeartBeat(), probe.getRef());

            // send HB for the migrated worker (worker number 2 is the replacement)
            WorkerId migratedWorkerId1 = new WorkerId(jobId, 0, 2);
            JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(
                    probe, jobActor, jobId, stageNo, migratedWorkerId1);

            // Trigger another check should be noop
            // jobActor.tell(new JobProto.CheckHeartBeat(now.plusSeconds(120)), probe.getRef());

            // NOTE(review): await(...) return values are ignored, so a latch that never
            // reaches zero would not fail this test — verify intent with the author.
            scheduleCDL.await(1, TimeUnit.SECONDS);
            unscheduleCDL.await(1, TimeUnit.SECONDS);
            // // 1 original submissions and 1 resubmit because of migration
            // when(schedulerMock.scheduleWorker(any())).
            // verify(schedulerMock, times(2)).scheduleWorker(any());
            //// // 1 kill due to resubmits
            // verify(schedulerMock, times(1)).unscheduleWorker(any(), any());
            // //assertEquals(jobActor, probe.getLastSender());
        } catch (InvalidJobException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
            fail();
        } catch (Exception e) {
            e.printStackTrace();
            fail();
        }
    }

    /**
     * Minimal {@link MantisScheduler} that counts down the supplied latches on
     * scheduleWorker/unscheduleWorker; every other operation is a no-op.
     */
    class DummyScheduler implements MantisScheduler {
        CountDownLatch schedL;
        CountDownLatch unschedL;

        public DummyScheduler(CountDownLatch scheduleCDL, CountDownLatch unscheduleCDL) {
            schedL = scheduleCDL;
            unschedL = unscheduleCDL;
        }

        @Override
        public void scheduleWorker(ScheduleRequest scheduleRequest) {
            // Record the schedule request by counting down the latch.
            System.out.println("----------------------> schedule Worker Called");
            schedL.countDown();
        }

        @Override
        public void unscheduleWorker(WorkerId workerId, Optional<String> hostname) {
            // Record the unschedule request by counting down the latch.
            unschedL.countDown();
        }

        @Override
        public void unscheduleAndTerminateWorker(WorkerId workerId, Optional<String> hostname) {
            // no-op: not exercised by this test
        }

        @Override
        public void updateWorkerSchedulingReadyTime(WorkerId workerId, long when) {
            // no-op: not exercised by this test
        }

        @Override
        public void initializeRunningWorker(ScheduleRequest scheduleRequest, String hostname) {
            // no-op: not exercised by this test
        }

        @Override
        public void rescindOffer(String offerId) {
            // no-op: not exercised by this test
        }

        @Override
        public void rescindOffers(String hostname) {
            // no-op: not exercised by this test
        }

        @Override
        public void addOffers(List<VirtualMachineLease> offers) {
            // no-op: not exercised by this test
        }

        @Override
        public void disableVM(String hostname, long durationMillis) throws IllegalStateException {
            // no-op: not exercised by this test
        }

        @Override
        public void enableVM(String hostname) {
            // no-op: not exercised by this test
        }

        @Override
        public List<VirtualMachineCurrentState> getCurrentVMState() {
            // no-op: not exercised by this test
            return null;
        }

        @Override
        public void setActiveVmGroups(List<String> activeVmGroups) {
            // no-op: not exercised by this test
        }
    }

    public static void main(String[] args) {
    }
}
4,221
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/jobcluster/job/WorkerResubmitRateLimiterTest.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.jobcluster.job; import io.mantisrx.server.core.domain.WorkerId; import org.junit.Test; import java.util.List; import static org.junit.Assert.*; public class WorkerResubmitRateLimiterTest { @Test public void ctorTest() { WorkerResubmitRateLimiter wrrl = new WorkerResubmitRateLimiter("5:10:15", 5); assertEquals(5,wrrl.getExpireResubmitDelaySecs()); long [] resubmitIntervalArray = wrrl.getResubmitIntervalSecs(); assertEquals(4, resubmitIntervalArray.length); assertEquals(0, resubmitIntervalArray[0]); assertEquals(5, resubmitIntervalArray[1]); assertEquals(10, resubmitIntervalArray[2]); assertEquals(15, resubmitIntervalArray[3]); } @Test public void ctorTest_nointervalgiven() { WorkerResubmitRateLimiter wrrl = new WorkerResubmitRateLimiter("", 5); assertEquals(5,wrrl.getExpireResubmitDelaySecs()); long [] resubmitIntervalArray = wrrl.getResubmitIntervalSecs(); assertEquals(4, resubmitIntervalArray.length); assertEquals(0, resubmitIntervalArray[0]); assertEquals(5, resubmitIntervalArray[1]); assertEquals(10, resubmitIntervalArray[2]); assertEquals(20, resubmitIntervalArray[3]); try { wrrl = new WorkerResubmitRateLimiter("", 0); fail(); } catch(Exception e) { } try { wrrl = new WorkerResubmitRateLimiter("", -1); fail(); } catch(Exception e) { } } @Test public void addWorkerTest() { WorkerResubmitRateLimiter wrrl = new WorkerResubmitRateLimiter("5:10:15", 5); int stageNum = 1; 
long currTime = System.currentTimeMillis(); WorkerId workerId = new WorkerId("TestJob-1", 0, 1); long resubmitTime = wrrl.getWorkerResubmitTime(workerId, stageNum, currTime); assertEquals(currTime, resubmitTime); resubmitTime = wrrl.getWorkerResubmitTime(workerId, stageNum, currTime); assertEquals(currTime + 5000, resubmitTime); resubmitTime = wrrl.getWorkerResubmitTime(workerId, stageNum, currTime); assertEquals(currTime + 10000, resubmitTime); resubmitTime = wrrl.getWorkerResubmitTime(workerId, stageNum, currTime); assertEquals(currTime + 15000, resubmitTime); resubmitTime = wrrl.getWorkerResubmitTime(workerId, stageNum, currTime); assertEquals(currTime + 15000, resubmitTime); } @Test public void addMultipleWorkerTest() { WorkerResubmitRateLimiter wrrl = new WorkerResubmitRateLimiter("5:10:15", 5); int stageNum = 1; long currTime = System.currentTimeMillis(); WorkerId workerId = new WorkerId("TestJob-1", 0, 1); WorkerId workerId2 = new WorkerId("TestJob-1", 1, 2); long resubmitTime = wrrl.getWorkerResubmitTime(workerId, stageNum, currTime); assertEquals(currTime, resubmitTime); resubmitTime = wrrl.getWorkerResubmitTime(workerId, stageNum, currTime); assertEquals(currTime + 5000, resubmitTime); resubmitTime = wrrl.getWorkerResubmitTime(workerId2, stageNum, currTime); assertEquals(currTime, resubmitTime); resubmitTime = wrrl.getWorkerResubmitTime(workerId, stageNum, currTime); assertEquals(currTime + 10000, resubmitTime); resubmitTime = wrrl.getWorkerResubmitTime(workerId2, stageNum, currTime); assertEquals(currTime + 5000, resubmitTime); resubmitTime = wrrl.getWorkerResubmitTime(workerId, stageNum, currTime); assertEquals(currTime + 15000, resubmitTime); resubmitTime = wrrl.getWorkerResubmitTime(workerId, stageNum, currTime); assertEquals(currTime + 15000, resubmitTime); } @Test public void expireOldEntryTest() { WorkerResubmitRateLimiter wrrl = new WorkerResubmitRateLimiter("5:10:15", 5); int stageNum = 1; long currTime = System.currentTimeMillis(); WorkerId 
workerId = new WorkerId("TestJob-1", 0, 1); WorkerId workerId2 = new WorkerId("TestJob-1", 1, 2); long resubmitTime = wrrl.getWorkerResubmitTime(workerId, stageNum, currTime); List<WorkerResubmitRateLimiter.ResubmitRecord> resubmitRecords = wrrl.getResubmitRecords(); assertTrue(resubmitRecords.size() == 1); currTime += 4_000; resubmitTime = wrrl.getWorkerResubmitTime(workerId2, stageNum, currTime); resubmitRecords = wrrl.getResubmitRecords(); assertEquals(2, resubmitRecords.size()); // Move time now to 6 seconds which is greater than expiry time of 5 currTime += 2000; // This should expire worker id 1 but not 2 wrrl.expireResubmitRecords(currTime); resubmitRecords = wrrl.getResubmitRecords(); assertEquals(1, resubmitRecords.size()); assertEquals(stageNum + "_" + workerId2.getWorkerIndex(), resubmitRecords.get(0).getWorkerKey()); } }
4,222
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/jobcluster/job/JobTestTimeout.java
//package io.mantisrx.master.jobcluster.job; // //import static org.junit.Assert.assertFalse; //import static org.junit.Assert.assertTrue; //import static org.junit.Assert.fail; // //import java.time.Instant; // //import org.junit.AfterClass; //import org.junit.BeforeClass; //import org.junit.Test; // //import com.google.common.collect.Lists; // //import akka.actor.ActorSystem; //import akka.testkit.javadsl.TestKit; //import io.mantisrx.master.jobcluster.job.JobActor.SubscriptionTracker; //import io.mantisrx.runtime.JobOwner; //import io.mantisrx.runtime.MachineDefinition; //import io.mantisrx.runtime.WorkerMigrationConfig; //import io.mantisrx.runtime.descriptor.SchedulingInfo; //import io.mantisrx.server.master.domain.IJobClusterDefinition; //import io.mantisrx.server.master.domain.JobClusterConfig; //import io.mantisrx.server.master.domain.JobClusterDefinitionImpl; //import io.mantisrx.server.master.persistence.IMantisStorageProvider; //import io.mantisrx.server.master.persistence.MantisJobStore; //import io.mantisrx.server.master.persistence.SimpleCachedFileStorageProvider; // //public class JobTestTimeout { // // static ActorSystem system; // private static TestKit probe; // private static String name; // private static MantisJobStore jobStore; // private static IMantisStorageProvider storageProvider; // private static final String user = "mantis"; // private static IJobClusterDefinition jobClusterDefn ; // // @BeforeClass // public static void setup() { // system = ActorSystem.create(); // // system = ActorSystem.create(); // probe = new TestKit(system); // name = "testCluster"; // // JobClusterConfig clusterConfig = new JobClusterConfig.Builder() // .withArtifactName("myart") // .withParameters(Lists.newArrayList()) // .withSchedulingInfo(new SchedulingInfo.Builder().numberOfStages(1).singleWorkerStageWithConstraints(new MachineDefinition(0, 0, 0, 0, 0), Lists.newArrayList(), Lists.newArrayList()).build()) // .withSubscriptionTimeoutSecs(0) // 
.withVersion("0.0.1") // // .build(); // // jobClusterDefn = new JobClusterDefinitionImpl.Builder() // .withJobClusterConfig(clusterConfig) // .withName(name) // // .withSubscriptionTimeoutSecs(0) // .withUser(user) // .withIsReadyForJobMaster(true) // .withOwner(new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo")) // .withMigrationConfig(WorkerMigrationConfig.DEFAULT) // // .build(); // // // storageProvider = new SimpleCachedFileStorageProvider(); // jobStore = new MantisJobStore(storageProvider); // // // } // // @AfterClass // public static void tearDown() { // ((SimpleCachedFileStorageProvider)storageProvider).deleteAllFiles(); // TestKit.shutdownActorSystem(system); // system = null; // } // // // // // @Test // public void testHasTimedout() { // long subsTimeout = 30; // long minRuntime = 5; // long maxRuntime = Long.MAX_VALUE; // SubscriptionTracker st = new SubscriptionTracker(subsTimeout,minRuntime, maxRuntime); // Instant now = Instant.now(); // st.onJobStart(now); // // less than min runtime will not timeout // assertFalse(st.shouldTerminate(now.plusSeconds(3))); // // // equal to min runtime will not timeout // assertFalse(st.shouldTerminate(now.plusSeconds(5))); // // // greater than min runtime and but subscription time out not hit // assertFalse(st.shouldTerminate(now.plusSeconds(7))); // // // if it is subscribed then min runtime does not matter // st.onSubscribe(); // assertFalse(st.shouldTerminate(now.plusSeconds(7))); // // st.onUnSubscribe(now.plusSeconds(10)); // // subs timeout timer will now start // // timeout will happen at t + 10 + 30 seconds // assertFalse(st.shouldTerminate(now.plusSeconds(32))); // // assertTrue(st.shouldTerminate(now.plusSeconds(40))); // // assertTrue(st.shouldTerminate(now.plusSeconds(42))); // } // // @Test // public void testMinRuntimeGreater() { // long subsTimeout = 30; // long minRuntime = 40; // long maxRuntime = Long.MAX_VALUE; // SubscriptionTracker st = new SubscriptionTracker(subsTimeout, 
minRuntime, maxRuntime); // Instant now = Instant.now(); // st.onJobStart(now); // // less than min runtime will not timeout // assertFalse(st.shouldTerminate(now.plusSeconds(35))); // // // equal to min runtime will not timeout // assertFalse(st.shouldTerminate(now.plusSeconds(40))); // // // greater than min runtime and subscription time out hit // assertTrue(st.shouldTerminate(now.plusSeconds(47))); // // // if it is subscribed then min runtime does not matter // st.onSubscribe(); // assertFalse(st.shouldTerminate(now.plusSeconds(47))); // // st.onUnSubscribe(now.plusSeconds(50)); // // subs timeout timer will now start // // timeout will happen at t + 50 + 30 seconds // assertFalse(st.shouldTerminate(now.plusSeconds(62))); // // assertTrue(st.shouldTerminate(now.plusSeconds(80))); // // assertTrue(st.shouldTerminate(now.plusSeconds(82))); // } // // @Test // public void testHasNoTimeoutSet() { // long subsTimeout = Long.MAX_VALUE; // long minRuntime = 0; // long maxRuntime = Long.MAX_VALUE; // SubscriptionTracker st = new SubscriptionTracker(subsTimeout, minRuntime, maxRuntime); // Instant now = Instant.now(); // st.onJobStart(now); // // less than min runtime will not timeout // assertFalse(st.shouldTerminate(now.plusSeconds(3))); // // // equal to min runtime will not timeout // assertFalse(st.shouldTerminate(now.plusSeconds(5))); // // // greater than min runtime and but subscription time out not hit // assertFalse(st.shouldTerminate(now.plusSeconds(7))); // // // if it is subscribed then min runtime does not matter // st.onSubscribe(); // assertFalse(st.shouldTerminate(now.plusSeconds(7))); // // st.onUnSubscribe(now.plusSeconds(10)); // // subs timeout timer will now start // // timeout will happen at t + 10 + 30 seconds // assertFalse(st.shouldTerminate(now.plusSeconds(32))); // // assertFalse(st.shouldTerminate(now.plusSeconds(40))); // // assertFalse(st.shouldTerminate(now.plusSeconds(42))); // } // // @Test // public void 
testHasMinRuntimeTimeoutSetOnly() { // long subsTimeout = 30; // long minRuntime = 5; // long maxRuntime = Long.MAX_VALUE; // // If subs timeout is not explicitly set it is set to the default of 30 // SubscriptionTracker st = new SubscriptionTracker(subsTimeout, minRuntime, maxRuntime); // Instant now = Instant.now(); // st.onJobStart(now); // // less than min runtime will not timeout // assertFalse(st.shouldTerminate(now.plusSeconds(3))); // // // equal to min runtime will not timeout // assertFalse(st.shouldTerminate(now.plusSeconds(5))); // // // greater than min runtime and but subscription time out not hit // assertFalse(st.shouldTerminate(now.plusSeconds(7))); // // // if it is subscribed then min runtime does not matter // st.onSubscribe(); // assertFalse(st.shouldTerminate(now.plusSeconds(7))); // // st.onUnSubscribe(now.plusSeconds(10)); // // subs timeout timer will now start // // timeout will happen at t + 10 + 30 seconds // assertFalse(st.shouldTerminate(now.plusSeconds(32))); // // assertTrue(st.shouldTerminate(now.plusSeconds(40))); // // assertTrue(st.shouldTerminate(now.plusSeconds(42))); // } // // @Test // public void testHasMaxRuntimeTimeout() { // long subsTimeout = 30; // long minRuntime = 5; // long maxRuntime = 40; // // If subs timeout is not explicitly set it is set to the default of 30 // SubscriptionTracker st = new SubscriptionTracker(subsTimeout, minRuntime, maxRuntime); // Instant now = Instant.now(); // st.onJobStart(now); // // less than min runtime will not timeout // assertFalse(st.shouldTerminate(now.plusSeconds(3))); // // // equal to min runtime will not timeout // assertFalse(st.shouldTerminate(now.plusSeconds(5))); // // // greater than min runtime and but subscription time out not hit // assertFalse(st.shouldTerminate(now.plusSeconds(7))); // // // if it is subscribed then min runtime does not matter // st.onSubscribe(); // assertFalse(st.shouldTerminate(now.plusSeconds(7))); // // // max runtime exceeded // // 
assertFalse(st.shouldTerminate(now.plusSeconds(42))); // } // // public void testMaxLessThanMinRuntime() { // long subsTimeout = 30; // long minRuntime = 5; // long maxRuntime = 4; // // If subs timeout is not explicitly set it is set to the default of 30 // try { // SubscriptionTracker st = new SubscriptionTracker(subsTimeout, minRuntime, maxRuntime); // fail(); // } catch (IllegalArgumentException e) { // // } // // // } //}
4,223
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/jobcluster/job/JobScaleUpDownTests.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.jobcluster.job; import akka.actor.ActorRef; import akka.actor.ActorSystem; import akka.testkit.javadsl.TestKit; import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException; import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper; import io.mantisrx.shaded.com.google.common.collect.Lists; import com.netflix.mantis.master.scheduler.TestHelpers; import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl; import io.mantisrx.master.events.LifecycleEventPublisher; import io.mantisrx.master.events.LifecycleEventPublisherImpl; import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl; import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto; import io.mantisrx.master.jobcluster.proto.JobClusterProto; import io.mantisrx.runtime.MachineDefinition; import io.mantisrx.runtime.MantisJobState; import io.mantisrx.runtime.descriptor.SchedulingInfo; import io.mantisrx.runtime.descriptor.StageScalingPolicy; import io.mantisrx.runtime.descriptor.StageScalingPolicy.ScalingReason; import io.mantisrx.runtime.descriptor.StageScalingPolicy.Strategy; import io.mantisrx.server.core.JobCompletedReason; import io.mantisrx.server.core.JobSchedulingInfo; import io.mantisrx.server.core.WorkerAssignments; import io.mantisrx.server.core.WorkerHost; import 
io.mantisrx.server.core.domain.WorkerId; import io.mantisrx.server.master.domain.JobId; import io.mantisrx.server.master.persistence.MantisJobStore; import io.mantisrx.server.master.persistence.exceptions.InvalidJobException; import io.mantisrx.server.master.scheduler.MantisScheduler; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import rx.schedulers.Schedulers; import rx.subjects.BehaviorSubject; import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; public class JobScaleUpDownTests { static ActorSystem system; final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl()); @BeforeClass public static void setup() { system = ActorSystem.create(); TestHelpers.setupMasterConfig(); } @AfterClass public static void tearDown() { TestKit.shutdownActorSystem(system); system = null; } ////////////////////////Scale up Tests //////////////////////////////////// @Test public void testJobScaleUp() throws Exception, InvalidJobException, io.mantisrx.runtime.command.InvalidJobException { final TestKit probe = new TestKit(system); Map<ScalingReason, Strategy> smap = new HashMap<>(); 
smap.put(ScalingReason.CPU, new Strategy(ScalingReason.CPU, 0.5, 0.75, null)); smap.put(ScalingReason.DataDrop, new Strategy(ScalingReason.DataDrop, 0.0, 2.0, null)); SchedulingInfo sInfo = new SchedulingInfo.Builder() .numberOfStages(1) .multiWorkerScalableStageWithConstraints(1, new MachineDefinition(1.0,1.0,1.0,3), Lists.newArrayList(), Lists.newArrayList(), new StageScalingPolicy(1, 0, 10, 1, 1, 0, smap)) .build(); String clusterName = "testJobScaleUp"; MantisScheduler schedulerMock = mock(MantisScheduler.class); MantisJobStore jobStoreMock = mock(MantisJobStore.class); ActorRef jobActor = JobTestHelper.submitSingleStageScalableJob(system,probe, clusterName, sInfo, schedulerMock, jobStoreMock, lifecycleEventPublisher); // send scale up request jobActor.tell(new JobClusterManagerProto.ScaleStageRequest(clusterName+"-1", 1, 2, "", ""), probe.getRef()); JobClusterManagerProto.ScaleStageResponse scaleResp = probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class); System.out.println("ScaleupResp " + scaleResp.message); assertEquals(SUCCESS, scaleResp.responseCode); assertEquals(2,scaleResp.getActualNumWorkers()); verify(jobStoreMock, times(1)).storeNewJob(any()); // initial worker verify(jobStoreMock, times(1)).storeNewWorkers(any(),any()); //scale up worker verify(jobStoreMock, times(1)).storeNewWorker(any()); verify(jobStoreMock, times(6)).updateWorker(any()); verify(jobStoreMock, times(3)).updateJob(any()); // initial worker + job master and scale up worker verify(schedulerMock, times(3)).scheduleWorker(any()); } @Test public void testJobScaleDown() throws Exception, InvalidJobException, io.mantisrx.runtime.command.InvalidJobException { final TestKit probe = new TestKit(system); Map<ScalingReason, Strategy> smap = new HashMap<>(); smap.put(ScalingReason.CPU, new Strategy(ScalingReason.CPU, 0.5, 0.75, null)); smap.put(ScalingReason.DataDrop, new Strategy(ScalingReason.DataDrop, 0.0, 2.0, null)); SchedulingInfo sInfo = new SchedulingInfo.Builder() 
.numberOfStages(1) .multiWorkerScalableStageWithConstraints(2, new MachineDefinition(1.0,1.0,1.0,3), Lists.newArrayList(), Lists.newArrayList(), new StageScalingPolicy(1, 0, 10, 1, 1, 0, smap)) .build(); String clusterName = "testJobScaleUp"; MantisScheduler schedulerMock = mock(MantisScheduler.class); MantisJobStore jobStoreMock = mock(MantisJobStore.class); ActorRef jobActor = JobTestHelper.submitSingleStageScalableJob(system,probe, clusterName, sInfo, schedulerMock, jobStoreMock, lifecycleEventPublisher); // send scale down request jobActor.tell(new JobClusterManagerProto.ScaleStageRequest(clusterName+"-1",1, 1, "", ""), probe.getRef()); JobClusterManagerProto.ScaleStageResponse scaleResp = probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class); System.out.println("ScaleDownResp " + scaleResp.message); assertEquals(SUCCESS, scaleResp.responseCode); assertEquals(1,scaleResp.getActualNumWorkers()); verify(jobStoreMock, times(1)).storeNewJob(any()); // initial worker verify(jobStoreMock, times(1)).storeNewWorkers(any(),any()); // 9 for worker events + 1 for scale down verify(jobStoreMock, times(10)).updateWorker(any()); verify(jobStoreMock, times(3)).updateJob(any()); // 1 scale down verify(schedulerMock, times(1)).unscheduleAndTerminateWorker(any(), any()); // 1 job master + 2 workers verify(schedulerMock, times(3)).scheduleWorker(any()); } private void validateHost(Map<Integer, WorkerHost> hosts, int workerIdx, int workerNum, MantisJobState workerState) { assertTrue(hosts.containsKey(workerNum)); assertEquals(hosts.get(workerNum).getHost(), "host1"); assertEquals(hosts.get(workerNum).getState(), workerState); assertEquals(hosts.get(workerNum).getMetricsPort(), 8000); assertEquals(hosts.get(workerNum).getWorkerIndex(), workerIdx); assertEquals(hosts.get(workerNum).getWorkerNumber(), workerNum); assertEquals(hosts.get(workerNum).getPort(), Collections.singletonList(9020)); } // TODO fix for timing issues //@Test public void testSchedulingInfo() 
throws Exception { CountDownLatch latch = new CountDownLatch(11); List<JobSchedulingInfo> schedulingChangesList = new CopyOnWriteArrayList<>(); final TestKit probe = new TestKit(system); Map<ScalingReason, Strategy> smap = new HashMap<>(); smap.put(ScalingReason.CPU, new Strategy(ScalingReason.CPU, 0.5, 0.75, null)); smap.put(ScalingReason.DataDrop, new Strategy(ScalingReason.DataDrop, 0.0, 2.0, null)); SchedulingInfo sInfo = new SchedulingInfo.Builder() .numberOfStages(1) .multiWorkerScalableStageWithConstraints(1, new MachineDefinition(1.0,1.0,1.0,3), Lists.newArrayList(), Lists.newArrayList(), new StageScalingPolicy(1, 0, 10, 1, 1, 0, smap)) .build(); String clusterName = "testSchedulingInfo"; MantisScheduler schedulerMock = mock(MantisScheduler.class); MantisJobStore jobStoreMock = mock(MantisJobStore.class); CountDownLatch worker1Started = new CountDownLatch(1); ActorRef jobActor = JobTestHelper.submitSingleStageScalableJob(system,probe, clusterName, sInfo, schedulerMock, jobStoreMock, lifecycleEventPublisher); JobId jobId = new JobId(clusterName, 1); JobClusterManagerProto.GetJobSchedInfoRequest getJobSchedInfoRequest = new JobClusterManagerProto.GetJobSchedInfoRequest(jobId); jobActor.tell(getJobSchedInfoRequest, probe.getRef()); JobClusterManagerProto.GetJobSchedInfoResponse resp = probe.expectMsgClass(JobClusterManagerProto.GetJobSchedInfoResponse.class); assertEquals(SUCCESS, resp.responseCode); assertTrue(resp.getJobSchedInfoSubject().isPresent()); ObjectMapper mapper = new ObjectMapper(); BehaviorSubject<JobSchedulingInfo> jobSchedulingInfoBehaviorSubject = resp.getJobSchedInfoSubject().get(); jobSchedulingInfoBehaviorSubject.doOnNext((js) -> { System.out.println("Got --> " + js.toString()); }) .map((e) -> { try { return mapper.writeValueAsString(e); } catch (JsonProcessingException e1) { e1.printStackTrace(); return "{\"error\":" + e1.getMessage() + "}"; } }) .map((js) -> { try { return mapper.readValue(js,JobSchedulingInfo.class); } catch (IOException 
e) { e.printStackTrace(); return null; } }) .filter((j) -> j!=null) .doOnNext((js) -> { // Map<Integer, WorkerAssignments> workerAssignments = js.getWorkerAssignments(); // WorkerAssignments workerAssignments1 = workerAssignments.get(1); // assertEquals(1, workerAssignments1.getNumWorkers()); // Map<Integer, WorkerHost> hosts = workerAssignments1.getHosts(); // // make sure worker number 1 exists // assertTrue(hosts.containsKey(1)); }) .doOnCompleted(() -> { System.out.println("SchedulingInfo completed"); System.out.println(schedulingChangesList.size() + " Sched changes received"); }) .observeOn(Schedulers.io()) .subscribe((js) -> { latch.countDown(); schedulingChangesList.add(js); }); // send scale up request jobActor.tell(new JobClusterManagerProto.ScaleStageRequest(jobId.getId(), 1, 2, "", ""), probe.getRef()); JobClusterManagerProto.ScaleStageResponse scaleResp = probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class); System.out.println("ScaleupResp " + scaleResp.message); assertEquals(SUCCESS, scaleResp.responseCode); assertEquals(2,scaleResp.getActualNumWorkers()); JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe,jobActor,jobId.getId(),1,new WorkerId(jobId.getId(),1,3)); // worker gets lost JobTestHelper.sendWorkerTerminatedEvent(probe,jobActor,jobId.getId(),new WorkerId(jobId.getId(),1,3)); // Send replacement worker messages JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe,jobActor,jobId.getId(),1,new WorkerId(jobId.getId(),1,4)); // scale down jobActor.tell(new JobClusterManagerProto.ScaleStageRequest(jobId.getId(),1, 1, "", ""), probe.getRef()); JobClusterManagerProto.ScaleStageResponse scaleDownResp = probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class); System.out.println("ScaleDownResp " + scaleDownResp.message); assertEquals(SUCCESS, scaleDownResp.responseCode); assertEquals(1,scaleDownResp.getActualNumWorkers()); // kill job jobActor.tell(new JobClusterProto.KillJobRequest(jobId,"killed", 
JobCompletedReason.Killed, "test", probe.getRef()),probe.getRef()); probe.expectMsgClass(JobClusterProto.KillJobResponse.class); for (JobSchedulingInfo jobSchedulingInfo : schedulingChangesList) { System.out.println(jobSchedulingInfo); } /* SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={ 0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}], 1=WorkerAssignments [stage=1, numWorkers=1, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}]}] SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={ 0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}], 1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}]}] SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={ 0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}], 1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]], 3=WorkerHost [state=Launched, workerIndex=1, host=host1, port=[9020]]}]}] SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={ 0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}], 1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]], 3=WorkerHost [state=StartInitiated, workerIndex=1, host=host1, port=[9020]]}]}] SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={ 0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}], 1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]], 3=WorkerHost [state=Started, workerIndex=1, 
host=host1, port=[9020]]}]}] SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={ 0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}], 1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}]}] SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={ 0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}], 1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]], 4=WorkerHost [state=Launched, workerIndex=1, host=host1, port=[9020]]}]}] SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={ 0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}], 1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]], 4=WorkerHost [state=StartInitiated, workerIndex=1, host=host1, port=[9020]]}]}] SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={ 0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}], 1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]], 4=WorkerHost [state=Started, workerIndex=1, host=host1, port=[9020]]}]}] SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={ 0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}], 1=WorkerAssignments [stage=1, numWorkers=1, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]} ]}] */ latch.await(1000, TimeUnit.SECONDS); System.out.println("---->Verifying scheduling changes " + schedulingChangesList.size()); assertEquals(11, schedulingChangesList.size()); for(int i 
= 0;i < schedulingChangesList.size(); i++) { JobSchedulingInfo js = schedulingChangesList.get(i); // jobid is correct assertEquals(jobId.getId(),js.getJobId()); Map<Integer, WorkerAssignments> workerAssignments = js.getWorkerAssignments(); //has info about stage 1 System.out.println("WorkerAssignments -> " + workerAssignments); //assertTrue(workerAssignments.containsKey(1)); switch(i) { case 0: WorkerAssignments wa0 = workerAssignments.get(1); assertEquals(1, wa0.getNumWorkers()); Map<Integer, WorkerHost> hosts0 = wa0.getHosts(); // make sure worker number 2 exists validateHost(hosts0, 0, 2, MantisJobState.Started); break; // scale up by 1 case 1: WorkerAssignments wa1 = workerAssignments.get(1); assertEquals(2, wa1.getNumWorkers()); Map<Integer, WorkerHost> hosts1 = wa1.getHosts(); assertEquals(1, hosts1.size()); // first update has only numWorkers updated but the new worker is still in Accepted state, so no host entry for it validateHost(hosts1, 0, 2, MantisJobState.Started); assertFalse(hosts1.containsKey(3)); break; case 2: WorkerAssignments wa2 = workerAssignments.get(1); assertEquals(2, wa2.getNumWorkers()); Map<Integer, WorkerHost> hosts2 = wa2.getHosts(); assertEquals(2, hosts2.size()); // next update should have both numWorkers and the new worker in Launched state validateHost(hosts2, 0, 2, MantisJobState.Started); validateHost(hosts2, 1, 3, MantisJobState.Launched); break; case 3: WorkerAssignments wa3 = workerAssignments.get(1); assertEquals(2, wa3.getNumWorkers()); Map<Integer, WorkerHost> hosts3 = wa3.getHosts(); assertEquals(2, hosts3.size()); // this update is for new worker in StartInit state validateHost(hosts3, 0, 2, MantisJobState.Started); validateHost(hosts3, 1, 3, MantisJobState.StartInitiated); break; case 4: WorkerAssignments wa4 = workerAssignments.get(1); assertEquals(2, wa4.getNumWorkers()); Map<Integer, WorkerHost> hosts4 = wa4.getHosts(); assertEquals(2, hosts4.size()); // this update is for new worker in Started state 
validateHost(hosts4, 0, 2, MantisJobState.Started); validateHost(hosts4, 1, 3, MantisJobState.Started); break; case 5: // worker 3 is lost and should be resubmitted WorkerAssignments wa5 = workerAssignments.get(1); assertEquals(2, wa5.getNumWorkers()); Map<Integer, WorkerHost> hosts5 = wa5.getHosts(); assertEquals(1, hosts5.size()); validateHost(hosts5, 0, 2, MantisJobState.Started); assertFalse(hosts5.containsKey(3)); break; case 6: // worker 3 is replaced by worker num 4 WorkerAssignments wa6 = workerAssignments.get(1); assertEquals(2, wa6.getNumWorkers()); Map<Integer, WorkerHost> hosts6 = wa6.getHosts(); // this update should have both numWorkers and the new worker in Launched state assertEquals(2, hosts6.size()); validateHost(hosts6, 0, 2, MantisJobState.Started); validateHost(hosts6, 1, 4, MantisJobState.Launched); break; case 7: WorkerAssignments wa7 = workerAssignments.get(1); assertEquals(2, wa7.getNumWorkers()); Map<Integer, WorkerHost> hosts7 = wa7.getHosts(); // update for new worker in StartInit state assertEquals(2, hosts7.size()); validateHost(hosts7, 0, 2, MantisJobState.Started); validateHost(hosts7, 1, 4, MantisJobState.StartInitiated); break; case 8: WorkerAssignments wa8 = workerAssignments.get(1); assertEquals(2, wa8.getNumWorkers()); Map<Integer, WorkerHost> hosts8 = wa8.getHosts(); // update for new worker in Started state assertEquals(2, hosts8.size()); validateHost(hosts8, 0, 2, MantisJobState.Started); validateHost(hosts8, 1, 4, MantisJobState.Started); break; case 9: // scale down, worker 4 should be gone now and numWorkers set to 1 WorkerAssignments wa9 = workerAssignments.get(1); assertEquals(1, wa9.getNumWorkers()); Map<Integer, WorkerHost> hosts9 = wa9.getHosts(); assertTrue(hosts9.containsKey(2)); assertEquals(1, hosts9.size()); validateHost(hosts9, 0, 2, MantisJobState.Started); break; case 10: // job has been killed assertTrue(workerAssignments.isEmpty()); break; default: fail(); } } // // verify(jobStoreMock, 
times(1)).storeNewJob(any());
//        // initial worker
//        verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
//
//        //scale up worker
//        verify(jobStoreMock, times(1)).storeNewWorker(any());
//
//
//        verify(jobStoreMock, times(17)).updateWorker(any());
//
//        verify(jobStoreMock, times(3)).updateJob(any());
//
//        // initial worker + job master and scale up worker + resubmit
//        verify(schedulerMock, times(4)).scheduleWorker(any());
//
//        verify(schedulerMock, times(4)).unscheduleAndTerminateWorker(any(), any());
    }

    /**
     * A scale-up request against a scalable stage whose StageScalingPolicy carries an
     * empty strategy map must be rejected with CLIENT_ERROR: no extra worker is
     * persisted or scheduled and the response reports 0 actual workers.
     */
    @Test
    public void testJobScaleUpFailsIfNoScaleStrategy() throws Exception {
        final TestKit probe = new TestKit(system);
        // intentionally empty strategy map -> scaling is not possible for this stage
        Map<ScalingReason, Strategy> smap = new HashMap<>();
        SchedulingInfo sInfo = new SchedulingInfo.Builder()
                .numberOfStages(1)
                .multiWorkerScalableStageWithConstraints(1,
                        new MachineDefinition(1.0,1.0,1.0,3),
                        Lists.newArrayList(), Lists.newArrayList(),
                        new StageScalingPolicy(1, 0, 10, 1, 1, 0, smap))
                .build();
        String clusterName = "testJobScaleUpFailsIfNoScaleStrategy";
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        ActorRef jobActor = JobTestHelper.submitSingleStageScalableJob(system,probe, clusterName, sInfo, schedulerMock, jobStoreMock, lifecycleEventPublisher);

        // send scale up request
        jobActor.tell(new JobClusterManagerProto.ScaleStageRequest(clusterName+"-1",1, 2, "", ""), probe.getRef());
        JobClusterManagerProto.ScaleStageResponse scaleResp = probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class);
        System.out.println("ScaleupResp " + scaleResp.message);
        assertEquals(CLIENT_ERROR, scaleResp.responseCode);
        assertEquals(0, scaleResp.getActualNumWorkers());

        verify(jobStoreMock, times(1)).storeNewJob(any());
        // initial worker
        verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
        //no scale up worker happened
        verify(jobStoreMock, times(0)).storeNewWorker(any());
        verify(jobStoreMock, times(3)).updateWorker(any());
        verify(jobStoreMock, times(3)).updateJob(any());
        // initial worker only
        verify(schedulerMock, times(1)).scheduleWorker(any());
    }

    /**
     * A scale-up request against a stage whose policy has min == max (no head-room)
     * must also be rejected with CLIENT_ERROR and leave only the initial worker.
     * NOTE(review): clusterName reuses "testJobScaleUpFailsIfNoScaleStrategy" — looks
     * like a copy/paste slip, though it is only a label here.
     */
    @Test
    public void testJobScaleUpFailsIfMinEqualsMax() throws Exception {
        final TestKit probe = new TestKit(system);
        Map<ScalingReason, Strategy> smap = new HashMap<>();
        // min == max == 1 -> no room to scale
        SchedulingInfo sInfo = new SchedulingInfo.Builder()
                .numberOfStages(1)
                .multiWorkerScalableStageWithConstraints(1,
                        new MachineDefinition(1.0,1.0,1.0,3),
                        Lists.newArrayList(), Lists.newArrayList(),
                        new StageScalingPolicy(1, 1, 1, 1, 1, 0, smap))
                .build();
        String clusterName = "testJobScaleUpFailsIfNoScaleStrategy";
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        ActorRef jobActor = JobTestHelper.submitSingleStageScalableJob(system,probe, clusterName, sInfo, schedulerMock, jobStoreMock, lifecycleEventPublisher);

        // send scale up request
        jobActor.tell(new JobClusterManagerProto.ScaleStageRequest(clusterName + "-1",1, 3, "", ""), probe.getRef());
        JobClusterManagerProto.ScaleStageResponse scaleResp = probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class);
        System.out.println("ScaleupResp " + scaleResp.message);
        assertEquals(CLIENT_ERROR, scaleResp.responseCode);
        assertEquals(0, scaleResp.getActualNumWorkers());

        verify(jobStoreMock, times(1)).storeNewJob(any());
        // initial worker
        verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
        //no scale up worker happened
        verify(jobStoreMock, times(0)).storeNewWorker(any());
        verify(jobStoreMock, times(3)).updateWorker(any());
        verify(jobStoreMock, times(3)).updateJob(any());
        // initial worker only
        verify(schedulerMock, times(1)).scheduleWorker(any());
    }

    /** A policy with min < max and at least one strategy reports itself enabled. */
    @Test
    public void stageScalingPolicyTest() {
        int stageNo = 1;
        int min = 0;
        int max = 10;
        int increment = 1;
        int decrement = 1;
        long cooldownsecs = 300;
        Map<ScalingReason, Strategy> smap = new HashMap<>();
        smap.put(ScalingReason.CPU, new Strategy(ScalingReason.CPU, 0.5, 0.75, null));
        StageScalingPolicy ssp = new StageScalingPolicy(stageNo, min, max, increment, decrement, cooldownsecs, smap);
        assertTrue(ssp.isEnabled());
    }

    /** With no scaling strategy configured the policy reports itself disabled. */
    @Test
    public void stageScalingPolicyNoStrategyTest() {
        int stageNo = 1;
        int min = 0;
        int max = 10;
        int increment = 1;
        int decrement = 1;
        long cooldownsecs = 300;
        Map<ScalingReason, Strategy> smap = new HashMap<>();
        StageScalingPolicy ssp = new StageScalingPolicy(stageNo, min, max, increment, decrement, cooldownsecs, smap);
        assertFalse(ssp.isEnabled());
    }

    /** min == max leaves no room to scale: policy is disabled even with a strategy present. */
    @Test
    public void stageScalingPolicyMinEqMaxTest() {
        int stageNo = 1;
        int min = 10;
        int max = 10;
        int increment = 1;
        int decrement = 1;
        long cooldownsecs = 300;
        Map<ScalingReason, Strategy> smap = new HashMap<>();
        smap.put(ScalingReason.CPU, new Strategy(ScalingReason.CPU, 0.5, 0.75, null));
        StageScalingPolicy ssp = new StageScalingPolicy(stageNo, min, max, increment, decrement, cooldownsecs, smap);
        assertFalse(ssp.isEnabled());
    }

    /** min > max is normalized by the policy: max is raised to min and the policy stays enabled. */
    @Test
    public void stageScalingPolicyMinGreaterThanMaxTest() {
        int stageNo = 1;
        int min = 10;
        int max = 1;
        int increment = 1;
        int decrement = 1;
        long cooldownsecs = 300;
        Map<ScalingReason, Strategy> smap = new HashMap<>();
        smap.put(ScalingReason.CPU, new Strategy(ScalingReason.CPU, 0.5, 0.75, null));
        StageScalingPolicy ssp = new StageScalingPolicy(stageNo, min, max, increment, decrement, cooldownsecs, smap);
        assertTrue(ssp.isEnabled());
        // max will be set equal to min
        assertEquals(10, ssp.getMax());
    }
}
4,224
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/jobcluster/job/JobTestLifecycle.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.jobcluster.job; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SERVER_ERROR; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS; import static java.util.Optional.empty; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import java.io.IOException; import java.net.URL; import java.time.Instant; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import com.netflix.mantis.master.scheduler.TestHelpers; import io.mantisrx.master.events.*; import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata; import io.mantisrx.master.jobcluster.job.worker.JobWorker; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsResponse; import io.mantisrx.runtime.JobSla; import io.mantisrx.runtime.MantisJobDurationType; import io.mantisrx.runtime.descriptor.StageScalingPolicy; import io.mantisrx.runtime.descriptor.StageSchedulingInfo; import io.mantisrx.server.master.persistence.IMantisStorageProvider; import 
io.mantisrx.server.master.persistence.MantisStorageProviderAdapter; import io.mantisrx.server.master.scheduler.ScheduleRequest; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import org.mockito.InOrder; import org.mockito.Mockito; import io.mantisrx.shaded.com.google.common.collect.Lists; import akka.actor.ActorRef; import akka.actor.ActorSystem; import akka.actor.PoisonPill; import akka.testkit.javadsl.TestKit; import io.mantisrx.master.jobcluster.job.JobActor.WorkerNumberGenerator; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto; import io.mantisrx.master.jobcluster.proto.JobClusterProto; import io.mantisrx.master.jobcluster.proto.JobProto; import io.mantisrx.runtime.MachineDefinition; import io.mantisrx.runtime.command.InvalidJobException; import io.mantisrx.runtime.descriptor.SchedulingInfo; import io.mantisrx.server.core.JobCompletedReason; import io.mantisrx.server.core.domain.WorkerId; import io.mantisrx.server.master.domain.IJobClusterDefinition; import io.mantisrx.server.master.domain.JobDefinition; import io.mantisrx.server.master.domain.JobId; import io.mantisrx.server.master.persistence.MantisJobStore; import io.mantisrx.server.master.scheduler.MantisScheduler; import io.mantisrx.server.core.domain.JobMetadata; public class JobTestLifecycle { static ActorSystem system; private static MantisJobStore jobStore; private static IMantisStorageProvider storageProvider; private static LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl()); private static final String user = "mantis"; @BeforeClass public static void setup() { system = ActorSystem.create(); TestHelpers.setupMasterConfig(); storageProvider = new MantisStorageProviderAdapter(new io.mantisrx.server.master.store.SimpleCachedFileStorageProvider(), eventPublisher); jobStore = new MantisJobStore(storageProvider); } 
@AfterClass
    public static void tearDown() {
        // remove on-disk state written by SimpleCachedFileStorageProvider, then stop the actor system
        JobTestHelper.deleteAllFiles();
        TestKit.shutdownActorSystem(system);
        system = null;
    }

    /**
     * Querying a JobActor that was never sent JobProto.InitJob must answer
     * GetJobDetailsRequest with CLIENT_ERROR.
     */
    @Test
    public void testJobSubmitWithoutInit() {
        final TestKit probe = new TestKit(system);
        String clusterName = "testJobSubmitCluster";
        IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName);
        JobDefinition jobDefn;
        try {
            jobDefn = JobTestHelper.generateJobDefinition(clusterName);
            MantisScheduler schedulerMock = mock(MantisScheduler.class);
            MantisJobStore jobStoreMock = mock(MantisJobStore.class);
            MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
                    .withJobId(new JobId(clusterName,1))
                    .withSubmittedAt(Instant.now())
                    .withJobState(JobState.Accepted)
                    .withNextWorkerNumToUse(1)
                    .withJobDefinition(jobDefn)
                    .build();
            final ActorRef jobActor = system.actorOf(JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreMock, schedulerMock, eventPublisher));
            String jobId = clusterName + "-1";
            // note: no InitJob message sent on purpose
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            GetJobDetailsResponse resp = probe.expectMsgClass(GetJobDetailsResponse.class);
            System.out.println(resp.message);
            assertEquals(CLIENT_ERROR, resp.responseCode);
        } catch(Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Happy path for a single-worker job: after InitJob the job is Accepted; once the
     * worker reports launched / start-initiated / heartbeat the job transitions to
     * Launched, with the expected persistence interactions.
     */
    @Test
    public void testJobSubmit() {
        final TestKit probe = new TestKit(system);
        String clusterName = "testJobSubmitCluster";
        IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName);
        JobDefinition jobDefn;
        try {
            jobDefn = JobTestHelper.generateJobDefinition(clusterName);
            // IMantisStorageProvider storageProvider = new SimpleCachedFileStorageProvider();
            // MantisJobStore jobStore = new MantisJobStore(storageProvider);
            MantisScheduler schedulerMock = mock(MantisScheduler.class);
            MantisJobStore jobStoreMock = mock(MantisJobStore.class);
            MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
                    .withJobId(new JobId(clusterName,1))
                    .withSubmittedAt(Instant.now())
                    .withJobState(JobState.Accepted)
                    .withNextWorkerNumToUse(1)
                    .withJobDefinition(jobDefn)
                    .build();
            final ActorRef jobActor = system.actorOf(JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreMock, schedulerMock, eventPublisher));
            jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            JobProto.JobInitialized initMsg = probe.expectMsgClass(JobProto.JobInitialized.class);
            assertEquals(SUCCESS, initMsg.responseCode);
            String jobId = clusterName + "-1";
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            GetJobDetailsResponse resp = probe.expectMsgClass(GetJobDetailsResponse.class);
            System.out.println("resp " + resp + " msg " + resp.message);
            assertEquals(SUCCESS, resp.responseCode);
            assertEquals(JobState.Accepted,resp.getJobMetadata().get().getState());
            assertTrue(resp.getJobMetadata().get().getStageMetadata(1).isPresent());

            // send launched event
            WorkerId workerId = new WorkerId(jobId, 0, 1);
            int stageNum = 1;
            JobTestHelper.sendWorkerLaunchedEvent(probe, jobActor, workerId, stageNum);
            JobTestHelper.sendStartInitiatedEvent(probe, jobActor, stageNum, workerId);
            // send heartbeat
            JobTestHelper.sendHeartBeat(probe, jobActor, jobId, stageNum, workerId);

            // check job status again
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
            System.out.println("resp " + resp2 + " msg " + resp2.message);
            assertEquals(SUCCESS, resp2.responseCode);
            assertEquals(JobState.Launched,resp2.getJobMetadata().get().getState());

            verify(jobStoreMock, times(1)).storeNewJob(any());
            verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
            verify(jobStoreMock, times(3)).updateWorker(any());
            verify(jobStoreMock, times(3)).updateJob(any());
            //assertEquals(jobActor, probe.getLastSender());
        } catch (InvalidJobException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
            fail();
        } catch (Exception e) {
            e.printStackTrace();
            fail();
        }
    }

    /**
     * Same happy path as testJobSubmit but for a Perpetual job; additionally pins the
     * exact ScheduleRequest (worker id, stage, JobMetadata, duration type, machine
     * definition) handed to the scheduler.
     */
    @Test
    public void testJobSubmitPerpetual() {
        final TestKit probe = new TestKit(system);
        String clusterName = "testJobSubmitPerpetual";
        IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName);
        JobDefinition jobDefn;
        try {
            MachineDefinition machineDefinition = new MachineDefinition(1.0, 1.0, 1.0, 1.0, 3);
            SchedulingInfo schedInfo = new SchedulingInfo.Builder()
                    .numberOfStages(1)
                    .singleWorkerStageWithConstraints(machineDefinition, Lists.newArrayList(), Lists.newArrayList()).build();
            jobDefn = new JobDefinition.Builder()
                    .withName(clusterName)
                    .withParameters(Lists.newArrayList())
                    .withLabels(Lists.newArrayList())
                    .withSchedulingInfo(schedInfo)
                    .withArtifactName("myart")
                    .withSubscriptionTimeoutSecs(30)
                    .withUser("njoshi")
                    .withNumberOfStages(schedInfo.getStages().size())
                    .withJobSla(new JobSla(0, 0, null, MantisJobDurationType.Perpetual, null))
                    .build();
            MantisScheduler schedulerMock = mock(MantisScheduler.class);
            MantisJobStore jobStoreMock = mock(MantisJobStore.class);
            MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
                    .withJobId(new JobId(clusterName,1))
                    .withSubmittedAt(Instant.now())
                    .withJobState(JobState.Accepted)
                    .withNextWorkerNumToUse(1)
                    .withJobDefinition(jobDefn)
                    .build();
            final ActorRef jobActor = system.actorOf(JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreMock, schedulerMock, eventPublisher));
            jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            JobProto.JobInitialized initMsg = probe.expectMsgClass(JobProto.JobInitialized.class);
            assertEquals(SUCCESS, initMsg.responseCode);
            String jobId = clusterName + "-1";
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            GetJobDetailsResponse resp = probe.expectMsgClass(GetJobDetailsResponse.class);
            System.out.println("resp " + resp + " msg " + resp.message);
            assertEquals(SUCCESS, resp.responseCode);
            assertEquals(JobState.Accepted,resp.getJobMetadata().get().getState());
            assertTrue(resp.getJobMetadata().get().getStageMetadata(1).isPresent());

            // send launched event
            WorkerId workerId = new WorkerId(jobId, 0, 1);
            int stageNum = 1;
            JobTestHelper.sendWorkerLaunchedEvent(probe, jobActor, workerId, stageNum);
            JobTestHelper.sendStartInitiatedEvent(probe, jobActor, stageNum, workerId);
            // send heartbeat
            JobTestHelper.sendHeartBeat(probe, jobActor, jobId, stageNum, workerId);

            // check job status again
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
            System.out.println("resp " + resp2 + " msg " + resp2.message);
            assertEquals(SUCCESS, resp2.responseCode);
            assertEquals(JobState.Launched,resp2.getJobMetadata().get().getState());

            verify(jobStoreMock, times(1)).storeNewJob(any());
            verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
            verify(jobStoreMock, times(3)).updateWorker(any());
            verify(jobStoreMock, times(3)).updateJob(any());
            //verify(jobStoreMock, times(3))
            verify(schedulerMock,times(1)).scheduleWorker(any());
            JobMetadata jobMetadata = new JobMetadata(jobId, new URL("http://myart" + ""),1,"njoshi",schedInfo,Lists.newArrayList(),0,0);
            ScheduleRequest expectedScheduleRequest = new ScheduleRequest(workerId, 1,4, jobMetadata,MantisJobDurationType.Perpetual,machineDefinition,Lists.newArrayList(),Lists.newArrayList(),0,empty());
            verify(schedulerMock).scheduleWorker(expectedScheduleRequest);
            //assertEquals(jobActor, probe.getLastSender());
        } catch (InvalidJobException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
            fail();
        } catch (Exception e) {
            e.printStackTrace();
            fail();
        }
    }

    /**
     * If the store throws on storeNewJob, InitJob must answer SERVER_ERROR and a
     * subsequent details query must answer CLIENT_ERROR (job never initialized).
     */
    @Test
    public void testJobSubmitInitalizationFails() {
        final TestKit probe = new TestKit(system);
        String clusterName = "testJobSubmitPersistenceFails";
        IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName);
        JobDefinition jobDefn;
        try {
            jobDefn = JobTestHelper.generateJobDefinition(clusterName);
            MantisScheduler schedulerMock = mock(MantisScheduler.class);
            MantisJobStore jobStoreMock = mock(MantisJobStore.class);
            // force persistence failure during initialization
            Mockito.doThrow(IOException.class).when(jobStoreMock).storeNewJob(any());
            MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
                    .withJobId(new JobId(clusterName,1))
                    .withSubmittedAt(Instant.now())
                    .withJobState(JobState.Accepted)
                    .withNextWorkerNumToUse(1)
                    .withJobDefinition(jobDefn)
                    .build();
            final ActorRef jobActor = system.actorOf(JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreMock, schedulerMock, eventPublisher));
            jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            JobProto.JobInitialized initMsg = probe.expectMsgClass(JobProto.JobInitialized.class);
            assertEquals(SERVER_ERROR, initMsg.responseCode);
            System.out.println(initMsg.message);
            String jobId = clusterName + "-1";
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            GetJobDetailsResponse resp = probe.expectMsgClass(GetJobDetailsResponse.class);
            System.out.println("resp " + resp + " msg " + resp.message);
            assertEquals(CLIENT_ERROR, resp.responseCode);
        } catch (InvalidJobException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
            fail();
        } catch (Exception e) {
            e.printStackTrace();
            fail();
        }
    }

    /**
     * Two-worker stage: the job must stay Accepted until both workers have started,
     * then transition to Launched.
     */
    @Test
    public void testJobSubmitWithMultipleWorkers() {
        final TestKit probe = new TestKit(system);
        String clusterName = "testJobSubmitWithMultipleWorkersCluster";
        IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName);
        JobDefinition jobDefn;
        try {
SchedulingInfo sInfo = new SchedulingInfo.Builder().numberOfStages(1).multiWorkerStageWithConstraints(2, new MachineDefinition(1.0,1.0,1.0,3), Lists.newArrayList(), Lists.newArrayList()).build();
            jobDefn = JobTestHelper.generateJobDefinition(clusterName, sInfo);
            MantisScheduler schedulerMock = mock(MantisScheduler.class);
            MantisJobStore jobStoreMock = mock(MantisJobStore.class);
            MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
                    .withJobId(new JobId(clusterName,2))
                    .withSubmittedAt(Instant.now())
                    .withJobState(JobState.Accepted)
                    .withNextWorkerNumToUse(1)
                    .withJobDefinition(jobDefn)
                    .build();
            final ActorRef jobActor = system.actorOf(JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreMock, schedulerMock, eventPublisher));
            jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            JobProto.JobInitialized initMsg = probe.expectMsgClass(JobProto.JobInitialized.class);
            assertEquals(SUCCESS, initMsg.responseCode);
            String jobId = clusterName + "-2";
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            GetJobDetailsResponse resp = probe.expectMsgClass(GetJobDetailsResponse.class);
            System.out.println("resp " + resp + " msg " + resp.message);
            assertEquals(SUCCESS, resp.responseCode);
            assertEquals(JobState.Accepted,resp.getJobMetadata().get().getState());
            int stageNo = 1;
            // send launched event
            WorkerId workerId = new WorkerId(jobId, 0, 1);
            // send heartbeat
            JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId);
            // check job status again
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
            System.out.println("resp " + resp2 + " msg " + resp2.message);
            assertEquals(SUCCESS, resp2.responseCode);
            // Only 1 worker has started.
            assertEquals(JobState.Accepted,resp2.getJobMetadata().get().getState());
            // send launched event
            WorkerId workerId2 = new WorkerId(jobId, 1, 2);
            JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId2);
            // check job status again
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            GetJobDetailsResponse resp3 = probe.expectMsgClass(GetJobDetailsResponse.class);
            System.out.println("resp " + resp3 + " msg " + resp3.message);
            assertEquals(SUCCESS, resp3.responseCode);
            // 2 worker have started so job should be started.
            assertEquals(JobState.Launched,resp3.getJobMetadata().get().getState());

            verify(jobStoreMock, times(1)).storeNewJob(any());
            verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
            verify(jobStoreMock, times(6)).updateWorker(any());
            verify(jobStoreMock, times(3)).updateJob(any());
            //assertEquals(jobActor, probe.getLastSender());
        } catch (InvalidJobException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
            fail();
        } catch (Exception e) {
            e.printStackTrace();
            fail();
        }
    }

    /**
     * Two scalable stages (2 and 3 workers): the job stays Accepted until every worker
     * of every stage has started, then transitions to Launched.
     */
    @Test
    public void testJobSubmitWithMultipleStagesAndWorkers() {
        final TestKit probe = new TestKit(system);
        String clusterName = "testJobSubmitWithMultipleStagesAndWorkers";
        IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName);
        JobDefinition jobDefn;
        try {
            Map<StageScalingPolicy.ScalingReason, StageScalingPolicy.Strategy> smap = new HashMap<>();
            smap.put(StageScalingPolicy.ScalingReason.Memory, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.Memory, 0.1, 0.6, null));
            SchedulingInfo.Builder builder = new SchedulingInfo.Builder()
                    .numberOfStages(2)
                    .multiWorkerScalableStageWithConstraints(
                            2,
                            new MachineDefinition(1, 1.24, 0.0, 1, 1),
                            null, null,
                            new StageScalingPolicy(1, 1, 3, 1, 1, 60, smap)
                    )
                    .multiWorkerScalableStageWithConstraints(
                            3,
                            new MachineDefinition(1, 1.24, 0.0, 1, 1),
                            null, null,
                            new StageScalingPolicy(1, 1, 3, 1, 1, 60, smap)
                    );
            SchedulingInfo sInfo = builder.build();
            System.out.println("SchedulingInfo " + sInfo);
            jobDefn = JobTestHelper.generateJobDefinition(clusterName, sInfo);
            MantisScheduler schedulerMock = mock(MantisScheduler.class);
            MantisJobStore jobStoreMock = mock(MantisJobStore.class);
            MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
                    .withJobId(new JobId(clusterName,1))
                    .withSubmittedAt(Instant.now())
                    .withJobState(JobState.Accepted)
                    .withNextWorkerNumToUse(1)
                    .withJobDefinition(jobDefn)
                    .build();
            final ActorRef jobActor = system.actorOf(JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreMock, schedulerMock, eventPublisher));
            jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            JobProto.JobInitialized initMsg = probe.expectMsgClass(JobProto.JobInitialized.class);
            assertEquals(SUCCESS, initMsg.responseCode);
            String jobId = clusterName + "-1";
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            GetJobDetailsResponse resp = probe.expectMsgClass(GetJobDetailsResponse.class);
            System.out.println("resp " + resp + " msg " + resp.message);
            assertEquals(SUCCESS, resp.responseCode);
            assertEquals(JobState.Accepted,resp.getJobMetadata().get().getState());
            int stageNo = 0;
            // send launched event
            WorkerId workerId = new WorkerId(jobId, 0, 1);
            // send heartbeat
            JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId);
            // check job status again
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
            System.out.println("resp " + resp2 + " msg " + resp2.message);
            assertEquals(SUCCESS, resp2.responseCode);
            // Only 1 worker has started.
            assertEquals(JobState.Accepted,resp2.getJobMetadata().get().getState());

            // send launched events for the rest of the workers
            int nextWorkerNumber = 1;
            int stage = 0;
            Iterator<Map.Entry<Integer, StageSchedulingInfo>> it = sInfo.getStages().entrySet().iterator();
            while(it.hasNext()) {
                Map.Entry<Integer, StageSchedulingInfo> integerStageSchedulingInfoEntry = it.next();
                StageSchedulingInfo stageSchedulingInfo = integerStageSchedulingInfoEntry.getValue();
                System.out.println("Workers -> " + stageSchedulingInfo.getNumberOfInstances() + " in stage " + stage);
                for(int i=0; i<stageSchedulingInfo.getNumberOfInstances(); i++) {
                    WorkerId wId = new WorkerId(jobId, i, nextWorkerNumber++);
                    System.out.println("Sending events for worker --> " + wId + " Stage " + stage);
                    JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stage, wId);
                }
                stage++;
            }

            // check job status again
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            GetJobDetailsResponse resp3 = probe.expectMsgClass(GetJobDetailsResponse.class);
            System.out.println("resp " + resp3 + " msg " + resp3.message);
            assertEquals(SUCCESS, resp3.responseCode);
            // 2 worker have started so job should be started.
            assertEquals(JobState.Launched,resp3.getJobMetadata().get().getState());

            verify(jobStoreMock, times(1)).storeNewJob(any());
            verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
            verify(jobStoreMock, times(19)).updateWorker(any());
            verify(jobStoreMock, times(3)).updateJob(any());
            //assertEquals(jobActor, probe.getLastSender());
        } catch (InvalidJobException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
            fail();
        } catch (Exception e) {
            e.printStackTrace();
            fail();
        }
    }

    /**
     * After both workers of a two-worker stage have started, ListWorkersRequest must
     * return exactly the two active workers (worker numbers 1 and 2).
     */
    @Test
    public void testListActiveWorkers() {
        final TestKit probe = new TestKit(system);
        String clusterName = "testListActiveWorkers";
        IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName);
        JobDefinition jobDefn;
        try {
            SchedulingInfo sInfo = new SchedulingInfo.Builder().numberOfStages(1).multiWorkerStageWithConstraints(2, new MachineDefinition(1.0,1.0,1.0,3), Lists.newArrayList(), Lists.newArrayList()).build();
            jobDefn = JobTestHelper.generateJobDefinition(clusterName, sInfo);
            MantisScheduler schedulerMock = mock(MantisScheduler.class);
            MantisJobStore jobStoreMock = mock(MantisJobStore.class);
            MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
                    .withJobId(new JobId(clusterName,2))
                    .withSubmittedAt(Instant.now())
                    .withJobState(JobState.Accepted)
                    .withNextWorkerNumToUse(1)
                    .withJobDefinition(jobDefn)
                    .build();
            final ActorRef jobActor = system.actorOf(JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreMock, schedulerMock, eventPublisher));
            jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            JobProto.JobInitialized initMsg = probe.expectMsgClass(JobProto.JobInitialized.class);
            assertEquals(SUCCESS, initMsg.responseCode);
            String jobId = clusterName + "-2";
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            GetJobDetailsResponse resp = probe.expectMsgClass(GetJobDetailsResponse.class);
System.out.println("resp " + resp + " msg " + resp.message);
            assertEquals(SUCCESS, resp.responseCode);
            assertEquals(JobState.Accepted,resp.getJobMetadata().get().getState());
            int stageNo = 1;
            // send launched event
            WorkerId workerId = new WorkerId(jobId, 0, 1);
            // send heartbeat
            JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId);
            // check job status again
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
            System.out.println("resp " + resp2 + " msg " + resp2.message);
            assertEquals(SUCCESS, resp2.responseCode);
            // Only 1 worker has started.
            assertEquals(JobState.Accepted,resp2.getJobMetadata().get().getState());
            // send launched event
            WorkerId workerId2 = new WorkerId(jobId, 1, 2);
            JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId2);
            // check job status again
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            GetJobDetailsResponse resp3 = probe.expectMsgClass(GetJobDetailsResponse.class);
            System.out.println("resp " + resp3 + " msg " + resp3.message);
            assertEquals(SUCCESS, resp3.responseCode);
            // 2 worker have started so job should be started.
            assertEquals(JobState.Launched,resp3.getJobMetadata().get().getState());

            jobActor.tell(new JobClusterManagerProto.ListWorkersRequest(new JobId(clusterName, 1)),probe.getRef());
            JobClusterManagerProto.ListWorkersResponse listWorkersResponse = probe.expectMsgClass(JobClusterManagerProto.ListWorkersResponse.class);
            assertEquals(2, listWorkersResponse.getWorkerMetadata().size());
            // both active workers (numbers 1 and 2) must be present in the listing
            int cnt = 0;
            for(IMantisWorkerMetadata workerMeta : listWorkersResponse.getWorkerMetadata()) {
                if(workerMeta.getWorkerNumber() == 1 || workerMeta.getWorkerNumber() == 2) {
                    cnt ++;
                }
            }
            assertEquals(2, cnt);

            verify(jobStoreMock, times(1)).storeNewJob(any());
            verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
            verify(jobStoreMock, times(6)).updateWorker(any());
            verify(jobStoreMock, times(3)).updateJob(any());
            //assertEquals(jobActor, probe.getLastSender());
        } catch (InvalidJobException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
            fail();
        } catch (Exception e) {
            e.printStackTrace();
            fail();
        }
    }

    /**
     * Killing an initialized job: KillJobRequest is acknowledged, the worker is
     * unscheduled/terminated, the job record is updated, and the actor can then be
     * stopped cleanly with a PoisonPill.
     */
    @Test
    public void testkill() throws Exception {
        final TestKit probe = new TestKit(system);
        String clusterName = "testKillCluster";
        IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName);
        JobDefinition jobDefn = JobTestHelper.generateJobDefinition(clusterName);
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
                .withJobId(new JobId(clusterName,3))
                .withSubmittedAt(Instant.now())
                .withJobState(JobState.Accepted)
                .withNextWorkerNumToUse(1)
                .withJobDefinition(jobDefn)
                .build();
        final ActorRef jobActor = system.actorOf(JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreMock, schedulerMock, eventPublisher));
        jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
        probe.expectMsgClass(JobProto.JobInitialized.class);
        probe.watch(jobActor);
        JobId jId = new JobId(clusterName,3);
        jobActor.tell(new JobClusterProto.KillJobRequest( jId, "test reason", JobCompletedReason.Normal, "nj", probe.getRef()), probe.getRef());
        probe.expectMsgClass(JobClusterProto.KillJobResponse.class);
        JobTestHelper.sendWorkerTerminatedEvent(probe,jobActor,jId.getId(),new WorkerId(jId.getId(),0,1));
        // give the asynchronous kill processing a moment to complete before verifying
        Thread.sleep(1000);

        verify(schedulerMock, times(1)).unscheduleAndTerminateWorker(any(), any());
        verify(schedulerMock, times(1)).scheduleWorker(any());
        verify(jobStoreMock, times(1)).storeNewJob(any());
        verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
        verify(jobStoreMock, times(2)).updateJob(any());
        //verify(jobStoreMock, times(1)).updateWorker(any());
        jobActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
        probe.expectTerminated(jobActor);
    }

    /**
     * Heartbeat enforcement: when CheckHeartBeat fires at a time far enough in the
     * future that all last heartbeats are stale, both workers are killed and
     * resubmitted (4 schedule calls total, 2 terminate calls).
     */
    @Test
    public void testHeartBeatEnforcement() {
        final TestKit probe = new TestKit(system);
        String clusterName= "testHeartBeatEnforcementCluster";
        IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName);
        JobDefinition jobDefn;
        try {
            SchedulingInfo sInfo = new SchedulingInfo.Builder().numberOfStages(1).multiWorkerStageWithConstraints(2, new MachineDefinition(1.0,1.0,1.0,3), Lists.newArrayList(), Lists.newArrayList()).build();
            jobDefn = JobTestHelper.generateJobDefinition(clusterName, sInfo);
            MantisScheduler schedulerMock = mock(MantisScheduler.class);
            MantisJobStore jobStoreMock = mock(MantisJobStore.class);
            MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
                    .withJobId(new JobId(clusterName,2))
                    .withSubmittedAt(Instant.now())
                    .withJobState(JobState.Accepted)
                    .withNextWorkerNumToUse(1)
                    .withJobDefinition(jobDefn)
                    .build();
            final ActorRef jobActor = system.actorOf(JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreMock, schedulerMock, eventPublisher));
            jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            JobProto.JobInitialized initMsg = probe.expectMsgClass(JobProto.JobInitialized.class);
            assertEquals(SUCCESS, initMsg.responseCode);
            String jobId = clusterName + "-2";
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            GetJobDetailsResponse resp = probe.expectMsgClass(GetJobDetailsResponse.class);
            System.out.println("resp " + resp + " msg " + resp.message);
            assertEquals(SUCCESS, resp.responseCode);
            assertEquals(JobState.Accepted,resp.getJobMetadata().get().getState());
            int stageNo = 1;
            WorkerId workerId = new WorkerId(jobId, 0, 1);
            // send Launched, Initiated and heartbeat
            JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId);
            // check job status again
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
            System.out.println("resp " + resp2 + " msg " + resp2.message);
            assertEquals(SUCCESS, resp2.responseCode);
            // Only 1 worker has started.
            assertEquals(JobState.Accepted,resp2.getJobMetadata().get().getState());
            // send launched event
            WorkerId workerId2 = new WorkerId(jobId, 1, 2);
            JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId2);
            // check job status again
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            GetJobDetailsResponse resp3 = probe.expectMsgClass(GetJobDetailsResponse.class);
            System.out.println("resp " + resp3 + " msg " + resp3.message);
            assertEquals(SUCCESS, resp3.responseCode);
            // 2 worker have started so job should be started.
            assertEquals(JobState.Launched,resp3.getJobMetadata().get().getState());

            JobTestHelper.sendHeartBeat(probe,jobActor,jobId,1,workerId2);
            JobTestHelper.sendHeartBeat(probe,jobActor,jobId,1,workerId);
            // check hb status in the future where we expect all last HBs to be stale.
            Instant now = Instant.now();
            jobActor.tell(new JobProto.CheckHeartBeat(now.plusSeconds(240)), probe.getRef());
            Thread.sleep(1000);
            // 2 original submissions and 2 resubmits because of HB timeouts
            verify(schedulerMock, times(4)).scheduleWorker(any());
            // 2 kills due to resubmits
            verify(schedulerMock, times(2)).unscheduleAndTerminateWorker(any(), any());
            //assertEquals(jobActor, probe.getLastSender());
        } catch (InvalidJobException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
            fail();
        } catch (Exception e) {
            e.printStackTrace();
            fail();
        }
    }

    // NOTE(review): @Test annotation is commented out — this scenario is disabled.
    // It exercises worker replacement against a spied real MantisJobStore instead of a mock.
//    @Test
    public void testLostWorkerGetsReplaced() {
        final TestKit probe = new TestKit(system);
        String clusterName= "testLostWorkerGetsReplaced";
        IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName);
        ActorRef jobActor = null;
        JobDefinition jobDefn;
        try {
            SchedulingInfo sInfo = new SchedulingInfo.Builder().numberOfStages(1).multiWorkerStageWithConstraints(2, new MachineDefinition(1.0,1.0,1.0,3), Lists.newArrayList(), Lists.newArrayList()).build();
            jobDefn = JobTestHelper.generateJobDefinition(clusterName, sInfo);
            MantisScheduler schedulerMock = mock(MantisScheduler.class);
            //MantisJobStore jobStoreMock = mock(MantisJobStore.class);
            MantisJobStore jobStoreSpied = Mockito.spy(jobStore);
            MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
                    .withJobId(new JobId(clusterName,2))
                    .withSubmittedAt(Instant.now())
                    .withJobState(JobState.Accepted)
                    .withNextWorkerNumToUse(1)
                    .withJobDefinition(jobDefn)
                    .build();
            jobActor = system.actorOf(JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreSpied, schedulerMock, eventPublisher));
            jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            JobProto.JobInitialized initMsg = probe.expectMsgClass(JobProto.JobInitialized.class);
            assertEquals(SUCCESS, initMsg.responseCode);
            String jobId = clusterName + "-2";
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            GetJobDetailsResponse resp = probe.expectMsgClass(GetJobDetailsResponse.class);
            System.out.println("resp " + resp + " msg " + resp.message);
            assertEquals(SUCCESS, resp.responseCode);
            assertEquals(JobState.Accepted,resp.getJobMetadata().get().getState());
            int stageNo = 1;
            // send launched event
            WorkerId workerId = new WorkerId(jobId, 0, 1);
            // send heartbeat
            JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId);
            // check job status again
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
            System.out.println("resp " + resp2 + " msg " + resp2.message);
            assertEquals(SUCCESS, resp2.responseCode);
            // Only 1 worker has started.
            assertEquals(JobState.Accepted,resp2.getJobMetadata().get().getState());
            // send launched event
            WorkerId workerId2 = new WorkerId(jobId, 1, 2);
            JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId2);
            // check job status again
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            GetJobDetailsResponse resp3 = probe.expectMsgClass(GetJobDetailsResponse.class);
            System.out.println("resp " + resp3 + " msg " + resp3.message);
            assertEquals(SUCCESS, resp3.responseCode);
            // 2 worker have started so job should be started.
assertEquals(JobState.Launched,resp3.getJobMetadata().get().getState()); // worker 2 gets terminated abnormally JobTestHelper.sendWorkerTerminatedEvent(probe, jobActor, jobId, workerId2); // replaced worker comes up and sends events WorkerId workerId2_replaced = new WorkerId(jobId, 1, 3); JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId2_replaced); jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef()); GetJobDetailsResponse resp4 = probe.expectMsgClass(GetJobDetailsResponse.class); IMantisJobMetadata jobMeta = resp4.getJobMetadata().get(); Map<Integer, ? extends IMantisStageMetadata> stageMetadata = jobMeta.getStageMetadata(); IMantisStageMetadata stage = stageMetadata.get(1); for (JobWorker worker : stage.getAllWorkers()) { System.out.println("worker -> " + worker.getMetadata()); } // 2 initial schedules and 1 replacement verify(schedulerMock, timeout(1_000).times(3)).scheduleWorker(any()); // archive worker should get called once for the dead worker // verify(jobStoreMock, timeout(1_000).times(1)).archiveWorker(any()); Mockito.verify(jobStoreSpied).archiveWorker(any()); //assertEquals(jobActor, probe.getLastSender()); } catch (InvalidJobException e) { // TODO Auto-generated catch block e.printStackTrace(); fail(); } catch (Exception e) { e.printStackTrace(); fail(); } finally { system.stop(jobActor); } } @Test public void workerNumberGeneratorInvalidArgsTest() { try { WorkerNumberGenerator wng = new WorkerNumberGenerator(-1, 10); fail(); } catch(Exception e) { } try { WorkerNumberGenerator wng = new WorkerNumberGenerator(0, 0); fail(); } catch(Exception e) { } } @Test public void workerNumberGeneratorTest() { MantisJobMetadataImpl mantisJobMetaMock = mock(MantisJobMetadataImpl.class); MantisJobStore jobStoreMock = mock(MantisJobStore.class); int incrementStep = 10; WorkerNumberGenerator wng = new WorkerNumberGenerator(0, incrementStep); for(int i=1; i<incrementStep; i++) { 
assertEquals(i, wng.getNextWorkerNumber(mantisJobMetaMock, jobStoreMock)); } try { verify(mantisJobMetaMock,times(1)).setNextWorkerNumberToUse(incrementStep, jobStoreMock); // verify(jobStoreMock, times(1)).updateJob(any()); } catch ( Exception e) { e.printStackTrace(); fail(); } } @Test public void workerNumberGeneratorWithNonZeroLastUsedTest() { MantisJobMetadataImpl mantisJobMetaMock = mock(MantisJobMetadataImpl.class); MantisJobStore jobStoreMock = mock(MantisJobStore.class); int incrementStep = 10; int lastNumber = 7; WorkerNumberGenerator wng = new WorkerNumberGenerator(lastNumber, incrementStep); for(int i=lastNumber+1; i<incrementStep; i++) { assertEquals(i, wng.getNextWorkerNumber(mantisJobMetaMock, jobStoreMock)); } try { verify(mantisJobMetaMock,times(1)).setNextWorkerNumberToUse(lastNumber + incrementStep, jobStoreMock); //verify(jobStoreMock,times(1)).updateJob(any()); } catch (Exception e) { e.printStackTrace(); fail(); } } @Test public void workerNumberGeneratorTest2() { MantisJobMetadataImpl mantisJobMetaMock = mock(MantisJobMetadataImpl.class); MantisJobStore jobStoreMock = mock(MantisJobStore.class); WorkerNumberGenerator wng = new WorkerNumberGenerator(); for(int i=1; i<20; i++) { assertEquals(i, wng.getNextWorkerNumber(mantisJobMetaMock, jobStoreMock)); } try { InOrder inOrder = Mockito.inOrder(mantisJobMetaMock); inOrder.verify(mantisJobMetaMock).setNextWorkerNumberToUse(10, jobStoreMock); inOrder.verify(mantisJobMetaMock).setNextWorkerNumberToUse(20, jobStoreMock); //verify(jobStoreMock, times(2)).updateJob(any()); } catch (Exception e) { e.printStackTrace(); fail(); } } @Test public void workerNumberGeneratorUpdatesStoreTest2() { //MantisJobMetadataImpl mantisJobMetaMock = mock(MantisJobMetadataImpl.class); JobDefinition jobDefnMock = mock(JobDefinition.class); MantisJobMetadataImpl mantisJobMeta = new MantisJobMetadataImpl(JobId.fromId("job-1").get(), Instant.now().toEpochMilli(),Instant.now().toEpochMilli(), jobDefnMock, JobState.Accepted, 
0); MantisJobStore jobStoreMock = mock(MantisJobStore.class); WorkerNumberGenerator wng = new WorkerNumberGenerator(); for(int i=1; i<20; i++) { assertEquals(i, wng.getNextWorkerNumber(mantisJobMeta, jobStoreMock)); } try { //InOrder inOrder = Mockito.inOrder(mantisJobMetaMock); //inOrder.verify(mantisJobMetaMock).setNextWorkerNumberToUse(10, jobStoreMock); //inOrder.verify(mantisJobMetaMock).setNextWorkerNumberToUse(20, jobStoreMock); verify(jobStoreMock, times(2)).updateJob(any()); } catch (Exception e) { e.printStackTrace(); fail(); } } @Test public void workerNumberGeneratorExceptionUpdatingJobTest() { MantisJobMetadataImpl mantisJobMetaMock = mock(MantisJobMetadataImpl.class); MantisJobStore jobStoreMock = mock(MantisJobStore.class); WorkerNumberGenerator wng = new WorkerNumberGenerator(); try { Mockito.doThrow(IOException.class).when(jobStoreMock).updateJob(any()); wng.getNextWorkerNumber(mantisJobMetaMock, jobStoreMock); } catch(Exception e) { e.printStackTrace(); } } }
4,225
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/jobcluster/job/JobTestHelper.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.jobcluster.job; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import java.io.File; import java.time.Duration; import java.time.Instant; import java.util.Optional; import akka.actor.ActorRef; import akka.actor.ActorSystem; import akka.testkit.javadsl.TestKit; import io.mantisrx.shaded.com.google.common.collect.Lists; import io.mantisrx.common.WorkerPorts; import io.mantisrx.master.events.LifecycleEventPublisher; import io.mantisrx.master.jobcluster.job.worker.WorkerHeartbeat; import io.mantisrx.master.jobcluster.job.worker.WorkerState; import io.mantisrx.master.jobcluster.job.worker.WorkerStatus; import io.mantisrx.master.jobcluster.job.worker.WorkerTerminate; import io.mantisrx.master.jobcluster.proto.BaseResponse; import io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto; import io.mantisrx.master.jobcluster.proto.JobClusterProto; import io.mantisrx.master.jobcluster.proto.JobProto; import io.mantisrx.runtime.JobOwner; import io.mantisrx.runtime.JobSla; import io.mantisrx.runtime.MachineDefinition; import io.mantisrx.runtime.MantisJobDurationType; import io.mantisrx.runtime.MantisJobState; import 
io.mantisrx.runtime.WorkerMigrationConfig; import io.mantisrx.runtime.command.InvalidJobException; import io.mantisrx.runtime.descriptor.SchedulingInfo; import io.mantisrx.server.core.JobCompletedReason; import io.mantisrx.server.core.Status; import io.mantisrx.server.core.Status.TYPE; import io.mantisrx.server.core.domain.WorkerId; import io.mantisrx.server.master.domain.IJobClusterDefinition; import io.mantisrx.server.master.domain.JobClusterConfig; import io.mantisrx.server.master.domain.JobClusterDefinitionImpl; import io.mantisrx.server.master.domain.JobDefinition; import io.mantisrx.server.master.domain.JobId; import io.mantisrx.server.master.persistence.MantisJobStore; import io.mantisrx.server.master.scheduler.MantisScheduler; import io.mantisrx.server.master.scheduler.WorkerEvent; import io.mantisrx.server.master.scheduler.WorkerLaunched; import org.junit.Test; public class JobTestHelper { private final static String SPOOL_DIR = "/tmp/MantisSpool"; private final static String ARCHIVE_DIR = "/tmp/MantisArchive"; public static void createDirsIfRequired() { File spoolDir = new File(SPOOL_DIR); File namedJobsDir = new File(SPOOL_DIR + "/" + "namedJobs"); File archiveDir = new File(ARCHIVE_DIR); if (!spoolDir.exists()) { spoolDir.mkdir(); } if (!archiveDir.exists()) { archiveDir.mkdir(); } if (!namedJobsDir.exists()) { namedJobsDir.mkdir(); } } public static void deleteAllFiles() { try { File spoolDir = new File(SPOOL_DIR); File archiveDir = new File(ARCHIVE_DIR); deleteDir(spoolDir); deleteDir(archiveDir); } catch (Exception e) { } } private static void deleteDir(File dir) { if (dir != null) { for (File file : dir.listFiles()) { if (file.isDirectory()) { deleteDir(file); } else { boolean delete = file.delete(); } } } } public static IJobClusterDefinition generateJobClusterDefinition(String name, SchedulingInfo schedInfo) { return generateJobClusterDefinition(name, schedInfo, WorkerMigrationConfig.DEFAULT); } public static IJobClusterDefinition 
generateJobClusterDefinition(String name, SchedulingInfo schedInfo, WorkerMigrationConfig migrationConfig) { JobClusterConfig clusterConfig = new JobClusterConfig.Builder() .withArtifactName("myart") .withSchedulingInfo(schedInfo) .withVersion("0.0.1") .build(); return new JobClusterDefinitionImpl.Builder() .withJobClusterConfig(clusterConfig) .withName(name) .withUser("user") .withParameters(Lists.newArrayList()) .withIsReadyForJobMaster(true) .withOwner(new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo")) .withMigrationConfig(migrationConfig) .build(); } public static IJobClusterDefinition generateJobClusterDefinition(String name) { return generateJobClusterDefinition(name, new SchedulingInfo.Builder().numberOfStages(1).singleWorkerStageWithConstraints(new MachineDefinition(0, 0, 0, 0, 0), Lists.newArrayList(), Lists.newArrayList()).build()); } public static JobDefinition generateJobDefinition(String clusterName, SchedulingInfo schedInfo) throws InvalidJobException { return new JobDefinition.Builder() .withName(clusterName) .withParameters(Lists.newArrayList()) .withLabels(Lists.newArrayList()) .withSchedulingInfo(schedInfo) .withArtifactName("myart") .withSubscriptionTimeoutSecs(0) .withUser("njoshi") .withNumberOfStages(schedInfo.getStages().size()) .withJobSla(new JobSla(0, 0, null, MantisJobDurationType.Perpetual, null)) .build(); } public static JobDefinition generateJobDefinition(String clusterName) throws InvalidJobException { return generateJobDefinition(clusterName, new SchedulingInfo.Builder().numberOfStages(1).singleWorkerStageWithConstraints(new MachineDefinition(1.0, 1.0, 1.0, 1.0, 3), Lists.newArrayList(), Lists.newArrayList()).build()); } public static void sendCheckHeartBeat(final TestKit probe, final ActorRef jobActor, Instant now) { jobActor.tell(new JobProto.CheckHeartBeat(now), probe.getRef()); } public static void sendHeartBeat(final TestKit probe, final ActorRef jobActor, String jobId, int stageNo, WorkerId workerId2) { 
sendHeartBeat(probe, jobActor, jobId, stageNo, workerId2, System.currentTimeMillis()); } public static void sendLaunchedInitiatedStartedEventsToWorker(final TestKit probe, final ActorRef jobActor, String jobId, int stageNo, WorkerId workerId2) { JobTestHelper.sendWorkerLaunchedEvent(probe, jobActor, workerId2, stageNo); JobTestHelper.sendStartInitiatedEvent(probe, jobActor, stageNo, workerId2); // send started JobTestHelper.sendStartedEvent(probe, jobActor, stageNo, workerId2); } // public static void sendLaunchedInitiatedStartedEventsToWorker(final TestKit probe, final ActorRef jobActor, String jobId, // int stageNo, WorkerId workerId2) { // sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId2, System.currentTimeMillis() + 1000); // } public static void sendHeartBeat(final TestKit probe, final ActorRef jobActor, String jobId, int stageNo, WorkerId workerId2, long time) { WorkerEvent heartBeat2 = new WorkerHeartbeat(new Status(jobId, stageNo, workerId2.getWorkerIndex(), workerId2.getWorkerNum(), TYPE.HEARTBEAT, "", MantisJobState.Started, time)); jobActor.tell(heartBeat2, probe.getRef()); } public static void sendWorkerTerminatedEvent(final TestKit probe, final ActorRef jobActor, String jobId, WorkerId workerId2) { WorkerEvent workerTerminated = new WorkerTerminate(workerId2, WorkerState.Failed, JobCompletedReason.Lost); jobActor.tell(workerTerminated, probe.getRef()); } public static void sendWorkerCompletedEvent(final TestKit probe, final ActorRef jobActor, String jobId, WorkerId workerId2) { WorkerEvent workerCompleted = new WorkerTerminate(workerId2, WorkerState.Completed, JobCompletedReason.Normal); jobActor.tell(workerCompleted, probe.getRef()); } public static void sendStartInitiatedEvent(final TestKit probe, final ActorRef jobActor, final int stageNum, WorkerId workerId) { WorkerEvent startInitEvent = new WorkerStatus(new Status( workerId.getJobId(), stageNum, workerId.getWorkerIndex(), workerId.getWorkerNum(), TYPE.INFO, 
"test START_INITIATED event", MantisJobState.StartInitiated )); jobActor.tell(startInitEvent, probe.getRef()); } public static void sendStartedEvent(final TestKit probe, final ActorRef jobActor, final int stageNum, WorkerId workerId) { WorkerEvent startedEvent = new WorkerStatus(new Status( workerId.getJobId(), stageNum, workerId.getWorkerIndex(), workerId.getWorkerNum(), TYPE.INFO, "test STARTED event", MantisJobState.Started )); jobActor.tell(startedEvent, probe.getRef()); } public static void sendJobInitializeEvent(final TestKit probe, final ActorRef jobClusterActor) { JobProto.InitJob initJobEvent = new JobProto.InitJob(probe.getRef(), true); jobClusterActor.tell(initJobEvent, probe.getRef()); } public static void sendWorkerLaunchedEvent(final TestKit probe, final ActorRef jobActor, WorkerId workerId2, int stageNo) { WorkerEvent launchedEvent2 = new WorkerLaunched(workerId2, stageNo, "host1", "vm1", Optional.empty(), new WorkerPorts(Lists.newArrayList(8000, 9000, 9010, 9020, 9030))); jobActor.tell(launchedEvent2, probe.getRef()); } public static void killJobAndVerify(final TestKit probe, String clusterName, JobId jobId, ActorRef jobClusterActor) { jobClusterActor.tell(new JobClusterProto.KillJobRequest(jobId, "test reason", JobCompletedReason.Normal, "nj", probe.getRef()), probe.getRef()); JobClusterManagerProto.KillJobResponse killJobResp = probe.expectMsgClass(JobClusterManagerProto.KillJobResponse.class); assertEquals(SUCCESS, killJobResp.responseCode); } public static void killJobSendWorkerTerminatedAndVerify(final TestKit probe, String clusterName, JobId jobId, ActorRef jobClusterActor, WorkerId workerId) { jobClusterActor.tell(new JobClusterProto.KillJobRequest(jobId, "test reason", JobCompletedReason.Normal, "nj", probe.getRef()), probe.getRef()); JobClusterManagerProto.KillJobResponse killJobResp = probe.expectMsgClass(JobClusterManagerProto.KillJobResponse.class); sendWorkerTerminatedEvent(probe, jobClusterActor, jobId.getId(), workerId); 
assertEquals(SUCCESS, killJobResp.responseCode); } public static void getJobDetailsAndVerify(final TestKit probe, ActorRef jobClusterActor, String jobId, BaseResponse.ResponseCode expectedRespCode, JobState expectedState) { jobClusterActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", JobId.fromId(jobId).get()), probe.getRef()); JobClusterManagerProto.GetJobDetailsResponse detailsResp = probe.expectMsgClass(Duration.ofSeconds(60), JobClusterManagerProto.GetJobDetailsResponse.class); if (expectedRespCode == SUCCESS) { assertEquals(SUCCESS, detailsResp.responseCode); assertTrue(detailsResp.getJobMetadata().isPresent()); assertEquals(jobId, detailsResp.getJobMetadata().get().getJobId().getId()); assertEquals(expectedState, detailsResp.getJobMetadata().get().getState()); } else { assertEquals(expectedRespCode, detailsResp.responseCode); assertFalse(detailsResp.getJobMetadata().isPresent()); } } public static boolean verifyJobStatusWithPolling(final TestKit probe, final ActorRef actorRef, final String jobId1, final JobState expectedState) { boolean result = false; int cnt = 0; // try a few times for timing issue while (cnt < 100 || !result) { cnt++; actorRef.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", JobId.fromId(jobId1).get()), probe.getRef()); JobClusterManagerProto.GetJobDetailsResponse detailsResp = probe.expectMsgClass(JobClusterManagerProto.GetJobDetailsResponse.class); if (detailsResp.getJobMetadata().isPresent() && expectedState.equals(detailsResp.getJobMetadata().get().getState())) { result = true; break; } } return result; } public static void submitJobAndVerifySuccess(final TestKit probe, String clusterName, ActorRef jobClusterActor, final JobDefinition jobDefn, String jobId) { submitJobAndVerifyStatus(probe, clusterName, jobClusterActor, jobDefn, jobId, SUCCESS); } public static void submitJobAndVerifyStatus(final TestKit probe, String clusterName, ActorRef jobClusterActor, final JobDefinition jobDefn, String jobId, ResponseCode 
code) { jobClusterActor.tell(new JobClusterManagerProto.SubmitJobRequest(clusterName, "user", Optional.ofNullable(jobDefn)), probe.getRef()); JobClusterManagerProto.SubmitJobResponse submitResponse = probe.expectMsgClass(JobClusterManagerProto.SubmitJobResponse.class); assertEquals(code, submitResponse.responseCode); if (jobId == null) { assertTrue(!submitResponse.getJobId().isPresent()); } else { assertEquals(jobId, submitResponse.getJobId().get().getId()); } } public static ActorRef submitSingleStageScalableJob(ActorSystem system, TestKit probe, String clusterName, SchedulingInfo sInfo, MantisScheduler schedulerMock, MantisJobStore jobStoreMock, LifecycleEventPublisher lifecycleEventPublisher) throws io.mantisrx.runtime.command.InvalidJobException { IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName, sInfo); JobDefinition jobDefn = JobTestHelper.generateJobDefinition(clusterName, sInfo); MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder() .withJobId(new JobId(clusterName, 1)) .withSubmittedAt(Instant.now()) .withJobState(JobState.Accepted) .withNextWorkerNumToUse(1) .withJobDefinition(jobDefn) .build(); final ActorRef jobActor = system.actorOf(JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreMock, schedulerMock, lifecycleEventPublisher)); jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef()); JobProto.JobInitialized initMsg = probe.expectMsgClass(JobProto.JobInitialized.class); assertEquals(SUCCESS, initMsg.responseCode); String jobId = clusterName + "-1"; jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", JobId.fromId(jobId).get()), probe.getRef()); //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef()); JobClusterManagerProto.GetJobDetailsResponse resp = probe.expectMsgClass(JobClusterManagerProto.GetJobDetailsResponse.class); System.out.println("resp " + resp + " msg " + resp.message); assertEquals(SUCCESS, resp.responseCode); 
assertEquals(JobState.Accepted, resp.getJobMetadata().get().getState()); int stageNo = 1; // send launched event int lastWorkerNum = 0; JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, 0, new WorkerId(jobId, 0, ++lastWorkerNum)); //JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe,jobActor,jobId,1,new WorkerId(jobId,0,2)); for (int i = 0; i < sInfo.forStage(stageNo).getNumberOfInstances(); i++) { WorkerId workerId = new WorkerId(jobId, i, ++lastWorkerNum); JobTestHelper.sendWorkerLaunchedEvent(probe, jobActor, workerId, stageNo); // start initiated event JobTestHelper.sendStartInitiatedEvent(probe, jobActor, stageNo, workerId); // send heartbeat JobTestHelper.sendHeartBeat(probe, jobActor, jobId, stageNo, workerId, System.currentTimeMillis() + 1000); } // check job status again jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", JobId.fromId(jobId).get()), probe.getRef()); //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef()); JobClusterManagerProto.GetJobDetailsResponse resp2 = probe.expectMsgClass(JobClusterManagerProto.GetJobDetailsResponse.class); System.out.println("resp " + resp2 + " msg " + resp2.message); assertEquals(SUCCESS, resp2.responseCode); // 1 worker has started. so job has started assertEquals(JobState.Launched, resp2.getJobMetadata().get().getState()); return jobActor; } @Test public void testCalculateRuntimeLimitForAlreadyStartedJob() { Instant now = Instant.now(); Instant startedAt = now.minusSeconds(5); assertEquals(5, JobHelper.calculateRuntimeDuration(10, startedAt)); } @Test public void testCalculateRuntimeLimitForJustStartedJob() { Instant now = Instant.now(); Instant startedAt = now; assertEquals(10, JobHelper.calculateRuntimeDuration(10, startedAt)); } @Test public void testCalculateRuntimeLimitForAlreadyExpiredJob() { Instant now = Instant.now(); Instant startedAt = now.minusSeconds(15); assertEquals(1, JobHelper.calculateRuntimeDuration(10, startedAt)); } }
4,226
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/jobcluster/job/JobClusterManagerTest.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.jobcluster.job; import static io.mantisrx.master.jobcluster.JobClusterTest.DEFAULT_JOB_OWNER; import static io.mantisrx.master.jobcluster.JobClusterTest.NO_OP_SLA; import static io.mantisrx.master.jobcluster.JobClusterTest.TWO_WORKER_SCHED_INFO; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR_CONFLICT; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR_NOT_FOUND; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS_CREATED; import static java.util.Optional.empty; import static java.util.Optional.of; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Matchers.any; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.IOException; import java.net.MalformedURLException; import java.time.Duration; import 
java.time.temporal.ChronoUnit; import java.util.List; import java.util.Optional; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import akka.actor.ActorRef; import akka.actor.ActorSystem; import akka.testkit.javadsl.TestKit; import io.mantisrx.shaded.com.google.common.collect.Lists; import com.netflix.mantis.master.scheduler.TestHelpers; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import io.mantisrx.common.Label; import io.mantisrx.common.WorkerPorts; import io.mantisrx.master.JobClustersManagerActor; import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl; import io.mantisrx.master.events.LifecycleEventPublisher; import io.mantisrx.master.events.LifecycleEventPublisherImpl; import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl; import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl; import io.mantisrx.master.jobcluster.MantisJobClusterMetadataView; import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata; import io.mantisrx.master.jobcluster.job.worker.JobWorker; import io.mantisrx.master.jobcluster.job.worker.WorkerHeartbeat; import io.mantisrx.master.jobcluster.job.worker.WorkerState; import io.mantisrx.master.jobcluster.job.worker.WorkerStatus; import io.mantisrx.master.jobcluster.proto.BaseResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DisableJobClusterRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.EnableJobClusterRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobClusterRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobClusterResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsResponse; import 
io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLastSubmittedJobIdStreamResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.JobClustersManagerInitializeResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterArtifactRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterLabelsRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterSLARequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyRequest; import io.mantisrx.runtime.JobOwner; import io.mantisrx.runtime.JobSla; import io.mantisrx.runtime.MachineDefinition; import io.mantisrx.runtime.MantisJobDurationType; import io.mantisrx.runtime.MantisJobState; import io.mantisrx.runtime.WorkerMigrationConfig; import io.mantisrx.runtime.WorkerMigrationConfig.MigrationStrategyEnum; import io.mantisrx.runtime.command.InvalidJobException; import io.mantisrx.runtime.descriptor.SchedulingInfo; import io.mantisrx.server.core.JobCompletedReason; import io.mantisrx.server.core.Status; import io.mantisrx.server.core.Status.TYPE; import io.mantisrx.server.core.domain.WorkerId; import io.mantisrx.server.master.domain.IJobClusterDefinition; import io.mantisrx.server.master.domain.JobClusterConfig; import io.mantisrx.server.master.domain.JobClusterDefinitionImpl; import io.mantisrx.server.master.domain.JobDefinition; import io.mantisrx.server.master.domain.JobId; import io.mantisrx.server.master.domain.SLA; import io.mantisrx.server.master.persistence.MantisJobStore; import io.mantisrx.server.master.persistence.MantisStorageProviderAdapter; import io.mantisrx.server.master.scheduler.MantisScheduler; import io.mantisrx.server.master.scheduler.WorkerEvent; import io.mantisrx.server.master.scheduler.WorkerLaunched; import 
org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
import rx.schedulers.Schedulers;
import rx.subjects.BehaviorSubject;

/**
 * Tests for {@code JobClustersManagerActor}: cluster CRUD, job submission, and
 * bootstrap/re-hydration behavior after a (simulated) master restart.
 */
public class JobClusterManagerTest {

    // Shared actor system and manager actor, created once per class in setup().
    static ActorSystem system;
    private static MantisJobStore jobStoreMock;
    private static ActorRef jobClusterManagerActor;
    private static MantisScheduler schedulerMock;
    // Publishes audit/status/worker lifecycle events to logging subscribers.
    private static LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(
            new AuditEventSubscriberLoggingImpl(),
            new StatusEventSubscriberLoggingImpl(),
            new WorkerEventSubscriberLoggingImpl());
    private static final String user = "nj";

    /** Boots a quiet test ActorSystem and a JobClustersManagerActor backed by mocks. */
    @BeforeClass
    public static void setup() {
        Config config = ConfigFactory.parseString("akka {\n"
                + " loggers = [\"akka.testkit.TestEventListener\"]\n"
                + " loglevel = \"WARNING\"\n"
                + " stdout-loglevel = \"WARNING\"\n"
                + "}\n");
        system = ActorSystem.create(
                "JobClusterManagerTest",
                config.withFallback(ConfigFactory.load()));
        TestHelpers.setupMasterConfig();
        jobStoreMock = mock(MantisJobStore.class);
        schedulerMock = mock(MantisScheduler.class);
        jobClusterManagerActor = system.actorOf(JobClustersManagerActor.props(
                jobStoreMock,
                eventPublisher));
        jobClusterManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize(
                schedulerMock, true), ActorRef.noSender());
    }

    /** Shuts the shared actor system down and removes files written by file-based stores. */
    @AfterClass
    public static void tearDown() {
        JobTestHelper.deleteAllFiles();
        TestKit.shutdownActorSystem(system);
        system = null;
    }

    /** Convenience overload using the default worker migration config. */
    private JobClusterDefinitionImpl createFakeJobClusterDefn(
            final String name,
            List<Label> labels) {
        return createFakeJobClusterDefn(name, labels, WorkerMigrationConfig.DEFAULT);
    }

    /** Builds a minimal single-stage, single-worker job cluster definition for tests. */
    private JobClusterDefinitionImpl createFakeJobClusterDefn(
            final String name,
            List<Label> labels,
            WorkerMigrationConfig migrationConfig) {
        JobClusterConfig clusterConfig = new JobClusterConfig.Builder()
                .withArtifactName("myart")
                .withSchedulingInfo(new SchedulingInfo.Builder().numberOfStages(1)
                        .singleWorkerStageWithConstraints(
                                new MachineDefinition(
                                        0, 0, 0, 0, 0),
                                Lists.newArrayList(),
                                Lists.newArrayList())
                        .build())
                .withVersion("0.0.1")
                .build();
        return new JobClusterDefinitionImpl.Builder()
                .withName(name)
                .withUser(user)
                .withJobClusterConfig(clusterConfig)
                .withParameters(Lists.newArrayList())
                .withLabels(labels)
                .withSla(new SLA(0, 1, null, IJobClusterDefinition.CronPolicy.KEEP_EXISTING))
                .withIsReadyForJobMaster(true)
                .withOwner(new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo"))
                .withMigrationConfig(migrationConfig)
                .build();
    }

    /** Builds a minimal one-worker transient job definition named after the cluster. */
    private JobDefinition createJob(String name2) throws InvalidJobException {
        return new JobDefinition.Builder()
                .withName(name2)
                .withParameters(Lists.newArrayList())
                .withLabels(Lists.newArrayList())
                .withSchedulingInfo(new SchedulingInfo.Builder().numberOfStages(1)
                        .singleWorkerStageWithConstraints(
                                new MachineDefinition(
                                        1, 10, 10, 10, 2),
                                Lists.newArrayList(),
                                Lists.newArrayList())
                        .build())
                .withArtifactName("myart")
                .withSubscriptionTimeoutSecs(0)
                .withUser("njoshi")
                .withJobSla(new JobSla(0, 0, null, MantisJobDurationType.Transient, null))
                .build();
    }

    /** Convenience overload using the default worker migration config. */
    private void createJobClusterAndAssert(ActorRef jobClusterManagerActor, String clusterName) {
        createJobClusterAndAssert(
                jobClusterManagerActor, clusterName, WorkerMigrationConfig.DEFAULT);
    }

    /** Creates a job cluster through the manager actor and asserts SUCCESS_CREATED. */
    private void createJobClusterAndAssert(
            ActorRef jobClusterManagerActor,
            String clusterName,
            WorkerMigrationConfig migrationConfig) {
        TestKit probe = new TestKit(system);
        JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(
                clusterName,
                Lists.newArrayList(),
                migrationConfig);
        jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(
                fakeJobCluster,
                "user"), probe.getRef());
        JobClusterManagerProto.CreateJobClusterResponse resp = probe.expectMsgClass(
                JobClusterManagerProto.CreateJobClusterResponse.class);
        assertEquals(SUCCESS_CREATED, resp.responseCode);
    }

    /** Submits one job to the given cluster and asserts the submission succeeded. */
    private void submitJobAndAssert(ActorRef jobClusterManagerActor, String cluster) {
        TestKit probe = new TestKit(system);
        JobDefinition jobDefn;
        try {
jobDefn = createJob(cluster);
            jobClusterManagerActor.tell(
                    new JobClusterManagerProto.SubmitJobRequest(
                            cluster,
                            "me",
                            Optional.ofNullable(jobDefn)), probe.getRef());
            JobClusterManagerProto.SubmitJobResponse submitResp = probe.expectMsgClass(
                    JobClusterManagerProto.SubmitJobResponse.class);
            assertEquals(SUCCESS, submitResp.responseCode);
        } catch (InvalidJobException e) {
            // FIX: previously printed the stack trace and called fail() with no
            // message; fail with the cause so the test report shows why.
            fail("Job submission to cluster " + cluster + " failed: " + e.getMessage());
        }
    }

    /**
     * Bootstrap path: a cluster with no jobs must survive a restart of the
     * JobClustersManagerActor and be re-hydrated from the backing store.
     */
    @Test
    public void testBootStrapJobClustersAndJobs1() {
        TestKit probe = new TestKit(system);
        JobTestHelper.deleteAllFiles();
        MantisJobStore jobStore = new MantisJobStore(new MantisStorageProviderAdapter(
                new io.mantisrx.server.master.store.SimpleCachedFileStorageProvider(),
                eventPublisher));
        MantisJobStore jobStoreSpied = Mockito.spy(jobStore);
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        ActorRef jobClusterManagerActor = system.actorOf(JobClustersManagerActor.props(
                jobStoreSpied,
                eventPublisher));
        jobClusterManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize(
                schedulerMock, true), probe.getRef());
        JobClustersManagerInitializeResponse iResponse = probe.expectMsgClass(Duration.of(
                10,
                ChronoUnit.MINUTES), JobClustersManagerInitializeResponse.class);
        //List<String> clusterNames = Lists.newArrayList("testBootStrapJobClustersAndJobs1");
        String clusterWithNoJob = "testBootStrapJobClusterWithNoJob";
        createJobClusterAndAssert(jobClusterManagerActor, clusterWithNoJob);
        // kill 1 of the jobs to test archive path

        // Stop job cluster Manager Actor
        system.stop(jobClusterManagerActor);
        // create new instance
        jobClusterManagerActor = system.actorOf(JobClustersManagerActor.props(
                jobStore,
                eventPublisher));
        // initialize it
        jobClusterManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize(
                schedulerMock, true), probe.getRef());
        //JobClusterManagerProto.JobClustersManagerInitializeResponse initializeResponse = probe.expectMsgClass(JobClusterManagerProto.JobClustersManagerInitializeResponse.class);
        JobClustersManagerInitializeResponse initializeResponse = probe.expectMsgClass(Duration.of(
                10,
                ChronoUnit.MINUTES), JobClustersManagerInitializeResponse.class);
        assertEquals(SUCCESS, initializeResponse.responseCode);
        // The cluster created before the restart must be readable again.
        jobClusterManagerActor.tell(new GetJobClusterRequest(clusterWithNoJob), probe.getRef());
        GetJobClusterResponse jobClusterResponse = probe.expectMsgClass(Duration.of(
                10,
                ChronoUnit.MINUTES), GetJobClusterResponse.class);
        assertEquals(SUCCESS, jobClusterResponse.responseCode);
        assertTrue(jobClusterResponse.getJobCluster().isPresent());
        assertEquals(clusterWithNoJob, jobClusterResponse.getJobCluster().get().getName());

        // // 1 running worker
        // verify(schedulerMock,timeout(100_1000).times(1)).initializeRunningWorker(any(),any());
        //
        // // 2 worker schedule requests
        // verify(schedulerMock,timeout(100_000).times(4)).scheduleWorker(any());
        try {
            // Mockito.verify(jobStoreSpied).loadAllArchivedJobsAsync();
            Mockito.verify(jobStoreSpied).loadAllJobClusters();
            Mockito.verify(jobStoreSpied).loadAllActiveJobs();
            Mockito.verify(jobStoreSpied).loadAllCompletedJobs();
            // Mockito.verify(jobStoreSpied).archiveWorker(any());
            // Mockito.verify(jobStoreSpied).archiveJob(any());
        } catch (IOException e) {
            e.printStackTrace();
            fail();
        }
    }
jobClusterManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize( schedulerMock, true), probe.getRef()); JobClustersManagerInitializeResponse iResponse = probe.expectMsgClass(Duration.of( 10, ChronoUnit.MINUTES), JobClustersManagerInitializeResponse.class); assertEquals(BaseResponse.ResponseCode.SERVER_ERROR, iResponse.responseCode); } @Test public void testBootStrapJobClustersAndJobs() { TestKit probe = new TestKit(system); JobTestHelper.deleteAllFiles(); MantisJobStore jobStore = new MantisJobStore(new MantisStorageProviderAdapter( new io.mantisrx.server.master.store.SimpleCachedFileStorageProvider(), eventPublisher)); MantisJobStore jobStoreSpied = Mockito.spy(jobStore); MantisScheduler schedulerMock = mock(MantisScheduler.class); ActorRef jobClusterManagerActor = system.actorOf(JobClustersManagerActor.props( jobStoreSpied, eventPublisher)); jobClusterManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize( schedulerMock, false), probe.getRef()); JobClustersManagerInitializeResponse iResponse = probe.expectMsgClass(Duration.of( 10, ChronoUnit.MINUTES), JobClustersManagerInitializeResponse.class); List<String> clusterNames = Lists.newArrayList("testBootStrapJobClustersAndJobs1", "testBootStrapJobClustersAndJobs2", "testBootStrapJobClustersAndJobs3"); String clusterWithNoJob = "testBootStrapJobClusterWithNoJob"; createJobClusterAndAssert(jobClusterManagerActor, clusterWithNoJob); WorkerMigrationConfig migrationConfig = new WorkerMigrationConfig(MigrationStrategyEnum.PERCENTAGE, "{\"percentToMove\":60, \"intervalMs\":30000}"); // Create 3 clusters and submit 1 job each for (String cluster : clusterNames) { createJobClusterAndAssert(jobClusterManagerActor, cluster, migrationConfig); submitJobAndAssert(jobClusterManagerActor, cluster); if (cluster.equals("testBootStrapJobClustersAndJobs1")) { // send worker events for job 1 so it goes to started state String jobId = "testBootStrapJobClustersAndJobs1-1"; WorkerId workerId = new 
WorkerId(jobId, 0, 1); WorkerEvent launchedEvent = new WorkerLaunched( workerId, 0, "host1", "vm1", empty(), new WorkerPorts(Lists.newArrayList(8000, 9000, 9010, 9020, 9030))); jobClusterManagerActor.tell(launchedEvent, probe.getRef()); WorkerEvent startInitEvent = new WorkerStatus(new Status( workerId.getJobId(), 1, workerId.getWorkerIndex(), workerId.getWorkerNum(), TYPE.INFO, "test START_INIT", MantisJobState.StartInitiated)); jobClusterManagerActor.tell(startInitEvent, probe.getRef()); WorkerEvent heartBeat = new WorkerHeartbeat(new Status( jobId, 1, workerId.getWorkerIndex(), workerId.getWorkerNum(), TYPE.HEARTBEAT, "", MantisJobState.Started)); jobClusterManagerActor.tell(heartBeat, probe.getRef()); // get Job status jobClusterManagerActor.tell( new GetJobDetailsRequest( "user", JobId.fromId(jobId).get()), probe.getRef()); GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class); // Ensure its launched assertEquals(SUCCESS, resp2.responseCode); assertEquals(JobState.Launched, resp2.getJobMetadata().get().getState()); } } // kill 1 of the jobs to test archive path JobClusterManagerProto.KillJobRequest killRequest = new JobClusterManagerProto.KillJobRequest( "testBootStrapJobClustersAndJobs2-1", JobCompletedReason.Killed.toString(), "njoshi"); jobClusterManagerActor.tell(killRequest, probe.getRef()); JobClusterManagerProto.KillJobResponse killJobResponse = probe.expectMsgClass( JobClusterManagerProto.KillJobResponse.class); assertEquals(SUCCESS, killJobResponse.responseCode); JobTestHelper.sendWorkerTerminatedEvent( probe, jobClusterManagerActor, "testBootStrapJobClustersAndJobs2-1", new WorkerId("testBootStrapJobClustersAndJobs2-1", 0, 1)); try { Thread.sleep(500); } catch (InterruptedException e) { e.printStackTrace(); } // Stop job cluster Manager Actor system.stop(jobClusterManagerActor); // create new instance jobClusterManagerActor = system.actorOf(JobClustersManagerActor.props( jobStoreSpied, eventPublisher)); // initialize it 
jobClusterManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize( schedulerMock, true), probe.getRef()); JobClustersManagerInitializeResponse initializeResponse = probe.expectMsgClass( JobClustersManagerInitializeResponse.class); //probe.expectMsgClass(Duration.of(10, ChronoUnit.MINUTES),JobClusterManagerProto.JobClustersManagerInitializeResponse.class); //probe.expectMsgClass(JobClusterManagerProto.JobClustersManagerInitializeResponse.class); assertEquals(SUCCESS, initializeResponse.responseCode); // Get Cluster Config jobClusterManagerActor.tell(new GetJobClusterRequest("testBootStrapJobClustersAndJobs1"), probe.getRef()); GetJobClusterResponse clusterResponse = probe.expectMsgClass(GetJobClusterResponse.class); assertEquals(SUCCESS, clusterResponse.responseCode); assertTrue(clusterResponse.getJobCluster().isPresent()); WorkerMigrationConfig mConfig = clusterResponse.getJobCluster().get().getMigrationConfig(); assertEquals(migrationConfig.getStrategy(), mConfig.getStrategy()); assertEquals(migrationConfig.getConfigString(), migrationConfig.getConfigString()); // get Job status jobClusterManagerActor.tell(new GetJobDetailsRequest( "user", JobId.fromId("testBootStrapJobClustersAndJobs1-1") .get()), probe.getRef()); GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class); // Ensure its launched System.out.println("Resp2 -> " + resp2.message); assertEquals(SUCCESS, resp2.responseCode); assertEquals(JobState.Launched, resp2.getJobMetadata().get().getState()); // 1 jobs should be in completed state jobClusterManagerActor.tell(new GetJobDetailsRequest( "user", JobId.fromId("testBootStrapJobClustersAndJobs2-1") .get()), probe.getRef()); resp2 = probe.expectMsgClass(Duration.of(10, ChronoUnit.MINUTES), GetJobDetailsResponse.class); // Ensure its completed assertEquals(SUCCESS, resp2.responseCode); assertEquals(JobState.Completed, resp2.getJobMetadata().get().getState()); jobClusterManagerActor.tell(new GetJobDetailsRequest( 
"user", JobId.fromId("testBootStrapJobClustersAndJobs3-1") .get()), probe.getRef()); resp2 = probe.expectMsgClass(Duration.of(10, ChronoUnit.MINUTES), GetJobDetailsResponse.class); // Ensure its Accepted assertEquals(SUCCESS, resp2.responseCode); assertEquals(JobState.Accepted, resp2.getJobMetadata().get().getState()); try { Optional<JobWorker> workerByIndex = resp2.getJobMetadata().get().getWorkerByIndex(1, 0); assertTrue(workerByIndex.isPresent()); Optional<IMantisStageMetadata> stageMetadata = resp2.getJobMetadata() .get() .getStageMetadata(1); assertTrue(stageMetadata.isPresent()); JobWorker workerByIndex1 = stageMetadata.get().getWorkerByIndex(0); System.out.println("Got worker by index : " + workerByIndex1); Optional<JobWorker> worker = resp2.getJobMetadata().get().getWorkerByNumber(1); assertTrue(worker.isPresent()); } catch (io.mantisrx.server.master.persistence.exceptions.InvalidJobException e) { e.printStackTrace(); } jobClusterManagerActor.tell(new GetLastSubmittedJobIdStreamRequest( "testBootStrapJobClustersAndJobs1"), probe.getRef()); GetLastSubmittedJobIdStreamResponse lastSubmittedJobIdStreamResponse = probe.expectMsgClass( Duration.of(10, ChronoUnit.MINUTES), GetLastSubmittedJobIdStreamResponse.class); lastSubmittedJobIdStreamResponse.getjobIdBehaviorSubject() .get() .take(1) .toBlocking() .subscribe((jId) -> { assertEquals(new JobId( "testBootStrapJobClustersAndJobs1", 1), jId); }); jobClusterManagerActor.tell(new GetJobClusterRequest(clusterWithNoJob), probe.getRef()); GetJobClusterResponse jobClusterResponse = probe.expectMsgClass(Duration.of( 10, ChronoUnit.MINUTES), GetJobClusterResponse.class); assertEquals(SUCCESS, jobClusterResponse.responseCode); assertTrue(jobClusterResponse.getJobCluster().isPresent()); assertEquals(clusterWithNoJob, jobClusterResponse.getJobCluster().get().getName()); // 1 running worker verify(schedulerMock, timeout(100_1000).times(1)).initializeRunningWorker(any(), any()); // 2 worker schedule requests 
verify(schedulerMock, timeout(100_000).times(4)).scheduleWorker(any()); try { Mockito.verify(jobStoreSpied).loadAllArchivedJobsAsync(); Mockito.verify(jobStoreSpied).loadAllActiveJobs(); Mockito.verify(jobStoreSpied).loadAllCompletedJobs(); Mockito.verify(jobStoreSpied).archiveWorker(any()); Mockito.verify(jobStoreSpied).archiveJob(any()); } catch (IOException e) { e.printStackTrace(); fail(); } } /** * Case for a master leader re-election when a new master re-hydrates corrupted job worker metadata. */ @Test public void testBootstrapJobClusterAndJobsWithCorruptedWorkerPorts() throws IOException, io.mantisrx.server.master.persistence.exceptions.InvalidJobException { TestKit probe = new TestKit(system); JobTestHelper.deleteAllFiles(); MantisJobStore jobStore = new MantisJobStore(new MantisStorageProviderAdapter( new io.mantisrx.server.master.store.SimpleCachedFileStorageProvider(), eventPublisher)); MantisJobStore jobStoreSpied = Mockito.spy(jobStore); MantisScheduler schedulerMock = mock(MantisScheduler.class); ActorRef jobClusterManagerActor = system.actorOf(JobClustersManagerActor.props( jobStoreSpied, eventPublisher)); jobClusterManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize( schedulerMock, false), probe.getRef()); probe.expectMsgClass(Duration.of( 10, ChronoUnit.MINUTES), JobClustersManagerInitializeResponse.class); String jobClusterName = "testBootStrapJobClustersAndJobs1"; WorkerMigrationConfig migrationConfig = new WorkerMigrationConfig(MigrationStrategyEnum.PERCENTAGE, "{\"percentToMove\":60, \"intervalMs\":30000}"); createJobClusterAndAssert(jobClusterManagerActor, jobClusterName, migrationConfig); submitJobAndAssert(jobClusterManagerActor, jobClusterName); String jobId = "testBootStrapJobClustersAndJobs1-1"; WorkerId workerId = new WorkerId(jobId, 0, 1); WorkerEvent launchedEvent = new WorkerLaunched( workerId, 0, "host1", "vm1", empty(), new WorkerPorts(Lists.newArrayList(8000, 9000, 9010, 9020, 9030))); 
jobClusterManagerActor.tell(launchedEvent, probe.getRef()); WorkerEvent startInitEvent = new WorkerStatus(new Status( workerId.getJobId(), 1, workerId.getWorkerIndex(), workerId.getWorkerNum(), TYPE.INFO, "test START_INIT", MantisJobState.StartInitiated)); jobClusterManagerActor.tell(startInitEvent, probe.getRef()); WorkerEvent heartBeat = new WorkerHeartbeat(new Status( jobId, 1, workerId.getWorkerIndex(), workerId.getWorkerNum(), TYPE.HEARTBEAT, "", MantisJobState.Started)); jobClusterManagerActor.tell(heartBeat, probe.getRef()); // get Job status jobClusterManagerActor.tell( new GetJobDetailsRequest( "user", JobId.fromId(jobId).get()), probe.getRef()); GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class); // Ensure its launched assertEquals(SUCCESS, resp2.responseCode); JobWorker worker = new JobWorker.Builder() .withWorkerIndex(0) .withWorkerNumber(1) .withJobId(jobId) .withStageNum(1) .withNumberOfPorts(5) .withWorkerPorts(null) .withState(WorkerState.Started) .withLifecycleEventsPublisher(eventPublisher) .build(); jobStoreSpied.updateWorker(worker.getMetadata()); // Stop job cluster Manager Actor system.stop(jobClusterManagerActor); // create new instance jobClusterManagerActor = system.actorOf(JobClustersManagerActor.props( jobStoreSpied, eventPublisher)); // initialize it jobClusterManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize( schedulerMock, true), probe.getRef()); JobClustersManagerInitializeResponse initializeResponse = probe.expectMsgClass( JobClustersManagerInitializeResponse.class); assertEquals(SUCCESS, initializeResponse.responseCode); WorkerId newWorkerId = new WorkerId(jobId, 0, 11); launchedEvent = new WorkerLaunched( newWorkerId, 0, "host1", "vm1", empty(), new WorkerPorts(Lists.newArrayList(8000, 9000, 9010, 9020, 9030))); jobClusterManagerActor.tell(launchedEvent, probe.getRef()); // Get Cluster Config jobClusterManagerActor.tell(new 
GetJobClusterRequest("testBootStrapJobClustersAndJobs1"), probe.getRef()); GetJobClusterResponse clusterResponse = probe.expectMsgClass(GetJobClusterResponse.class); assertEquals(SUCCESS, clusterResponse.responseCode); assertTrue(clusterResponse.getJobCluster().isPresent()); WorkerMigrationConfig mConfig = clusterResponse.getJobCluster().get().getMigrationConfig(); assertEquals(migrationConfig.getStrategy(), mConfig.getStrategy()); assertEquals(migrationConfig.getConfigString(), migrationConfig.getConfigString()); // get Job status jobClusterManagerActor.tell(new GetJobDetailsRequest( "user", JobId.fromId("testBootStrapJobClustersAndJobs1-1") .get()), probe.getRef()); resp2 = probe.expectMsgClass(GetJobDetailsResponse.class); // Ensure its launched assertEquals(SUCCESS, resp2.responseCode); assertEquals(JobState.Launched, resp2.getJobMetadata().get().getState()); IMantisWorkerMetadata mantisWorkerMetadata = resp2.getJobMetadata().get() .getWorkerByIndex(1, 0).get() .getMetadata(); assertNotNull(mantisWorkerMetadata.getWorkerPorts()); assertEquals(11, mantisWorkerMetadata.getWorkerNumber()); assertEquals(1, mantisWorkerMetadata.getTotalResubmitCount()); jobClusterManagerActor.tell(new GetLastSubmittedJobIdStreamRequest( "testBootStrapJobClustersAndJobs1"), probe.getRef()); GetLastSubmittedJobIdStreamResponse lastSubmittedJobIdStreamResponse = probe.expectMsgClass( Duration.of(10, ChronoUnit.MINUTES), GetLastSubmittedJobIdStreamResponse.class); lastSubmittedJobIdStreamResponse.getjobIdBehaviorSubject() .get() .take(1) .toBlocking() .subscribe((jId) -> { assertEquals(new JobId( "testBootStrapJobClustersAndJobs1", 1), jId); }); // Two schedules: one for the initial success, one for a resubmit from corrupted worker ports. verify(schedulerMock, times(2)).scheduleWorker(any()); // One unschedule from corrupted worker ID 1 (before the resubmit). 
verify(schedulerMock, times(1)).unscheduleAndTerminateWorker(eq(workerId), any()); try { Mockito.verify(jobStoreSpied).loadAllArchivedJobsAsync(); Mockito.verify(jobStoreSpied).loadAllActiveJobs(); Mockito.verify(jobStoreSpied).loadAllCompletedJobs(); Mockito.verify(jobStoreSpied).archiveWorker(any()); } catch (IOException e) { e.printStackTrace(); fail(); } } @Test public void testJobClusterCreate() throws MalformedURLException { TestKit probe = new TestKit(system); String clusterName = "testJobClusterCreateCluster"; final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn( clusterName, Lists.newArrayList()); jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest( fakeJobCluster, "user"), probe.getRef()); JobClusterManagerProto.CreateJobClusterResponse resp = probe.expectMsgClass( JobClusterManagerProto.CreateJobClusterResponse.class); assertEquals(SUCCESS_CREATED, resp.responseCode); jobClusterManagerActor.tell(new GetJobClusterRequest(clusterName), probe.getRef()); GetJobClusterResponse resp2 = probe.expectMsgClass(GetJobClusterResponse.class); assertEquals(SUCCESS, resp2.responseCode); assertEquals(clusterName, resp2.getJobCluster().get().getName()); //assertEquals(jobClusterManagerActor, probe.getLastSender().path()); } @Test public void testJobClusterCreateDupFails() throws MalformedURLException { TestKit probe = new TestKit(system); String clusterName = "testJobClusterCreateDupFails"; final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn( clusterName, Lists.newArrayList()); jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest( fakeJobCluster, "user"), probe.getRef()); JobClusterManagerProto.CreateJobClusterResponse resp = probe.expectMsgClass( JobClusterManagerProto.CreateJobClusterResponse.class); assertEquals(SUCCESS_CREATED, resp.responseCode); jobClusterManagerActor.tell(new GetJobClusterRequest(clusterName), probe.getRef()); GetJobClusterResponse resp2 = 
probe.expectMsgClass(GetJobClusterResponse.class); assertEquals(SUCCESS, resp2.responseCode); assertEquals(clusterName, resp2.getJobCluster().get().getName()); jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest( fakeJobCluster, "user"), probe.getRef()); JobClusterManagerProto.CreateJobClusterResponse resp3 = probe.expectMsgClass( JobClusterManagerProto.CreateJobClusterResponse.class); System.out.println("Got resp -> " + resp3); assertEquals(CLIENT_ERROR_CONFLICT, resp3.responseCode); // make sure first cluster is still there jobClusterManagerActor.tell(new GetJobClusterRequest(clusterName), probe.getRef()); GetJobClusterResponse resp4 = probe.expectMsgClass(GetJobClusterResponse.class); assertEquals(SUCCESS, resp4.responseCode); assertEquals(clusterName, resp4.getJobCluster().get().getName()); //assertEquals(jobClusterManagerActor, probe.getLastSender().path()); } @Test public void testListJobClusters() { TestKit probe = new TestKit(system); String clusterName = "testListJobClusters"; JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn( clusterName, Lists.newArrayList()); jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest( fakeJobCluster, "user"), probe.getRef()); JobClusterManagerProto.CreateJobClusterResponse resp = probe.expectMsgClass( JobClusterManagerProto.CreateJobClusterResponse.class); assertEquals(SUCCESS_CREATED, resp.responseCode); String clusterName2 = "testListJobClusters2"; fakeJobCluster = createFakeJobClusterDefn(clusterName2, Lists.newArrayList()); jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest( fakeJobCluster, "user"), probe.getRef()); resp = probe.expectMsgClass(JobClusterManagerProto.CreateJobClusterResponse.class); assertEquals(SUCCESS_CREATED, resp.responseCode); jobClusterManagerActor.tell( new JobClusterManagerProto.ListJobClustersRequest(), probe.getRef()); JobClusterManagerProto.ListJobClustersResponse resp2 = probe.expectMsgClass( 
JobClusterManagerProto.ListJobClustersResponse.class); assertTrue(2 <= resp2.getJobClusters().size()); List<MantisJobClusterMetadataView> jClusters = resp2.getJobClusters(); int cnt = 0; for (MantisJobClusterMetadataView jCluster : jClusters) { if (jCluster.getName().equals(clusterName) || jCluster.getName().equals(clusterName2)) { cnt++; } } assertEquals(2, cnt); } @Test public void testListJobs() throws InvalidJobException { TestKit probe = new TestKit(system); //create cluster 1 String clusterName = "testListJobs"; JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn( clusterName, Lists.newArrayList()); jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest( fakeJobCluster, "user"), probe.getRef()); JobClusterManagerProto.CreateJobClusterResponse resp = probe.expectMsgClass( JobClusterManagerProto.CreateJobClusterResponse.class); assertEquals(SUCCESS_CREATED, resp.responseCode); // submit job to this cluster JobDefinition jobDefn = createJob(clusterName); jobClusterManagerActor.tell( new JobClusterManagerProto.SubmitJobRequest( clusterName, "me", Optional.ofNullable(jobDefn)), probe.getRef()); JobClusterManagerProto.SubmitJobResponse submitResp = probe.expectMsgClass( JobClusterManagerProto.SubmitJobResponse.class); assertEquals(SUCCESS, submitResp.responseCode); // create cluster 2 String clusterName2 = "testListJobs2"; fakeJobCluster = createFakeJobClusterDefn(clusterName2, Lists.newArrayList()); jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest( fakeJobCluster, "user"), probe.getRef()); resp = probe.expectMsgClass(JobClusterManagerProto.CreateJobClusterResponse.class); assertEquals(SUCCESS_CREATED, resp.responseCode); // submit job to this cluster jobDefn = createJob(clusterName2); jobClusterManagerActor.tell( new JobClusterManagerProto.SubmitJobRequest( clusterName2, "me", Optional.ofNullable(jobDefn)), probe.getRef()); submitResp = 
probe.expectMsgClass(JobClusterManagerProto.SubmitJobResponse.class); assertEquals(SUCCESS, submitResp.responseCode); jobClusterManagerActor.tell(new JobClusterManagerProto.ListJobsRequest(), probe.getRef()); JobClusterManagerProto.ListJobsResponse listResp = probe.expectMsgClass( JobClusterManagerProto.ListJobsResponse.class); System.out.println("Got " + listResp.getJobList().size()); boolean foundJob1 = false; boolean foundJob2 = false; for (MantisJobMetadataView v : listResp.getJobList()) { System.out.println("Job -> " + v.getJobMetadata().getJobId()); String jId = v.getJobMetadata().getJobId(); if (jId.equals("testListJobs-1")) { foundJob1 = true; } else if (jId.equals("testListJobs2-1")) { foundJob2 = true; } } assertTrue(listResp.getJobList().size() >= 2); assertTrue(foundJob1 && foundJob2); } @Test public void testJobClusterUpdateAndDelete() throws MalformedURLException { TestKit probe = new TestKit(system); String clusterName = "testJobClusterUpdateAndDeleteCluster"; List<Label> labels = Lists.newLinkedList(); Label l = new Label("labelname", "labelvalue"); labels.add(l); final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn( clusterName, labels); jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest( fakeJobCluster, "user"), probe.getRef()); JobClusterManagerProto.CreateJobClusterResponse createResp = probe.expectMsgClass( JobClusterManagerProto.CreateJobClusterResponse.class); assertEquals(SUCCESS_CREATED, createResp.responseCode); JobClusterConfig clusterConfig = new JobClusterConfig.Builder() .withArtifactName("myart2") .withSchedulingInfo(TWO_WORKER_SCHED_INFO) .withVersion("0.0.2") .build(); final JobClusterDefinitionImpl updatedFakeJobCluster = new JobClusterDefinitionImpl.Builder() .withJobClusterConfig(clusterConfig) .withName(clusterName) .withParameters(Lists.newArrayList()) .withUser(user) .withIsReadyForJobMaster(true) .withOwner(DEFAULT_JOB_OWNER) .withMigrationConfig(WorkerMigrationConfig.DEFAULT) 
.withSla(NO_OP_SLA) .build(); jobClusterManagerActor.tell(new JobClusterManagerProto.UpdateJobClusterRequest( updatedFakeJobCluster, "user"), probe.getRef()); JobClusterManagerProto.UpdateJobClusterResponse updateResp = probe.expectMsgClass( JobClusterManagerProto.UpdateJobClusterResponse.class); if (SUCCESS != updateResp.responseCode) { System.out.println("Update cluster response: " + updateResp); } assertEquals(SUCCESS, updateResp.responseCode); // assertEquals(jobClusterManagerActor, probe.getLastSender()); jobClusterManagerActor.tell( new JobClusterManagerProto.DeleteJobClusterRequest(user, clusterName), probe.getRef()); JobClusterManagerProto.DeleteJobClusterResponse deleteResp = probe.expectMsgClass( JobClusterManagerProto.DeleteJobClusterResponse.class); assertEquals(SUCCESS, deleteResp.responseCode); // assertEquals(jobClusterManagerActor, probe.getLastSender()); } @Test public void testJobClusterSLAUpdate() throws MalformedURLException { TestKit probe = new TestKit(system); String clusterName = "testJobClusterSLAUpdate"; List<Label> labels = Lists.newLinkedList(); Label l = new Label("labelname", "labelvalue"); labels.add(l); final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn( clusterName, labels); jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest( fakeJobCluster, "user"), probe.getRef()); JobClusterManagerProto.CreateJobClusterResponse createResp = probe.expectMsgClass( JobClusterManagerProto.CreateJobClusterResponse.class); assertEquals(SUCCESS_CREATED, createResp.responseCode); UpdateJobClusterSLARequest req = new JobClusterManagerProto.UpdateJobClusterSLARequest( clusterName, 1, 2, "user"); jobClusterManagerActor.tell(req, probe.getRef()); JobClusterManagerProto.UpdateJobClusterSLAResponse updateResp = probe.expectMsgClass( JobClusterManagerProto.UpdateJobClusterSLAResponse.class); assertEquals(SUCCESS, updateResp.responseCode); // assertEquals(jobClusterManagerActor, probe.getLastSender()); 
// NOTE(review): this span starts mid-method — the enclosing test class, its fixtures
// (system, jobClusterManagerActor, schedulerMock) and the helpers createFakeJobClusterDefn /
// createJob are declared above this fragment and are not visible here.
        // Tail of the preceding SLA-update test: fetch the cluster back and check min/max.
        jobClusterManagerActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
        GetJobClusterResponse getResp = probe.expectMsgClass(GetJobClusterResponse.class);
        assertEquals(SUCCESS, getResp.responseCode);
        assertEquals(1, getResp.getJobCluster().get().getSla().getMin());
        assertEquals(2, getResp.getJobCluster().get().getSla().getMax());
        // assertEquals(jobClusterManagerActor, probe.getLastSender());
    }

    // Updating a cluster's labels should succeed and be reflected on a subsequent read.
    @Test
    public void testJobClusterLabelUpdate() throws MalformedURLException {
        TestKit probe = new TestKit(system);
        String clusterName = "testJobClusterLabelUpdate";
        List<Label> labels = Lists.newLinkedList();
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, labels);
        jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(fakeJobCluster, "user"), probe.getRef());
        JobClusterManagerProto.CreateJobClusterResponse createResp = probe.expectMsgClass(JobClusterManagerProto.CreateJobClusterResponse.class);
        assertEquals(SUCCESS_CREATED, createResp.responseCode);
        // Replace the (empty) label list with a single label and verify the round trip.
        List<Label> labels2 = Lists.newLinkedList();
        Label l = new Label("labelname", "labelvalue");
        labels2.add(l);
        UpdateJobClusterLabelsRequest req = new JobClusterManagerProto.UpdateJobClusterLabelsRequest(clusterName, labels2, "user");
        jobClusterManagerActor.tell(req, probe.getRef());
        JobClusterManagerProto.UpdateJobClusterLabelsResponse updateResp = probe.expectMsgClass(JobClusterManagerProto.UpdateJobClusterLabelsResponse.class);
        assertEquals(SUCCESS, updateResp.responseCode);
        jobClusterManagerActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
        GetJobClusterResponse getResp = probe.expectMsgClass(GetJobClusterResponse.class);
        assertEquals(SUCCESS, getResp.responseCode);
        assertEquals(1, getResp.getJobCluster().get().getLabels().size());
        assertEquals(l, getResp.getJobCluster().get().getLabels().get(0));
    }

    // Updating the cluster artifact (jar name + version) should bump the latest version.
    @Test
    public void testJobClusterArtifactUpdate() throws MalformedURLException {
        TestKit probe = new TestKit(system);
        String clusterName = "testJobClusterArtifactUpdate";
        List<Label> labels = Lists.newLinkedList();
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, labels);
        jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(fakeJobCluster, "user"), probe.getRef());
        JobClusterManagerProto.CreateJobClusterResponse createResp = probe.expectMsgClass(JobClusterManagerProto.CreateJobClusterResponse.class);
        assertEquals(SUCCESS_CREATED, createResp.responseCode);
        UpdateJobClusterArtifactRequest req = new JobClusterManagerProto.UpdateJobClusterArtifactRequest(clusterName, "myjar", "1.0.1", true, "user");
        jobClusterManagerActor.tell(req, probe.getRef());
        JobClusterManagerProto.UpdateJobClusterArtifactResponse updateResp = probe.expectMsgClass(JobClusterManagerProto.UpdateJobClusterArtifactResponse.class);
        assertEquals(SUCCESS, updateResp.responseCode);
        jobClusterManagerActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
        GetJobClusterResponse getResp = probe.expectMsgClass(GetJobClusterResponse.class);
        assertEquals(SUCCESS, getResp.responseCode);
        //assertEquals("myjar", getResp.getJobCluster().get().g.getArtifactName());
        assertEquals("1.0.1", getResp.getJobCluster().get().getLatestVersion());
    }

    // Updating the worker-migration strategy should be persisted on the cluster.
    @Test
    public void testJobClusterWorkerMigrationUpdate() throws MalformedURLException {
        TestKit probe = new TestKit(system);
        String clusterName = "testJobClusterWorkerMigrationUpdate";
        List<Label> labels = Lists.newLinkedList();
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, labels);
        jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(fakeJobCluster, "user"), probe.getRef());
        JobClusterManagerProto.CreateJobClusterResponse createResp = probe.expectMsgClass(JobClusterManagerProto.CreateJobClusterResponse.class);
        assertEquals(SUCCESS_CREATED, createResp.responseCode);
        UpdateJobClusterWorkerMigrationStrategyRequest req = new JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyRequest(clusterName, new WorkerMigrationConfig(MigrationStrategyEnum.ONE_WORKER, "{}"), clusterName);
        jobClusterManagerActor.tell(req, probe.getRef());
        JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyResponse updateResp = probe.expectMsgClass(JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyResponse.class);
        assertEquals(SUCCESS, updateResp.responseCode);
        jobClusterManagerActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
        GetJobClusterResponse getResp = probe.expectMsgClass(GetJobClusterResponse.class);
        assertEquals(SUCCESS, getResp.responseCode);
        assertEquals(MigrationStrategyEnum.ONE_WORKER, getResp.getJobCluster().get().getMigrationConfig().getStrategy());
    }

    // Disabling a cluster should mark it disabled on a subsequent read.
    @Test
    public void testJobClusterDisable() throws MalformedURLException {
        TestKit probe = new TestKit(system);
        String clusterName = "testJobClusterDisable";
        List<Label> labels = Lists.newLinkedList();
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, labels);
        jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(fakeJobCluster, "user"), probe.getRef());
        JobClusterManagerProto.CreateJobClusterResponse createResp = probe.expectMsgClass(JobClusterManagerProto.CreateJobClusterResponse.class);
        assertEquals(SUCCESS_CREATED, createResp.responseCode);
        DisableJobClusterRequest req = new JobClusterManagerProto.DisableJobClusterRequest(clusterName, "user");
        jobClusterManagerActor.tell(req, probe.getRef());
        JobClusterManagerProto.DisableJobClusterResponse updateResp = probe.expectMsgClass(JobClusterManagerProto.DisableJobClusterResponse.class);
        assertEquals(SUCCESS, updateResp.responseCode);
        jobClusterManagerActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
        GetJobClusterResponse getResp = probe.expectMsgClass(GetJobClusterResponse.class);
        assertEquals(SUCCESS, getResp.responseCode);
        assertTrue(getResp.getJobCluster().get().isDisabled());
    }

    // Disable then re-enable: the cluster should come back as not disabled.
    @Test
    public void testJobClusterEnable() throws MalformedURLException {
        TestKit probe = new TestKit(system);
        String clusterName = "testJobClusterEnable";
        List<Label> labels = Lists.newLinkedList();
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, labels);
        jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(fakeJobCluster, "user"), probe.getRef());
        JobClusterManagerProto.CreateJobClusterResponse createResp = probe.expectMsgClass(JobClusterManagerProto.CreateJobClusterResponse.class);
        assertEquals(SUCCESS_CREATED, createResp.responseCode);
        DisableJobClusterRequest req = new JobClusterManagerProto.DisableJobClusterRequest(clusterName, "user");
        jobClusterManagerActor.tell(req, probe.getRef());
        JobClusterManagerProto.DisableJobClusterResponse updateResp = probe.expectMsgClass(JobClusterManagerProto.DisableJobClusterResponse.class);
        assertEquals(SUCCESS, updateResp.responseCode);
        jobClusterManagerActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
        GetJobClusterResponse getResp = probe.expectMsgClass(GetJobClusterResponse.class);
        assertEquals(SUCCESS, getResp.responseCode);
        assertTrue(getResp.getJobCluster().get().isDisabled());
        EnableJobClusterRequest req2 = new JobClusterManagerProto.EnableJobClusterRequest(clusterName, "user");
        jobClusterManagerActor.tell(req2, probe.getRef());
        JobClusterManagerProto.EnableJobClusterResponse updateResp2 = probe.expectMsgClass(JobClusterManagerProto.EnableJobClusterResponse.class);
        assertEquals(SUCCESS, updateResp2.responseCode);
        jobClusterManagerActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
        getResp = probe.expectMsgClass(GetJobClusterResponse.class);
        assertEquals(SUCCESS, getResp.responseCode);
        assertFalse(getResp.getJobCluster().get().isDisabled());
    }

    // Submitting a job to an existing cluster should succeed; the job is killed at the end.
    @Test
    public void testJobSubmit() {
        TestKit probe = new TestKit(system);
        String clusterName = "testJobSubmit";
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, Lists.newArrayList());
        jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(fakeJobCluster, "user"), probe.getRef());
        JobClusterManagerProto.CreateJobClusterResponse resp = probe.expectMsgClass(JobClusterManagerProto.CreateJobClusterResponse.class);
        System.out.println("response----->" + resp);
        assertEquals(SUCCESS_CREATED, resp.responseCode);
        JobDefinition jobDefn;
        try {
            jobDefn = createJob(clusterName);
            jobClusterManagerActor.tell(new JobClusterManagerProto.SubmitJobRequest(clusterName, "me", Optional.ofNullable(jobDefn)), probe.getRef());
            JobClusterManagerProto.SubmitJobResponse submitResp = probe.expectMsgClass(JobClusterManagerProto.SubmitJobResponse.class);
            assertEquals(SUCCESS, submitResp.responseCode);
            // Clean up: kill the first (and only) job instance, "<cluster>-1".
            jobClusterManagerActor.tell(new JobClusterManagerProto.KillJobRequest(clusterName + "-1", "", clusterName), probe.getRef());
            JobClusterManagerProto.KillJobResponse kill = probe.expectMsgClass(JobClusterManagerProto.KillJobResponse.class);
        } catch (InvalidJobException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
            fail();
        }
        //assertEquals(jobClusterManagerActor, probe.getLastSender().path());
    }

    // ListWorkers on a running job returns its workers; on an unknown cluster it is a client error.
    @Test
    public void testWorkerList() {
        TestKit probe = new TestKit(system);
        String clusterName = "testWorkerList";
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, Lists.newArrayList());
        jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(fakeJobCluster, "user"), probe.getRef());
        JobClusterManagerProto.CreateJobClusterResponse resp = probe.expectMsgClass(JobClusterManagerProto.CreateJobClusterResponse.class);
        System.out.println("response----->" + resp);
        assertEquals(SUCCESS_CREATED, resp.responseCode);
        JobDefinition jobDefn;
        try {
            jobDefn = createJob(clusterName);
            jobClusterManagerActor.tell(new JobClusterManagerProto.SubmitJobRequest(clusterName, "me", Optional.ofNullable(jobDefn)), probe.getRef());
            JobClusterManagerProto.SubmitJobResponse submitResp = probe.expectMsgClass(JobClusterManagerProto.SubmitJobResponse.class);
            assertEquals(SUCCESS, submitResp.responseCode);
            jobClusterManagerActor.tell(new JobClusterManagerProto.ListWorkersRequest(new JobId(clusterName, 1)), probe.getRef());
            JobClusterManagerProto.ListWorkersResponse listWorkersResponse = probe.expectMsgClass(JobClusterManagerProto.ListWorkersResponse.class);
            assertEquals(SUCCESS, listWorkersResponse.responseCode);
            assertEquals(1, listWorkersResponse.getWorkerMetadata().size());
            // send list workers request to non existent cluster
            jobClusterManagerActor.tell(new JobClusterManagerProto.ListWorkersRequest(new JobId("randomCluster", 1)), probe.getRef());
            JobClusterManagerProto.ListWorkersResponse listWorkersResponse2 = probe.expectMsgClass(JobClusterManagerProto.ListWorkersResponse.class);
            assertEquals(CLIENT_ERROR, listWorkersResponse2.responseCode);
            assertEquals(0, listWorkersResponse2.getWorkerMetadata().size());
            jobClusterManagerActor.tell(new JobClusterManagerProto.KillJobRequest(clusterName + "-1", "", clusterName), probe.getRef());
            JobClusterManagerProto.KillJobResponse kill = probe.expectMsgClass(JobClusterManagerProto.KillJobResponse.class);
        } catch (InvalidJobException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
            fail();
        }
        //assertEquals(jobClusterManagerActor, probe.getLastSender().path());
    }

    // The last-submitted-job-id stream should emit the new JobId after a submit;
    // requesting the stream for an unknown cluster is a not-found client error.
    @Test
    public void testGetJobIdSubject() {
        TestKit probe = new TestKit(system);
        String clusterName = "testGetJobIdSubject";
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, Lists.newArrayList());
        jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(fakeJobCluster, "user"), probe.getRef());
        JobClusterManagerProto.CreateJobClusterResponse resp = probe.expectMsgClass(JobClusterManagerProto.CreateJobClusterResponse.class);
        System.out.println("response----->" + resp);
        assertEquals(SUCCESS_CREATED, resp.responseCode);
        JobDefinition jobDefn;
        try {
            jobClusterManagerActor.tell(new GetLastSubmittedJobIdStreamRequest(clusterName), probe.getRef());
            JobClusterManagerProto.GetLastSubmittedJobIdStreamResponse getLastSubmittedJobIdStreamResponse = probe.expectMsgClass(JobClusterManagerProto.GetLastSubmittedJobIdStreamResponse.class);
            assertEquals(SUCCESS, getLastSubmittedJobIdStreamResponse.responseCode);
            // Subscribe before submitting so the emitted JobId trips the latch.
            CountDownLatch jobIdLatch = new CountDownLatch(1);
            assertTrue(getLastSubmittedJobIdStreamResponse.getjobIdBehaviorSubject().isPresent());
            BehaviorSubject<JobId> jobIdBehaviorSubject = getLastSubmittedJobIdStreamResponse.getjobIdBehaviorSubject().get();
            jobIdBehaviorSubject.subscribeOn(Schedulers.io()).subscribe((jId) -> {
                System.out.println("Got Jid -> " + jId);
                assertEquals(clusterName + "-1", jId.getId());
                jobIdLatch.countDown();
            });
            jobDefn = createJob(clusterName);
            jobClusterManagerActor.tell(new JobClusterManagerProto.SubmitJobRequest(clusterName, "me", of(jobDefn)), probe.getRef());
            JobClusterManagerProto.SubmitJobResponse submitResp = probe.expectMsgClass(JobClusterManagerProto.SubmitJobResponse.class);
            assertEquals(SUCCESS, submitResp.responseCode);
            jobIdLatch.await(1, TimeUnit.SECONDS);
            // try a non existent cluster
            jobClusterManagerActor.tell(new GetLastSubmittedJobIdStreamRequest("randomC"), probe.getRef());
            getLastSubmittedJobIdStreamResponse = probe.expectMsgClass(JobClusterManagerProto.GetLastSubmittedJobIdStreamResponse.class);
            assertEquals(CLIENT_ERROR_NOT_FOUND, getLastSubmittedJobIdStreamResponse.responseCode);
            assertTrue(!getLastSubmittedJobIdStreamResponse.getjobIdBehaviorSubject().isPresent());
            jobClusterManagerActor.tell(new JobClusterManagerProto.KillJobRequest(clusterName + "-1", "", clusterName), probe.getRef());
            JobClusterManagerProto.KillJobResponse kill = probe.expectMsgClass(JobClusterManagerProto.KillJobResponse.class);
        } catch (InvalidJobException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
            fail();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        //assertEquals(jobClusterManagerActor, probe.getLastSender().path());
    }

    // Submitting to a cluster that was never created must be rejected with not-found.
    @Test
    public void testJobSubmitToNonExistentCluster() {
        TestKit probe = new TestKit(system);
        String clusterName = "testJobSubmitToNonExistentClusterCluster";
        JobDefinition jobDefn;
        try {
            jobDefn = createJob(clusterName);
            jobClusterManagerActor.tell(new JobClusterManagerProto.SubmitJobRequest(clusterName, "me", Optional.ofNullable(jobDefn)), probe.getRef());
            JobClusterManagerProto.SubmitJobResponse submitResp = probe.expectMsgClass(JobClusterManagerProto.SubmitJobResponse.class);
            assertEquals(CLIENT_ERROR_NOT_FOUND, submitResp.responseCode);
        } catch (InvalidJobException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
            fail();
        }
        //assertEquals(jobClusterManagerActor, probe.getLastSender().path());
    }

    // A terminal event from a worker of an unknown (zombie) job must be ignored:
    // the scheduler must NOT be asked to terminate the worker.
    @Test
    public void testTerminalEventFromZombieWorkerIgnored() {
        TestKit probe = new TestKit(system);
        String clusterName = "testZombieWorkerHandling";
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, Lists.newArrayList());
        jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(fakeJobCluster, "user"), probe.getRef());
        JobClusterManagerProto.CreateJobClusterResponse resp = probe.expectMsgClass(JobClusterManagerProto.CreateJobClusterResponse.class);
        System.out.println("response----->" + resp);
        assertEquals(SUCCESS_CREATED, resp.responseCode);
        WorkerId zWorker1 = new WorkerId("randomCluster2", "randomCluster2-1", 0, 1);
        JobTestHelper.sendWorkerTerminatedEvent(probe, jobClusterManagerActor, "randomCluster2-1", zWorker1);
        verify(schedulerMock, timeout(1_000).times(0)).unscheduleAndTerminateWorker(zWorker1, empty());
    }

    // A non-terminal event from a zombie worker should cause the worker to be terminated exactly once.
    @Test
    public void testNonTerminalEventFromZombieWorkerLeadsToTermination() {
        TestKit probe = new TestKit(system);
        String clusterName = "testNonTerminalEventFromZombieWorkerLeadsToTermination";
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, Lists.newArrayList());
        jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(fakeJobCluster, "user"), probe.getRef());
        JobClusterManagerProto.CreateJobClusterResponse resp = probe.expectMsgClass(JobClusterManagerProto.CreateJobClusterResponse.class);
        System.out.println("response----->" + resp);
        assertEquals(SUCCESS_CREATED, resp.responseCode);
        WorkerId zWorker1 = new WorkerId("randomCluster", "randomCluster-1", 0, 1);
        JobTestHelper.sendStartInitiatedEvent(probe, jobClusterManagerActor, 1, zWorker1);
        verify(schedulerMock, timeout(1_000).times(1)).unscheduleAndTerminateWorker(zWorker1, empty());
    }
}
4,227
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/MantisMasterAPI.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.master.api.akka;

import akka.NotUsed;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.DeadLetter;
import akka.actor.Props;
import akka.http.javadsl.ConnectHttp;
import akka.http.javadsl.Http;
import akka.http.javadsl.ServerBinding;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.http.javadsl.server.AllDirectives;
import akka.stream.ActorMaterializer;
import akka.stream.javadsl.Flow;
import com.netflix.fenzo.AutoScaleAction;
import com.netflix.fenzo.AutoScaleRule;
import com.netflix.fenzo.VirtualMachineLease;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.DeadLetterActor;
import io.mantisrx.master.JobClustersManagerActor;
import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandler;
import io.mantisrx.master.api.akka.route.v0.AgentClusterRoute;
import io.mantisrx.master.api.akka.route.v0.JobClusterRoute;
import io.mantisrx.master.api.akka.route.v0.JobDiscoveryRoute;
import io.mantisrx.master.api.akka.route.v0.JobStatusRoute;
import io.mantisrx.master.api.akka.route.v0.JobRoute;
import io.mantisrx.master.api.akka.route.v0.MasterDescriptionRoute;
import io.mantisrx.master.api.akka.route.v1.AdminMasterRoute;
import io.mantisrx.master.api.akka.route.v1.AgentClustersRoute;
import io.mantisrx.master.api.akka.route.v1.JobClustersRoute;
import io.mantisrx.master.api.akka.route.MantisMasterRoute;
import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.handlers.JobRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.handlers.JobStatusRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobStatusRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.v1.JobDiscoveryStreamRoute;
import io.mantisrx.master.api.akka.route.v1.JobStatusStreamRoute;
import io.mantisrx.master.api.akka.route.v1.JobsRoute;
import io.mantisrx.master.api.akka.route.v1.LastSubmittedJobIdStreamRoute;
import io.mantisrx.master.events.AuditEventBrokerActor;
import io.mantisrx.master.events.AuditEventSubscriber;
import io.mantisrx.master.events.AuditEventSubscriberAkkaImpl;
import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventPublisherImpl;
import io.mantisrx.master.events.StatusEventBrokerActor;
import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl;
import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.scheduler.AgentsErrorMonitorActor;
import io.mantisrx.master.scheduler.FakeMantisScheduler;
import io.mantisrx.master.scheduler.JobMessageRouterImpl;
import io.mantisrx.master.vm.AgentClusterOperationsImpl;
import io.mantisrx.server.core.master.LocalMasterMonitor;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.master.AgentClustersAutoScaler;
import io.mantisrx.server.master.LeaderRedirectionFilter;
import io.mantisrx.server.master.LeadershipManagerLocalImpl;
import io.mantisrx.server.master.persistence.IMantisStorageProvider;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.persistence.MantisStorageProviderAdapter;
import io.mantisrx.server.master.store.SimpleCachedFileStorageProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observer;

import java.time.Duration;
import java.util.Collections;
import java.util.HashSet;
import java.util.concurrent.CompletionStage;

/**
 * Stand-alone test harness that wires up a Mantis master HTTP API server on localhost
 * with fake/local implementations (FakeMantisScheduler, file-backed storage, local leader
 * election) and serves both the v0 and v1 routes until the user presses RETURN.
 */
public class MantisMasterAPI extends AllDirectives {

    public static final Logger logger = LoggerFactory.getLogger(MantisMasterAPI.class);

    /**
     * Installs a no-op AgentClustersAutoScaler singleton backed by a dummy rule so code
     * paths that consult the auto-scaler during route handling do not fail.
     */
    private static void setupDummyAgentClusterAutoScaler() {
        final AutoScaleRule dummyAutoScaleRule = new AutoScaleRule() {
            @Override
            public String getRuleName() {
                return "test";
            }

            @Override
            public int getMinIdleHostsToKeep() {
                return 1;
            }

            @Override
            public int getMaxIdleHostsToKeep() {
                return 10;
            }

            @Override
            public long getCoolDownSecs() {
                return 300;
            }

            @Override
            public boolean idleMachineTooSmall(VirtualMachineLease lease) {
                return false;
            }
        };
        // The Observer deliberately ignores all scale actions — this is a test harness.
        AgentClustersAutoScaler.initialize(
            () -> new HashSet<>(Collections.singletonList(dummyAutoScaleRule)),
            new Observer<AutoScaleAction>() {
                @Override
                public void onCompleted() {
                }

                @Override
                public void onError(Throwable e) {
                }

                @Override
                public void onNext(AutoScaleAction autoScaleAction) {
                }
            });
    }

    /**
     * Boots the actor system, persistence, route handlers and HTTP server on port 8182,
     * then blocks on stdin; pressing RETURN unbinds the server and terminates the system.
     * NOTE: the wiring below is order-dependent (e.g. the JobClustersManager must be
     * initialized with the scheduler before the route handlers are constructed).
     */
    public static void main(String[] args) throws Exception {
        // boot up server using the route as defined below
        int port = 8182;
        TestHelpers.setupMasterConfig();
        ActorSystem system = ActorSystem.create("MantisMasterAPI");

        // Log any dead letters so lost messages are visible during manual testing.
        final ActorRef actor = system.actorOf(Props.create(DeadLetterActor.class));
        system.eventStream().subscribe(actor, DeadLetter.class);

        final Http http = Http.get(system);
        final ActorMaterializer materializer = ActorMaterializer.create(system);

        // Lifecycle/audit event plumbing: audit events go through a broker actor.
        final AuditEventSubscriber auditEventSubscriber = new AuditEventSubscriberLoggingImpl();
        ActorRef auditEventBrokerActor = system.actorOf(
            AuditEventBrokerActor.props(auditEventSubscriber), "AuditEventBroker");
        final AuditEventSubscriber auditEventSubscriberAkka =
            new AuditEventSubscriberAkkaImpl(auditEventBrokerActor);
        final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl(
            auditEventSubscriberAkka,
            new StatusEventSubscriberLoggingImpl(),
            new WorkerEventSubscriberLoggingImpl());

        // File-backed storage + job-clusters manager driven by a fake scheduler.
        IMantisStorageProvider storageProvider = new MantisStorageProviderAdapter(
            new SimpleCachedFileStorageProvider(), lifecycleEventPublisher);
        ActorRef jobClustersManager = system.actorOf(
            JobClustersManagerActor.props(
                new MantisJobStore(storageProvider), lifecycleEventPublisher),
            "JobClustersManager");
        final FakeMantisScheduler fakeScheduler = new FakeMantisScheduler(jobClustersManager);
        jobClustersManager.tell(
            new JobClusterManagerProto.JobClustersManagerInitialize(fakeScheduler, true),
            ActorRef.noSender());

        // Schedulers.newThread().createWorker().schedulePeriodically(() -> jobClustersManager.tell(new NullPointerException(), ActorRef.noSender()),0, 100, TimeUnit.SECONDS);

        setupDummyAgentClusterAutoScaler();

        final JobClusterRouteHandler jobClusterRouteHandler =
            new JobClusterRouteHandlerAkkaImpl(jobClustersManager);
        final JobRouteHandler jobRouteHandler = new JobRouteHandlerAkkaImpl(jobClustersManager);

        MasterDescription masterDescription = new MasterDescription(
            "localhost",
            "127.0.0.1",
            port,
            port + 2,
            port + 4,
            "api/postjobstatus",
            port + 6,
            System.currentTimeMillis());
        final MasterDescriptionRoute masterDescriptionRoute =
            new MasterDescriptionRoute(masterDescription);

        Duration idleTimeout = system.settings()
            .config()
            .getDuration("akka.http.server.idle-timeout");
        logger.info("idle timeout {} sec ", idleTimeout.getSeconds());

        // Status events flow through a broker that also feeds the agents-error monitor.
        ActorRef agentsErrorMonitorActor = system.actorOf(
            AgentsErrorMonitorActor.props(), "AgentsErrorMonitor");
        ActorRef statusEventBrokerActor = system.actorOf(
            StatusEventBrokerActor.props(agentsErrorMonitorActor), "StatusEventBroker");
        agentsErrorMonitorActor.tell(
            new AgentsErrorMonitorActor.InitializeAgentsErrorMonitor(fakeScheduler),
            ActorRef.noSender());

        final JobStatusRouteHandler jobStatusRouteHandler =
            new JobStatusRouteHandlerAkkaImpl(system, statusEventBrokerActor);
        final AgentClusterOperationsImpl agentClusterOperations = new AgentClusterOperationsImpl(
            storageProvider,
            new JobMessageRouterImpl(jobClustersManager),
            fakeScheduler,
            lifecycleEventPublisher,
            "cluster");
        final JobDiscoveryRouteHandler jobDiscoveryRouteHandler =
            new JobDiscoveryRouteHandlerAkkaImpl(jobClustersManager, idleTimeout);

        // v0 (legacy) routes.
        final JobRoute v0JobRoute = new JobRoute(jobRouteHandler, system);
        final JobDiscoveryRoute v0JobDiscoveryRoute = new JobDiscoveryRoute(jobDiscoveryRouteHandler);
        final JobClusterRoute v0JobClusterRoute = new JobClusterRoute(
            jobClusterRouteHandler, jobRouteHandler, system);
        final JobStatusRoute v0JobStatusRoute = new JobStatusRoute(jobStatusRouteHandler);
        final AgentClusterRoute v0AgentClusterRoute = new AgentClusterRoute(
            agentClusterOperations, system);

        // v1 routes.
        final JobClustersRoute v1JobClustersRoute = new JobClustersRoute(jobClusterRouteHandler, system);
        final JobsRoute v1JobsRoute = new JobsRoute(jobClusterRouteHandler, jobRouteHandler, system);
        final AdminMasterRoute v1AdminMasterRoute = new AdminMasterRoute(masterDescription);
        final AgentClustersRoute v1AgentClustersRoute = new AgentClustersRoute(agentClusterOperations);
        final JobDiscoveryStreamRoute v1JobDiscoveryStreamRoute = new JobDiscoveryStreamRoute(jobDiscoveryRouteHandler);
        final LastSubmittedJobIdStreamRoute v1LastSubmittedJobIdStreamRoute = new LastSubmittedJobIdStreamRoute(jobDiscoveryRouteHandler);
        final JobStatusStreamRoute v1JobStatusStreamRoute = new JobStatusStreamRoute(jobStatusRouteHandler);

        // Local (single-node) leader election; mark this instance as the ready leader.
        LocalMasterMonitor localMasterMonitor = new LocalMasterMonitor(masterDescription);
        LeadershipManagerLocalImpl leadershipMgr = new LeadershipManagerLocalImpl(masterDescription);
        leadershipMgr.setLeaderReady();
        LeaderRedirectionFilter leaderRedirectionFilter = new LeaderRedirectionFilter(
            localMasterMonitor, leadershipMgr);

        final MantisMasterRoute app = new MantisMasterRoute(
            leaderRedirectionFilter,
            masterDescriptionRoute,
            v0JobClusterRoute,
            v0JobRoute,
            v0JobDiscoveryRoute,
            v0JobStatusRoute,
            v0AgentClusterRoute,
            v1JobClustersRoute,
            v1JobsRoute,
            v1AdminMasterRoute,
            v1AgentClustersRoute,
            v1JobDiscoveryStreamRoute,
            v1LastSubmittedJobIdStreamRoute,
            v1JobStatusStreamRoute
        );

        final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow = app.createRoute()
            .flow(system, materializer);
        final CompletionStage<ServerBinding> binding = http.bindAndHandle(
            routeFlow,
            ConnectHttp.toHost("localhost", port),
            materializer);
        binding.exceptionally(failure -> {
            System.err.println("Something very bad happened! " + failure.getMessage());
            system.terminate();
            return null;
        });

        // Schedulers.newThread().createWorker().schedule(() -> leadershipMgr.stopBeingLeader(), 10, TimeUnit.SECONDS);

        System.out.println(
            "Server online at http://localhost:" + port + "/\nPress RETURN to stop...");
        System.in.read(); // let it run until user presses return

        binding
            .thenCompose(ServerBinding::unbind) // trigger unbinding from the port
            .thenAccept(unbound -> system.terminate()); // and shutdown when done
    }
}
4,228
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route/LeaderRedirectionRouteTest.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.master.api.akka.route;

import akka.NotUsed;
import akka.actor.ActorSystem;
import akka.http.javadsl.ConnectHttp;
import akka.http.javadsl.Http;
import akka.http.javadsl.ServerBinding;
import akka.http.javadsl.model.HttpEntity;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.stream.ActorMaterializer;
import akka.stream.javadsl.Flow;
import akka.util.ByteString;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.api.akka.route.v0.MasterDescriptionRoute;
import io.mantisrx.master.jobcluster.job.JobTestHelper;
import io.mantisrx.server.core.master.LocalMasterMonitor;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.core.master.MasterMonitor;
import io.mantisrx.server.master.ILeadershipManager;
import io.mantisrx.server.master.LeaderRedirectionFilter;
import io.mantisrx.server.master.LeadershipManagerLocalImpl;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;

import java.util.Optional;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

/**
 * Verifies the {@link LeaderRedirectionFilter} behavior of the master-info route:
 * 503 while the leader is not ready, 200 with the master description once ready,
 * and a 302 redirect to the leader's host when this instance is not the leader.
 */
public class LeaderRedirectionRouteTest {
    private final static Logger logger = LoggerFactory.getLogger(LeaderRedirectionRouteTest.class);
    private final ActorMaterializer materializer = ActorMaterializer.create(system);
    private final Http http = Http.get(system);
    private static Thread t;
    private static final int serverPort = 8205;
    private static final int targetEndpointPort = serverPort;

    // The master this test pretends to be; "example.com" is the advertised leader host,
    // which the redirect test asserts against.
    private static final MasterDescription fakeMasterDesc = new MasterDescription(
        "example.com",
        "127.0.0.1",
        targetEndpointPort,
        targetEndpointPort + 2,
        targetEndpointPort + 4,
        "api/postjobstatus",
        targetEndpointPort + 6,
        System.currentTimeMillis());

    /**
     * Checks status code (if given) and the CORS header, then drains the response
     * entity into a UTF-8 string.
     * NOTE: the header/status checks were bare Java `assert` statements before, which
     * are no-ops unless the JVM runs with -ea; they are JUnit assertions now.
     */
    private CompletionStage<String> processRespFut(final HttpResponse r,
                                                   final Optional<Integer> expectedStatusCode) {
        logger.info("headers {} {}", r.getHeaders(), r.status());
        expectedStatusCode.ifPresent(sc -> assertEquals(sc.intValue(), r.status().intValue()));
        assertTrue(r.getHeader("Access-Control-Allow-Origin").isPresent());
        assertEquals("*", r.getHeader("Access-Control-Allow-Origin").get().value());

        CompletionStage<HttpEntity.Strict> strictEntity = r.entity().toStrict(1000, materializer);
        return strictEntity.thenCompose(s -> s.getDataBytes()
            .runFold(ByteString.emptyByteString(), (acc, b) -> acc.concat(b), materializer)
            .thenApply(s2 -> s2.utf8String())
        );
    }

    /**
     * Unwraps a (message, throwable) completion pair: fails the test if the stage
     * completed exceptionally, otherwise returns the message.
     */
    private String getResponseMessage(final String msg, final Throwable t) {
        if (t != null) {
            logger.error("got err ", t);
            fail(t.getMessage());
        } else {
            return msg;
        }
        return "";
    }

    private static CompletionStage<ServerBinding> binding;
    private static ActorSystem system = ActorSystem.create("MasterDescriptionRouteTest");
    private static final MasterMonitor masterMonitor = new LocalMasterMonitor(fakeMasterDesc);
    private static final ILeadershipManager leadershipMgr = new LeadershipManagerLocalImpl(fakeMasterDesc);

    /**
     * Starts the master-info route behind the leader-redirection filter on a daemon
     * thread; the latch ensures the server thread got as far as binding (or failing).
     */
    @BeforeClass
    public static void setup() throws Exception {
        JobTestHelper.deleteAllFiles();
        JobTestHelper.createDirsIfRequired();
        final CountDownLatch latch = new CountDownLatch(1);
        t = new Thread(() -> {
            try {
                // boot up server using the route as defined below
                final Http http = Http.get(system);
                final ActorMaterializer materializer = ActorMaterializer.create(system);
                TestHelpers.setupMasterConfig();
                final MasterDescriptionRoute app = new MasterDescriptionRoute(fakeMasterDesc);
                final LeaderRedirectionFilter leaderRedirectionFilter =
                    new LeaderRedirectionFilter(masterMonitor, leadershipMgr);
                final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow =
                    app.createRoute(leaderRedirectionFilter::redirectIfNotLeader)
                        .flow(system, materializer);
                logger.info("starting test server on port {}", serverPort);
                latch.countDown();
                binding = http.bindAndHandle(routeFlow,
                    ConnectHttp.toHost("localhost", serverPort), materializer);
            } catch (Exception e) {
                logger.info("caught exception", e);
                latch.countDown();
                e.printStackTrace();
            }
        });
        t.setDaemon(true);
        t.start();
        latch.await();
    }

    /** Unbinds the HTTP server and terminates the actor system. */
    @AfterClass
    public static void teardown() {
        logger.info("MasterDescriptionRouteTest teardown");
        binding
            .thenCompose(ServerBinding::unbind) // trigger unbinding from the port
            .thenAccept(unbound -> system.terminate()); // and shutdown when done
        t.interrupt();
    }

    /** Builds the full URL of an API endpoint on the local test server. */
    private String masterEndpoint(final String ep) {
        return String.format("http://127.0.0.1:%d/api/%s", targetEndpointPort, ep);
    }

    /**
     * While the leader is not marked ready the route answers 503 with a wait message;
     * once ready it returns the serialized MasterDescription with status 200.
     */
    @Test
    public void testMasterInfoAPIWhenLeader() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        // leader is not ready by default
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.GET(masterEndpoint("masterinfo")));
        responseFuture
            .thenCompose(r -> processRespFut(r, Optional.of(503)))
            .whenComplete((msg, t) -> {
                try {
                    String responseMessage = getResponseMessage(msg, t);
                    logger.info("got response {}", responseMessage);
                    assertEquals("Mantis master awaiting to be ready", responseMessage);
                } catch (Exception e) {
                    fail("unexpected error " + e.getMessage());
                }
                latch.countDown();
            });
        assertTrue(latch.await(2, TimeUnit.SECONDS));

        // mark the leader as bootstrapped and ready
        leadershipMgr.setLeaderReady();
        final CountDownLatch latch2 = new CountDownLatch(1);
        final CompletionStage<HttpResponse> respF = http.singleRequest(
            HttpRequest.GET(masterEndpoint("masterinfo")));
        respF
            .thenCompose(r -> processRespFut(r, Optional.of(200)))
            .whenComplete((msg, t) -> {
                try {
                    String responseMessage = getResponseMessage(msg, t);
                    logger.info("got response {}", responseMessage);
                    MasterDescription masterDescription =
                        Jackson.fromJSON(responseMessage, MasterDescription.class);
                    logger.info("master desc ---> {}", masterDescription);
                    assertEquals(fakeMasterDesc, masterDescription);
                } catch (Exception e) {
                    fail("unexpected error " + e.getMessage());
                }
                latch2.countDown();
            });
        assertTrue(latch2.await(2, TimeUnit.SECONDS));
    }

    /**
     * When this instance stops being the leader, requests get a 302 redirect to the
     * advertised leader host; becoming leader again restores normal behavior.
     */
    @Test(dependsOnMethods = {"testMasterInfoAPIWhenLeader"})
    public void testMasterInfoAPIWhenNotLeader() throws InterruptedException {
        leadershipMgr.stopBeingLeader();
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.GET(masterEndpoint("masterinfo")));
        try {
            responseFuture
                .thenCompose(r -> {
                    logger.info("headers {} {}", r.getHeaders(), r.status());
                    assertEquals(302, r.status().intValue());
                    // These were bare `assert` statements (skipped without -ea); use JUnit.
                    assertTrue(r.getHeader("Access-Control-Allow-Origin").isPresent());
                    assertEquals("*", r.getHeader("Access-Control-Allow-Origin").get().value());
                    assertTrue(r.getHeader("Location").isPresent());
                    assertEquals("http://example.com:" + targetEndpointPort + "/api/masterinfo",
                        r.getHeader("Location").get().value());
                    CompletionStage<HttpEntity.Strict> strictEntity =
                        r.entity().toStrict(1000, materializer);
                    return strictEntity.thenCompose(s -> s.getDataBytes()
                        .runFold(ByteString.emptyByteString(), (acc, b) -> acc.concat(b), materializer)
                        .thenApply(s2 -> s2.utf8String())
                    );
                })
                .whenComplete((msg, t) -> {
                    try {
                        String responseMessage = getResponseMessage(msg, t);
                        logger.info("got response {}", responseMessage);
                    } catch (Exception e) {
                        fail("unexpected error " + e.getMessage());
                    }
                })
                .toCompletableFuture()
                .get(2, TimeUnit.SECONDS);
        } catch (ExecutionException | TimeoutException e) {
            // Collapsed the two identical catch blocks into a multi-catch.
            throw new RuntimeException(e);
        }
        // Become the leader again and re-verify the happy path.
        leadershipMgr.becomeLeader();
        testMasterInfoAPIWhenLeader();
    }
}
4,229
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route/JacksonTest.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route; import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference; import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature; import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper; import com.google.protobuf.Descriptors; import com.google.protobuf.Message; //import com.google.protobuf.util.JsonFormat; import io.mantisrx.master.api.akka.route.proto.JobClusterProtoAdapter; //import io.mantisrx.master.api.proto.JobArchivedWorkersResponse; //import io.mantisrx.master.core.proto.WorkerMetadata; import io.mantisrx.master.jobcluster.job.MantisJobMetadataView; import io.mantisrx.server.master.store.MantisWorkerMetadataWritable; import org.junit.Ignore; import org.junit.Test; import java.io.IOException; import java.util.List; import java.util.Optional; import static org.junit.Assert.fail; public class JacksonTest { @Test @Ignore public void testDeser() throws IOException { final String json = "[{\"jobMetadata\":{\"jobId\":{\"jobNum\":1,\"cluster\":\"sine-function\",\"id\":\"sine-function-1\"},\"submittedAt\":1526923218174,\"jobDefinition\":{\"name\":\"sine-function\",\"user\":\"nmahilani\",\"artifactName\":\"\",\"version\":\"0.1.39 2018-03-13 
09:40:53\",\"parameters\":[],\"jobSla\":{\"runtimeLimitSecs\":0,\"minRuntimeSecs\":0,\"slaType\":\"Lossy\",\"durationType\":\"Perpetual\",\"userProvidedType\":\"\"},\"subscriptionTimeoutSecs\":0,\"schedulingInfo\":{\"stages\":{\"1\":{\"numberOfInstances\":1,\"machineDefinition\":{\"cpuCores\":1.0,\"memoryMB\":1024.0,\"networkMbps\":128.0,\"diskMB\":1024.0,\"numPorts\":1},\"hardConstraints\":[],\"softConstraints\":[],\"scalingPolicy\":null,\"scalable\":true}}},\"numberOfStages\":1,\"labels\":[]},\"state\":\"Launched\",\"nextWorkerNumberToUse\":10,\"startedAt\":-1,\"endedAt\":-1,\"parameters\":[],\"user\":\"nmahilani\",\"labels\":[],\"clusterName\":\"sine-function\",\"submittedAtInstant\":{\"epochSecond\":1526923218,\"nano\":174000000},\"jobJarUrl\":\"http:\",\"startedAtInstant\":null,\"sla\":{\"runtimeLimitSecs\":0,\"minRuntimeSecs\":0,\"slaType\":\"Lossy\",\"durationType\":\"Perpetual\",\"userProvidedType\":\"\"},\"subscriptionTimeoutSecs\":0,\"minRuntimeSecs\":0,\"endedAtInstant\":null},\"stageMetadataList\":[],\"workerMetadataList\":[]}]"; List<MantisJobMetadataView> mantisJobMetadataViews = Jackson.fromJSON(json, new TypeReference<List<MantisJobMetadataView>>() { }); System.out.println(mantisJobMetadataViews.toString()); } @Test @Ignore public void testDeser2() throws IOException { List<MantisJobMetadataView> jobIdInfos = 
Jackson.fromJSON("[{\"jobMetadata\":{\"jobId\":\"sine-function-1\",\"name\":\"sine-function\",\"user\":\"nmahilani\",\"submittedAt\":1527703650220,\"jarUrl\":\"https://mantis.staging.us-east-1.prod.netflix.net/mantis-artifacts/mantis-examples-sine-function-0.2.9.zip\",\"numStages\":2,\"sla\":{\"runtimeLimitSecs\":0,\"minRuntimeSecs\":0,\"slaType\":\"Lossy\",\"durationType\":\"Perpetual\",\"userProvidedType\":\"\"},\"state\":\"Accepted\",\"subscriptionTimeoutSecs\":0,\"parameters\":[{\"name\":\"useRandom\",\"value\":\"True\"}],\"nextWorkerNumberToUse\":11,\"migrationConfig\":{\"strategy\":\"PERCENTAGE\",\"configString\":\"{\\\"percentToMove\\\":25,\\\"intervalMs\\\":60000}\"},\"labels\":[{\"name\":\"_mantis.user\",\"value\":\"nmahilani\"},{\"name\":\"_mantis.ownerEmail\",\"value\":\"nmahilani@netflix.com\"},{\"name\":\"_mantis.jobType\",\"value\":\"other\"},{\"name\":\"_mantis.criticality\",\"value\":\"low\"},{\"name\":\"_mantis.artifact.version\",\"value\":\"0.2.9\"}]},\"stageMetadataList\":[{\"jobId\":\"sine-function-1\",\"stageNum\":0,\"numStages\":2,\"machineDefinition\":{\"cpuCores\":1.0,\"memoryMB\":200.0,\"networkMbps\":128.0,\"diskMB\":1024.0,\"numPorts\":1},\"numWorkers\":1,\"hardConstraints\":null,\"softConstraints\":null,\"scalingPolicy\":null,\"scalable\":false},{\"jobId\":\"sine-function-1\",\"stageNum\":1,\"numStages\":2,\"machineDefinition\":{\"cpuCores\":1.0,\"memoryMB\":200.0,\"networkMbps\":128.0,\"diskMB\":1024.0,\"numPorts\":1},\"numWorkers\":1,\"hardConstraints\":[],\"softConstraints\":[\"M4Cluster\"],\"scalingPolicy\":{\"stage\":1,\"min\":1,\"max\":10,\"increment\":2,\"decrement\":1,\"coolDownSecs\":600,\"strategies\":{\"CPU\":{\"reason\":\"CPU\",\"scaleDownBelowPct\":15.0,\"scaleUpAbovePct\":75.0,\"rollingCount\":{\"count\":12,\"of\":20}}},\"enabled\":true},\"scalable\":true}],\"workerMetadataList\":[{\"workerIndex\":0,\"workerNumber\":2,\"jobId\":\"sine-function-1\",\"stageNum\":0,\"numberOfPorts\":4,\"metricsPort\":0,\"consolePort\":0,\"debug
Port\":-1,\"ports\":[],\"state\":\"Accepted\",\"slave\":null,\"slaveID\":null,\"cluster\":{\"present\":false},\"acceptedAt\":1527703650231,\"launchedAt\":0,\"startingAt\":0,\"startedAt\":0,\"completedAt\":0,\"reason\":null,\"resubmitOf\":-1,\"totalResubmitCount\":0},{\"workerIndex\":0,\"workerNumber\":3,\"jobId\":\"sine-function-1\",\"stageNum\":1,\"numberOfPorts\":4,\"metricsPort\":0,\"consolePort\":0,\"debugPort\":-1,\"ports\":[],\"state\":\"Accepted\",\"slave\":null,\"slaveID\":null,\"cluster\":{\"present\":false},\"acceptedAt\":1527703650232,\"launchedAt\":0,\"startingAt\":0,\"startedAt\":0,\"completedAt\":0,\"reason\":null,\"resubmitOf\":-1,\"totalResubmitCount\":0}]}]", new TypeReference<List<MantisJobMetadataView>>() { }); System.out.println(jobIdInfos); } @Test @Ignore public void testDeser3() throws IOException { String json = "{ \"workers\":[{\"workerIndex\":0,\"workerNumber\":2,\"jobId\":\"sine-function-1\",\"stageNum\":1,\"numberOfPorts\":4,\"" + "workerPorts\":{\"metricsPort\":8000,\"debugPort\":9000,\"consolePort\":9010,\"ports\":[]}," + "\"state\":\"Started\",\"slave\":\"host1\",\"slaveID\":\"vm1\"," + "\"acceptedAt\":1528935765820,\"launchedAt\":1528935765869,\"startingAt\":1528935765872,\"startedAt\":1528935765877,\"completedAt\":-1," + "\"reason\":\"Normal\",\"resubmitOf\":0,\"totalResubmitCount\":0,\"isSubscribed\":false,\"cluster\":null," + "\"consolePort\":9010,\"metricsPort\":8000,\"ports\":{\"metricsPort\":8000,\"debugPort\":9000,\"consolePort\":9010," + "\"ports\":[]},\"debugPort\":9000,\"preferredClusterOptional\":null}]}"; String json2 = "[{\"workerIndex\":0,\"workerNumber\":2,\"jobId\":\"sine-function-1\",\"stageNum\":1,\"numberOfPorts\":4," + "\"metricsPort\":8000,\"consolePort\":9010,\"debugPort\":9000,\"ports\":[],\"state\":\"Started\",\"slave\":\"host1\"," + "\"slaveID\":\"vm1\",\"cluster\":null,\"acceptedAt\":1528936424143,\"launchedAt\":1528936424197," + "\"startingAt\":1528936424199,\"startedAt\":1528936424201,\"completedAt\":-1," 
+ "\"reason\":\"Normal\",\"resubmitOf\":0,\"totalResubmitCount\":0}]"; // JobArchivedWorkersResponse resp = null; // JobArchivedWorkersResponse.Builder builder = JobArchivedWorkersResponse.newBuilder(); // Descriptors.FieldDescriptor workersFD = builder.getDescriptorForType().findFieldByName("workers"); // Message.Builder builder1 = builder.newBuilderForField(workersFD); //// JsonFormat.parser().usingTypeRegistry() // try { // JsonFormat.parser().ignoringUnknownFields().merge(json2, builder1); //// workers = Jackson.fromJSON(responseMessage, new TypeReference<List<IMantisWorkerMetadata>>() {}); // resp = builder.build(); // System.out.println(resp); // } catch (IOException e) { // e.printStackTrace(); // } } @Test public void testDeser4() throws IOException { final ObjectMapper objectMapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); List<MantisJobMetadataView> jobIdInfos = Jackson.fromJSON(objectMapper,"[{\"jobMetadata\":{\"jobId\":\"sine-function-1\",\"name\":\"sine-function\"," + "\"user\":\"nmahilani\",\"submittedAt\":1527703650220,\"jarUrl\":\"https://mantis.staging.us-east-1.prod.netflix.net/mantis-artifacts/mantis-examples-sine-function-0.2.9.zip\"," + "\"numStages\":2,\"sla\":{\"runtimeLimitSecs\":0,\"minRuntimeSecs\":0,\"slaType\":\"Lossy\",\"durationType\":\"Perpetual\",\"userProvidedType\":\"\"}," + "\"state\":\"Accepted\",\"subscriptionTimeoutSecs\":0,\"parameters\":[{\"name\":\"useRandom\",\"value\":\"True\"}],\"nextWorkerNumberToUse\":11," + "\"migrationConfig\":{\"strategy\":\"PERCENTAGE\",\"configString\":\"{\\\"percentToMove\\\":25,\\\"intervalMs\\\":60000}\"}," + "\"labels\":[{\"name\":\"_mantis.user\",\"value\":\"nmahilani\"},{\"name\":\"_mantis.ownerEmail\",\"value\":\"nmahilani@netflix.com\"}," + "{\"name\":\"_mantis.jobType\",\"value\":\"other\"},{\"name\":\"_mantis.criticality\",\"value\":\"low\"},{\"name\":\"_mantis.artifact.version\",\"value\":\"0.2.9\"}]}," + 
"\"stageMetadataList\":[{\"jobId\":\"sine-function-1\",\"stageNum\":0,\"numStages\":2,\"machineDefinition\":{\"cpuCores\":1.0,\"memoryMB\":200.0,\"networkMbps\":128.0,\"diskMB\":1024.0,\"numPorts\":1}," + "\"numWorkers\":1,\"hardConstraints\":null,\"softConstraints\":null,\"scalingPolicy\":null,\"scalable\":false}," + "{\"jobId\":\"sine-function-1\",\"stageNum\":1,\"numStages\":2,\"machineDefinition\":{\"cpuCores\":1.0,\"memoryMB\":200.0,\"networkMbps\":128.0,\"diskMB\":1024.0,\"numPorts\":1},\"numWorkers\":1,\"hardConstraints\":[],\"softConstraints\":[\"M4Cluster\"]," + "\"scalingPolicy\":{\"stage\":1,\"min\":1,\"max\":10,\"increment\":2,\"decrement\":1,\"coolDownSecs\":600," + "\"strategies\":{\"CPU\":{\"reason\":\"CPU\",\"scaleDownBelowPct\":15.0,\"scaleUpAbovePct\":75.0,\"rollingCount\":{\"count\":12,\"of\":20}}},\"enabled\":true},\"scalable\":true}]," + "\"workerMetadataList\":[{\"workerIndex\":0,\"workerNumber\":2,\"jobId\":\"sine-function-1\",\"stageNum\":0,\"numberOfPorts\":4,\"metricsPort\":0,\"consolePort\":0," + "\"debugPort\":-1,\"ports\":[],\"state\":\"Accepted\",\"slave\":null,\"slaveID\":null,\"cluster\":{\"present\":false},\"acceptedAt\":1527703650231,\"launchedAt\":0,\"startingAt\":0,\"startedAt\":0," + "\"completedAt\":0,\"reason\":null,\"resubmitOf\":-1,\"totalResubmitCount\":0},{\"workerIndex\":0,\"workerNumber\":3,\"jobId\":\"sine-function-1\",\"stageNum\":1,\"numberOfPorts\":4,\"metricsPort\":0,\"consolePort\":0,\"debugPort\":-1,\"ports\":[],\"state\":\"Accepted\"," + "\"slave\":null,\"slaveID\":null,\"cluster\":{\"present\":false},\"acceptedAt\":1527703650232,\"launchedAt\":0,\"startingAt\":0,\"startedAt\":0,\"completedAt\":0," + "\"reason\":null,\"resubmitOf\":-1,\"totalResubmitCount\":0}]}]", new TypeReference<List<MantisJobMetadataView>>() { }); System.out.println(jobIdInfos); MantisWorkerMetadataWritable mwm = jobIdInfos.get(0).getWorkerMetadataList().get(0); mwm.setCluster(Optional.ofNullable("test")); 
System.out.println(objectMapper.writer(Jackson.DEFAULT_FILTER_PROVIDER).writeValueAsString(mwm)); } }
4,230
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route/LeaderRedirectionFilterTest.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import akka.http.javadsl.server.AllDirectives; import akka.http.javadsl.server.Route; import io.mantisrx.server.core.master.LocalMasterMonitor; import io.mantisrx.server.core.master.MasterDescription; import io.mantisrx.server.core.master.MasterMonitor; import io.mantisrx.server.master.ILeadershipManager; import io.mantisrx.server.master.LeaderRedirectionFilter; import io.mantisrx.server.master.LeadershipManagerLocalImpl; import org.junit.Test; public class LeaderRedirectionFilterTest extends AllDirectives { @Test public void testRouteUnchangedIfLeader() { // Become leader and make Master monitor return the localhost master, filter should return input Route final MasterDescription fakeMasterDesc = new MasterDescription( "localhost", "127.0.0.1", 8100, 8100 + 2, 8100 + 4, "api/postjobstatus", 8100 + 6, System.currentTimeMillis()); MasterMonitor masterMonitor = new LocalMasterMonitor(fakeMasterDesc); ILeadershipManager leadershipManager = new LeadershipManagerLocalImpl(fakeMasterDesc); leadershipManager.becomeLeader(); LeaderRedirectionFilter filter = new LeaderRedirectionFilter(masterMonitor, leadershipManager); Route testRoute = route(path("test", () -> complete("done"))); Route route = filter.redirectIfNotLeader(testRoute); // leader is not ready by default 
assertNotEquals(testRoute, route); // mark leader ready leadershipManager.setLeaderReady(); Route route2 = filter.redirectIfNotLeader(testRoute); // leader is not ready by default assertEquals(testRoute, route2); } @Test public void testRouteChangesIfNotLeader() { final MasterDescription fakeMasterDesc = new MasterDescription( "localhost", "127.0.0.1", 8100, 8100 + 2, 8100 + 4, "api/postjobstatus", 8100 + 6, System.currentTimeMillis()); MasterMonitor masterMonitor = new LocalMasterMonitor(fakeMasterDesc); ILeadershipManager leadershipManager = new LeadershipManagerLocalImpl(fakeMasterDesc); // Stop being leader, the filter should redirect so the returned Route is different from the input Route leadershipManager.stopBeingLeader(); LeaderRedirectionFilter filter = new LeaderRedirectionFilter(masterMonitor, leadershipManager); Route testRoute = route(path("test", () -> complete("done"))); Route route = filter.redirectIfNotLeader(testRoute); // filter should return input Route if we are current leader assertNotEquals(testRoute, route); } }
4,231
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route/v1/JobClustersRouteTest.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.v1; import akka.NotUsed; import akka.actor.ActorRef; import akka.http.javadsl.ConnectHttp; import akka.http.javadsl.Http; import akka.http.javadsl.ServerBinding; import akka.http.javadsl.model.ContentTypes; import akka.http.javadsl.model.HttpEntities; import akka.http.javadsl.model.HttpRequest; import akka.http.javadsl.model.HttpResponse; import akka.http.javadsl.model.StatusCodes; import akka.stream.ActorMaterializer; import akka.stream.javadsl.Flow; import io.mantisrx.shaded.com.fasterxml.jackson.databind.JsonNode; import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.mantis.master.scheduler.TestHelpers; import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandler; import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandlerAkkaImpl; import io.mantisrx.master.events.*; import io.mantisrx.master.scheduler.FakeMantisScheduler; import io.mantisrx.master.JobClustersManagerActor; import io.mantisrx.master.api.akka.payloads.JobClusterPayloads; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto; import io.mantisrx.server.master.persistence.MantisJobStore; import io.mantisrx.server.master.persistence.SimpleCachedFileStorageProvider; import io.mantisrx.server.master.scheduler.MantisScheduler; import org.mockito.Mockito; import org.omg.PortableInterceptor.NON_EXISTENT; import 
org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import java.io.IOException; import java.util.concurrent.CompletionStage; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.function.Function; import static junit.framework.TestCase.assertNull; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; public class JobClustersRouteTest extends RouteTestBase { private final static Logger logger = LoggerFactory.getLogger(JobClustersRouteTest.class); private static Thread t; private static final int SERVER_PORT = 8200; private static CompletionStage<ServerBinding> binding; private static String TEST_CLUSTER_NAME = "sine-function"; JobClustersRouteTest() { super("JobClustersRouteTest", SERVER_PORT); } @BeforeClass public void setup() throws Exception { TestHelpers.setupMasterConfig(); final CountDownLatch latch = new CountDownLatch(1); t = new Thread(() -> { try { // boot up server using the route as defined below final Http http = Http.get(system); final ActorMaterializer materializer = ActorMaterializer.create(system); final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl( new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl()); ActorRef jobClustersManagerActor = system.actorOf( JobClustersManagerActor.props( new MantisJobStore(new SimpleCachedFileStorageProvider(true)), lifecycleEventPublisher), "jobClustersManager"); MantisScheduler fakeScheduler = new FakeMantisScheduler(jobClustersManagerActor); jobClustersManagerActor.tell( new JobClusterManagerProto.JobClustersManagerInitialize( fakeScheduler, false), ActorRef.noSender()); final JobClusterRouteHandler jobClusterRouteHandler = new JobClusterRouteHandlerAkkaImpl( jobClustersManagerActor); final JobClustersRoute app = new 
JobClustersRoute(jobClusterRouteHandler, system); final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow = app.createRoute(Function.identity()) .flow(system, materializer); logger.info("starting test server on port {}", SERVER_PORT); latch.countDown(); binding = http.bindAndHandle( routeFlow, ConnectHttp.toHost("localhost", SERVER_PORT), materializer); } catch (Exception e) { logger.info("caught exception", e); latch.countDown(); e.printStackTrace(); } }); t.setDaemon(true); t.start(); latch.await(); } @AfterClass public void teardown() { logger.info("V1JobClusterRouteTest teardown"); binding.thenCompose(ServerBinding::unbind) // trigger unbinding from the port .thenAccept(unbound -> system.terminate()); // and shutdown when done t.interrupt(); } @Test public void cleanupExistingJobs() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); final CompletionStage<HttpResponse> responseFuture = http.singleRequest( HttpRequest.DELETE(getJobClusterInstanceEndpoint(TEST_CLUSTER_NAME)) ); responseFuture.whenComplete((msg, t) -> latch.countDown()); assertTrue(latch.await(1, TimeUnit.SECONDS)); } @Test(dependsOnMethods = {"cleanupExistingJobs"}) public void testJobClusterCreate() throws InterruptedException { testPost( getJobClustersEndpoint(), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_CREATE), StatusCodes.CREATED, this::compareClusterInstancePayload); assert this.isClusterExist(TEST_CLUSTER_NAME); } @Test(dependsOnMethods = {"testJobClusterCreate"}) public void testDuplicateJobClusterCreate() throws InterruptedException { testPost( getJobClustersEndpoint(), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_CREATE), StatusCodes.CONFLICT, null); } @Test(dependsOnMethods = {"testDuplicateJobClusterCreate"}) public void testNonExistentJobClusterLatestJobDiscoveryInfo() throws InterruptedException { testGet( getJobClusterLatestJobDiscoveryInfoEp("NonExistentCluster"), 
StatusCodes.NOT_FOUND, null); } @Test(dependsOnMethods = {"testDuplicateJobClusterCreate"}) public void testJobClusterLatestJobDiscoveryInfoNoRunningJobs() throws InterruptedException { testGet( getJobClusterLatestJobDiscoveryInfoEp(TEST_CLUSTER_NAME), StatusCodes.NOT_FOUND, null); } @Test(dependsOnMethods = "testDuplicateJobClusterCreate") public void testJobClustersList() throws InterruptedException { testGet( getJobClustersEndpoint(), StatusCodes.OK, this::compareClustersPayload ); } @Test() public void testJobClustersDelete() throws InterruptedException { testDelete( getJobClustersEndpoint(), StatusCodes.METHOD_NOT_ALLOWED, null); } @Test() public void testJobClustersPut() throws InterruptedException { testPut( getJobClustersEndpoint(), StatusCodes.METHOD_NOT_ALLOWED, null); } @Test(dependsOnMethods = "testJobClustersList") public void testJobClusterInstanceGET() throws InterruptedException { testGet( getJobClusterInstanceEndpoint(TEST_CLUSTER_NAME), StatusCodes.OK, this::compareClusterInstancePayload); } @Test(dependsOnMethods = "testJobClusterInstanceGET") public void testNonExistentJobClusterInstanceGET() throws InterruptedException { testGet( getJobClusterInstanceEndpoint("doesNotExist"), StatusCodes.NOT_FOUND, null ); } @Test(dependsOnMethods = "testNonExistentJobClusterInstanceGET") public void testJobClusterInstancePOSTNotAllowed() throws InterruptedException { testPost( getJobClusterInstanceEndpoint(TEST_CLUSTER_NAME), StatusCodes.METHOD_NOT_ALLOWED, null); } @Test(dependsOnMethods = "testJobClusterInstancePOSTNotAllowed") public void testJobClusterInstanceValidUpdate() throws InterruptedException { testPut( getJobClusterInstanceEndpoint(TEST_CLUSTER_NAME), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_VALID_UPDATE), StatusCodes.OK, this::compareClusterInstancePayload); } @Test(dependsOnMethods = "testJobClusterInstanceValidUpdate") public void testJobClusterInstanceInvalidUpdate() throws InterruptedException { 
testPut( getJobClusterInstanceEndpoint(TEST_CLUSTER_NAME), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_INVALID_UPDATE), StatusCodes.BAD_REQUEST, null); } @Test(dependsOnMethods = "testJobClusterInstanceInvalidUpdate") public void testJobClusterInstanceNonExistentUpdate() throws InterruptedException { testPut( getJobClusterInstanceEndpoint("NonExistent"), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_INVALID_UPDATE), StatusCodes.NOT_FOUND, null); } @Test(dependsOnMethods = "testJobClusterInstanceNonExistentUpdate") public void testJobClusterNonExistentDelete() throws InterruptedException { testDelete( getJobClusterInstanceEndpoint("NonExistent") + "?user=test&reason=unittest", StatusCodes.NOT_FOUND, null); } @Test(dependsOnMethods = "testJobClusterNonExistentDelete") public void testJobClusterActionUpdateArtifactPost() throws InterruptedException { testPost( getJobClusterUpdateArtifactEp(TEST_CLUSTER_NAME), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_QUICK_UPDATE_AND_SKIP_SUBMIT), StatusCodes.NO_CONTENT, EMPTY_RESPONSE_VALIDATOR); } @Test(dependsOnMethods = "testJobClusterActionUpdateArtifactPost") public void testJobClusterActionUpdateArtifactPostNonExistent() throws InterruptedException { testPost( getJobClusterUpdateArtifactEp("NonExistent"), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_QUICK_UPDATE_AND_SKIP_SUBMIT), StatusCodes.BAD_REQUEST, (m) -> { assert m.contains( "Cluster name specified in request payload sine-function does " + "not match with what specified in resource path NonExistent"); }); } @Test(dependsOnMethods = "testJobClusterActionUpdateArtifactPostNonExistent") public void testJobClusterActionUpdateArtifactPostNonMatchedResource() throws InterruptedException { testPost( getJobClusterUpdateArtifactEp("NonExistent"), HttpEntities.create( ContentTypes.APPLICATION_JSON, 
JobClusterPayloads.JOB_CLUSTER_QUICK_UPDATE_AND_SKIP_SUBMIT_NON_EXISTENT), StatusCodes.NOT_FOUND, null); } @Test(dependsOnMethods = "testJobClusterActionUpdateArtifactPostNonMatchedResource") public void testJobClusterActionUpdateArtifactGetNotAllowed() throws InterruptedException { testGet( getJobClusterUpdateArtifactEp(TEST_CLUSTER_NAME), StatusCodes.METHOD_NOT_ALLOWED, null); } @Test(dependsOnMethods = "testJobClusterActionUpdateArtifactGetNotAllowed") public void testJobClusterActionUpdateArtifactPUTNotAllowed() throws InterruptedException { testPut( getJobClusterUpdateArtifactEp(TEST_CLUSTER_NAME), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_QUICK_UPDATE_AND_SKIP_SUBMIT), StatusCodes.METHOD_NOT_ALLOWED, null); } @Test(dependsOnMethods = "testJobClusterActionUpdateArtifactPUTNotAllowed") public void testJobClusterActionUpdateArtifactDELETENotAllowed() throws InterruptedException { testDelete( getJobClusterUpdateArtifactEp(TEST_CLUSTER_NAME), StatusCodes.METHOD_NOT_ALLOWED, null ); } /** test Update SLA actions **/ @Test(dependsOnMethods = "testJobClusterActionUpdateArtifactDELETENotAllowed") public void testJobClusterActionUpdateSlaPost() throws InterruptedException { testPost(getJobClusterUpdateSlaEp(TEST_CLUSTER_NAME), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_UPDATE_SLA), StatusCodes.NO_CONTENT, null); } @Test(dependsOnMethods = "testJobClusterActionUpdateSlaPost") public void testJobClusterActionUpdateSlaPostNonExistent() throws InterruptedException { testPost( getJobClusterUpdateSlaEp("NonExistent"), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_UPDATE_SLA_NONEXISTENT), StatusCodes.NOT_FOUND, null); } @Test(dependsOnMethods = "testJobClusterActionUpdateSlaPostNonExistent") public void testJobClusterActionUpdateSlaPostNonMatchedResource() throws InterruptedException { testPost( getJobClusterUpdateSlaEp("NonExistent"), HttpEntities.create( 
ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_UPDATE_SLA), StatusCodes.BAD_REQUEST, null); } @Test(dependsOnMethods = "testJobClusterActionUpdateSlaPostNonMatchedResource") public void testJobClusterActionUpdateSlaGetNotAllowed() throws InterruptedException { testGet( getJobClusterUpdateSlaEp(TEST_CLUSTER_NAME), StatusCodes.METHOD_NOT_ALLOWED, null); } @Test(dependsOnMethods = "testJobClusterActionUpdateSlaGetNotAllowed") public void testJobClusterActionUpdateSlaPUTNotAllowed() throws InterruptedException { testPut( getJobClusterUpdateSlaEp(TEST_CLUSTER_NAME), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_UPDATE_SLA), StatusCodes.METHOD_NOT_ALLOWED, null); } @Test(dependsOnMethods = "testJobClusterActionUpdateArtifactPUTNotAllowed") public void testJobClusterActionUpdateSlaDELETENotAllowed() throws InterruptedException { testDelete( getJobClusterUpdateSlaEp(TEST_CLUSTER_NAME), StatusCodes.METHOD_NOT_ALLOWED, null); } /** Update migration strategy actions tests **/ @Test(dependsOnMethods = "testJobClusterActionUpdateSlaDELETENotAllowed") public void testJobClusterActionUpdateMigrationPost() throws InterruptedException { testPost( getJobClusterUpdateMigrationStrategyEp(TEST_CLUSTER_NAME), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.MIGRATE_STRATEGY_UPDATE), StatusCodes.NO_CONTENT, null); } @Test(dependsOnMethods = "testJobClusterActionUpdateMigrationPost") public void testJobClusterActionUpdateMigrationPostNonExistent() throws InterruptedException { testPost( getJobClusterUpdateMigrationStrategyEp("NonExistent"), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.MIGRATE_STRATEGY_UPDATE_NONEXISTENT), StatusCodes.NOT_FOUND, null); } @Test(dependsOnMethods = "testJobClusterActionUpdateMigrationPostNonExistent") public void testJobClusterActionUpdateMigrationPostNonMatchedResource() throws InterruptedException { testPost( 
getJobClusterUpdateMigrationStrategyEp("NonExistent"), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.MIGRATE_STRATEGY_UPDATE), StatusCodes.BAD_REQUEST, null); } @Test(dependsOnMethods = "testJobClusterActionUpdateMigrationPostNonMatchedResource") public void testJobClusterActionUpdateMigrationGetNotAllowed() throws InterruptedException { testGet( getJobClusterUpdateMigrationStrategyEp(TEST_CLUSTER_NAME), StatusCodes.METHOD_NOT_ALLOWED, null); } @Test(dependsOnMethods = "testJobClusterActionUpdateMigrationGetNotAllowed") public void testJobClusterActionUpdateMigrationPUTNotAllowed() throws InterruptedException { testPut( getJobClusterUpdateMigrationStrategyEp(TEST_CLUSTER_NAME), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.MIGRATE_STRATEGY_UPDATE), StatusCodes.METHOD_NOT_ALLOWED, null); } @Test(dependsOnMethods = "testJobClusterActionUpdateMigrationPUTNotAllowed") public void testJobClusterActionUpdateMigrationDELETENotAllowed() throws InterruptedException { testDelete( getJobClusterUpdateMigrationStrategyEp(TEST_CLUSTER_NAME), StatusCodes.METHOD_NOT_ALLOWED, null); } /** Update label actions tests **/ @Test(dependsOnMethods = "testJobClusterActionUpdateMigrationDELETENotAllowed") public void testJobClusterActionUpdateLabelPost() throws InterruptedException { testPost( getJobClusterUpdateLabelEp(TEST_CLUSTER_NAME), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_UPDATE_LABELS), StatusCodes.NO_CONTENT, null); } @Test(dependsOnMethods = "testJobClusterActionUpdateLabelPost") public void testJobClusterActionUpdateLabelPostNonExistent() throws InterruptedException { testPost( getJobClusterUpdateLabelEp("NonExistent"), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_UPDATE_LABELS_NONEXISTENT), StatusCodes.NOT_FOUND, null); } @Test(dependsOnMethods = "testJobClusterActionUpdateLabelPostNonExistent") public void 
testJobClusterActionUpdateLabelPostNonMatchedResource() throws InterruptedException { testPost( getJobClusterUpdateLabelEp("NonExistent"), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_UPDATE_LABELS), StatusCodes.BAD_REQUEST, null); } @Test(dependsOnMethods = "testJobClusterActionUpdateLabelPostNonMatchedResource") public void testJobClusterActionUpdateLabelGetNotAllowed() throws InterruptedException { testGet( getJobClusterUpdateLabelEp(TEST_CLUSTER_NAME), StatusCodes.METHOD_NOT_ALLOWED, null); } @Test(dependsOnMethods = "testJobClusterActionUpdateLabelGetNotAllowed") public void testJobClusterActionUpdateLabelPUTNotAllowed() throws InterruptedException { testPut( getJobClusterUpdateLabelEp(TEST_CLUSTER_NAME), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_UPDATE_LABELS), StatusCodes.METHOD_NOT_ALLOWED, null); } @Test(dependsOnMethods = "testJobClusterActionUpdateLabelPUTNotAllowed") public void testJobClusterActionUpdateLabelDELETENotAllowed() throws InterruptedException { testDelete( getJobClusterUpdateLabelEp(TEST_CLUSTER_NAME), StatusCodes.METHOD_NOT_ALLOWED, null); } /** enable cluster action test **/ @Test(dependsOnMethods = "testJobClusterActionUpdateLabelDELETENotAllowed") public void testJobClusterActionEnablePost() throws InterruptedException { testPost( getJobClusterEnableEp(TEST_CLUSTER_NAME), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_ENABLE), StatusCodes.NO_CONTENT, null); } @Test(dependsOnMethods = "testJobClusterActionEnablePost") public void testJobClusterActionEnablePostNonExistent() throws InterruptedException { testPost( getJobClusterEnableEp("NonExistent"), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_ENABLE_NONEXISTENT), StatusCodes.NOT_FOUND, null); } @Test(dependsOnMethods = "testJobClusterActionEnablePostNonExistent") public void testJobClusterActionEnablePostNonMatchedResource() throws 
InterruptedException { testPost( getJobClusterEnableEp("NonExistent"), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_ENABLE), StatusCodes.BAD_REQUEST, null); } @Test(dependsOnMethods = "testJobClusterActionEnablePostNonMatchedResource") public void testJobClusterActionEnableGetNotAllowed() throws InterruptedException { testGet(getJobClusterEnableEp(TEST_CLUSTER_NAME), StatusCodes.METHOD_NOT_ALLOWED, null); } @Test(dependsOnMethods = "testJobClusterActionEnableGetNotAllowed") public void testJobClusterActionEnablePUTNotAllowed() throws InterruptedException { testPut(getJobClusterEnableEp(TEST_CLUSTER_NAME), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_ENABLE), StatusCodes.METHOD_NOT_ALLOWED, null); } @Test(dependsOnMethods = "testJobClusterActionEnablePUTNotAllowed") public void testJobClusterActionEnableDELETENotAllowed() throws InterruptedException { testDelete(getJobClusterEnableEp(TEST_CLUSTER_NAME), StatusCodes.METHOD_NOT_ALLOWED, null); } /** disable cluster action test **/ @Test(dependsOnMethods = "testJobClusterActionEnableDELETENotAllowed") public void testJobClusterActionDisablePost() throws InterruptedException { testPost( getJobClusterDisableEp(TEST_CLUSTER_NAME), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_DISABLE), StatusCodes.NO_CONTENT, null ); } @Test(dependsOnMethods = "testJobClusterActionDisablePost") public void testJobClusterActionDisablePostNonExistent() throws InterruptedException { testPost( getJobClusterDisableEp("NonExistent"), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_DISABLE_NONEXISTENT), StatusCodes.NOT_FOUND, null); } @Test(dependsOnMethods = "testJobClusterActionDisablePostNonExistent") public void testJobClusterActionDisablePostNonMatchedResource() throws InterruptedException { testPost( getJobClusterDisableEp("NonExistent"), HttpEntities.create( ContentTypes.APPLICATION_JSON, 
JobClusterPayloads.JOB_CLUSTER_DISABLE), StatusCodes.BAD_REQUEST, null); } @Test(dependsOnMethods = "testJobClusterActionDisablePostNonMatchedResource") public void testJobClusterActionDisableGetNotAllowed() throws InterruptedException { testGet(getJobClusterDisableEp(TEST_CLUSTER_NAME), StatusCodes.METHOD_NOT_ALLOWED, null); } @Test(dependsOnMethods = "testJobClusterActionDisableGetNotAllowed") public void testJobClusterActionDisablePUTNotAllowed() throws InterruptedException { testPut( getJobClusterDisableEp(TEST_CLUSTER_NAME), HttpEntities.create( ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_DISABLE), StatusCodes.METHOD_NOT_ALLOWED, null); } @Test(dependsOnMethods = "testJobClusterActionDisablePUTNotAllowed") public void testJobClusterActionDisableDELETENotAllowed() throws InterruptedException { testDelete(getJobClusterDisableEp(TEST_CLUSTER_NAME), StatusCodes.METHOD_NOT_ALLOWED, null); } @Test(dependsOnMethods = "testJobClusterActionDisableDELETENotAllowed") public void testJobClusterDeleteWithoutRequiredParam() throws InterruptedException { testDelete( getJobClusterInstanceEndpoint("sine-function"), StatusCodes.BAD_REQUEST, null); } @Test(dependsOnMethods = "testJobClusterDeleteWithoutRequiredParam") public void testJobClusterValidDelete() throws InterruptedException { assert isClusterExist("sine-function"); testDelete(getJobClusterInstanceEndpoint("sine-function") + "?user=test&reason=unittest", StatusCodes.ACCEPTED, null); boolean clusterExist = isClusterExist("sine-function"); int retry = 10; while (clusterExist && retry > 0) { Thread.sleep(1000); clusterExist = isClusterExist("sine-function"); retry--; } assert !clusterExist; } private void compareClusterInstancePayload(String clusterGetResponse) { try { ObjectMapper mapper = new ObjectMapper(); JsonNode requestObj = mapper.readTree(JobClusterPayloads.JOB_CLUSTER_CREATE); JsonNode responseObj = mapper.readTree(clusterGetResponse); assertEquals( responseObj.get("name").toString(), 
requestObj.get("jobDefinition").get("name").toString()); assertEquals( responseObj.get("jars").get(0).get("url").toString(), requestObj.get("jobDefinition").get("jobJarFileLocation").toString()); assertEquals( responseObj.get("jars").get(0).get("version").toString(), requestObj.get("jobDefinition").get("version").toString()); } catch (IOException ex) { assert ex == null; } } private void compareClustersPayload(String clusterListResponse) { try { ObjectMapper mapper = new ObjectMapper(); JsonNode responseObj = mapper.readTree(clusterListResponse); assert (responseObj.get("list") != null); assert (responseObj.get("prev") != null); assert (responseObj.get("next") != null); compareClusterInstancePayload(responseObj.get("list").get(0).toString()); } catch (IOException ex) { assert ex == null; } } }
4,232
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route/v1/RouteTestBase.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.v1; import akka.actor.ActorSystem; import akka.http.javadsl.Http; import akka.http.javadsl.model.HttpEntity; import akka.http.javadsl.model.HttpMethod; import akka.http.javadsl.model.HttpMethods; import akka.http.javadsl.model.HttpRequest; import akka.http.javadsl.model.HttpResponse; import akka.http.javadsl.model.RequestEntity; import akka.http.javadsl.model.StatusCode; import akka.stream.ActorMaterializer; import akka.util.ByteString; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.util.Strings; import java.util.concurrent.CompletionStage; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; abstract class RouteTestBase { private final static Logger logger = LoggerFactory.getLogger(RouteTestBase.class); final ActorSystem system; final ActorMaterializer materializer; final Http http; final private int serverPort; static ResponseValidatorFunc EMPTY_RESPONSE_VALIDATOR = (msg) -> { assertTrue(String.format("response [%s] is not empty", msg), Strings.isNullOrEmpty(msg)); }; RouteTestBase(String testName, int port) { this.system = ActorSystem.create(testName); this.materializer = 
ActorMaterializer.create(system); this.http = Http.get(system); this.serverPort = port; } final String getJobClustersEndpoint() { return String.format( "http://127.0.0.1:%d/api/v1/jobClusters", serverPort); } final String getJobClusterInstanceEndpoint(String clusterName) { return String.format( "http://127.0.0.1:%d/api/v1/jobClusters/%s", serverPort, clusterName); } final String getJobClusterLatestJobDiscoveryInfoEp(String clusterName) { return String.format( "http://127.0.0.1:%d/api/v1/jobClusters/%s/latestJobDiscoveryInfo", serverPort, clusterName); } final String getJobClusterUpdateArtifactEp(String clusterName) { return getJobClusterInstanceEndpoint(clusterName) + "/actions/updateArtifact"; } final String getJobClusterUpdateSlaEp(String clusterName) { return getJobClusterInstanceEndpoint(clusterName) + "/actions/updateSla"; } final String getJobClusterUpdateMigrationStrategyEp(String clusterName) { return getJobClusterInstanceEndpoint(clusterName) + "/actions/updateMigrationStrategy"; } final String getJobClusterUpdateLabelEp(String clusterName) { return getJobClusterInstanceEndpoint(clusterName) + "/actions/updateLabel"; } final String getJobClusterEnableEp(String clusterName) { return getJobClusterInstanceEndpoint(clusterName) + "/actions/enableCluster"; } final String getJobClusterDisableEp(String clusterName) { return getJobClusterInstanceEndpoint(clusterName) + "/actions/disableCluster"; } final String getJobsEndpoint() { return String.format( "http://127.0.0.1:%d/api/v1/jobs", serverPort); } final String getClusterJobsEndpoint(String clusterName) { return String.format( "http://127.0.0.1:%d/api/v1/jobClusters/%s/jobs", serverPort, clusterName); } final String getJobInstanceEndpoint(String clusterName, String jobId) { return String.format( "http://127.0.0.1:%d/api/v1/jobClusters/%s/jobs/%s", serverPort, clusterName, jobId); } final String getJobInstanceEndpoint(String jobId) { return String.format( "http://127.0.0.1:%d/api/v1/jobs/%s", serverPort, jobId); 
} CompletionStage<String> processRespFut( final HttpResponse r, final int expectedStatusCode) { logger.info("headers {} {}", r.getHeaders(), r.status()); logger.info("response entity: {}", r.entity()); assertEquals(expectedStatusCode, r.status().intValue()); if (r.getHeader("Access-Control-Allow-Origin").isPresent()) { assertEquals("*", r.getHeader("Access-Control-Allow-Origin").get().value()); } CompletionStage<HttpEntity.Strict> strictEntity = r.entity().toStrict(1000, materializer); return strictEntity.thenCompose(s -> s.getDataBytes() .runFold( ByteString.emptyByteString(), ByteString::concat, materializer) .thenApply(ByteString::utf8String) ); } String getResponseMessage(final String msg, final Throwable t) { if (t != null) { logger.error("got err ", t); fail(t.getMessage()); } else { logger.info("got response {}", msg); return msg; } logger.info("got empty response {}"); return ""; } boolean isClusterExist(String clusterName) { final boolean result = http.singleRequest(HttpRequest.GET(getJobClusterInstanceEndpoint(clusterName))) .thenApply(r -> r.status().intValue() != 404) .toCompletableFuture() .handle((x, y) -> x) .join(); return result; } void deleteClusterIfExist(String clusterName) throws InterruptedException { if (isClusterExist(clusterName)) { final CountDownLatch latch = new CountDownLatch(1); http.singleRequest(HttpRequest.DELETE(getJobClusterInstanceEndpoint(clusterName))) .thenCompose(r -> processRespFut(r, 202)) .whenComplete((r, t) -> { String responseMessage = getResponseMessage(r, t); logger.info("got response {}", responseMessage); latch.countDown(); }); assertTrue(latch.await(1, TimeUnit.SECONDS)); } else { logger.info("Cluster {} does not exist, no need to delete", clusterName); } } void testGet( String endpoint, StatusCode expectedResponseCode, ResponseValidatorFunc validatorFunc) throws InterruptedException { testHttpRequest( HttpMethods.GET, endpoint, expectedResponseCode, validatorFunc); } void testPost( String endpoint, RequestEntity 
requestEntity, StatusCode expectedResponseCode, ResponseValidatorFunc validatorFunc) throws InterruptedException { testHttpRequest( HttpMethods.POST, endpoint, requestEntity, expectedResponseCode, validatorFunc); } void testPost( String endpoint, StatusCode expectedResponseCode, ResponseValidatorFunc validatorFunc) throws InterruptedException { testHttpRequest( HttpMethods.POST, endpoint, expectedResponseCode, validatorFunc); } void testPut( String endpoint, StatusCode expectedResponseCode, ResponseValidatorFunc validatorFunc) throws InterruptedException { testHttpRequest( HttpMethods.PUT, endpoint, expectedResponseCode, validatorFunc); } void testPut( String endpoint, RequestEntity requestEntity, StatusCode expectedResponseCode, ResponseValidatorFunc validatorFunc) throws InterruptedException { testHttpRequest( HttpMethods.PUT, endpoint, requestEntity, expectedResponseCode, validatorFunc); } void testDelete( String endpoint, StatusCode expectedResponseCode, ResponseValidatorFunc validatorFunc) throws InterruptedException { testHttpRequest(HttpMethods.DELETE, endpoint, expectedResponseCode, validatorFunc); } void testHttpRequest( HttpMethod httpMethod, String endpoint, StatusCode expectedResponseCode, ResponseValidatorFunc validatorFunc) throws InterruptedException { testHttpRequest( HttpRequest.create().withMethod(httpMethod).withUri(endpoint), expectedResponseCode, validatorFunc); } private void testHttpRequest( HttpMethod httpMethod, String endpoint, RequestEntity requestEntity, StatusCode expectedResponseCode, ResponseValidatorFunc validatorFunc) throws InterruptedException { testHttpRequest( HttpRequest.create() .withMethod(httpMethod) .withUri(endpoint) .withEntity(requestEntity), expectedResponseCode, validatorFunc); } private void testHttpRequest( HttpRequest request, StatusCode expectedResponseCode, ResponseValidatorFunc validatorFunc) throws InterruptedException { assert request != null; logger.info(request.toString()); final CountDownLatch latch = new 
CountDownLatch(1); final CompletionStage<HttpResponse> responseFuture = http.singleRequest(request); try { responseFuture .thenCompose(r -> processRespFut(r, expectedResponseCode.intValue())) .whenComplete((msg, t) -> { logger.info("got response: {}", msg); assert t == null; if (null != validatorFunc) { validatorFunc.validate(msg); } latch.countDown(); }) .toCompletableFuture() .get(2, TimeUnit.SECONDS); } catch (ExecutionException e) { throw new RuntimeException(e); } catch (TimeoutException e) { throw new RuntimeException(e); } } @FunctionalInterface interface ResponseValidatorFunc { void validate(String response); } }
4,233
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route/v1/TestMantisClient.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.v1; import io.mantisrx.master.api.akka.route.Jackson; import io.mantisrx.server.core.JobSchedulingInfo; import io.mantisrx.server.core.NamedJobInfo; import io.mantisrx.server.core.master.MasterDescription; import io.netty.buffer.ByteBuf; import io.netty.handler.codec.http.HttpResponseStatus; import mantis.io.reactivex.netty.RxNetty; import mantis.io.reactivex.netty.pipeline.PipelineConfigurators; import mantis.io.reactivex.netty.protocol.http.client.HttpClient; import mantis.io.reactivex.netty.protocol.http.client.HttpClientRequest; import mantis.io.reactivex.netty.protocol.http.client.HttpClientResponse; import mantis.io.reactivex.netty.protocol.http.sse.ServerSentEvent; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observable; import rx.functions.Func1; import rx.functions.Func2; import java.io.IOException; import java.util.concurrent.TimeUnit; public class TestMantisClient { private static final Logger logger = LoggerFactory.getLogger(TestMantisClient.class); private final int serverPort; public TestMantisClient(final int serverPort) { this.serverPort = serverPort; } private final Func1<Observable<? 
extends Throwable>, Observable<?>> retryLogic = attempts -> attempts .zipWith(Observable.range(1, Integer.MAX_VALUE), (Func2<Throwable, Integer, Integer>) (t1, integer) -> integer) .flatMap((Func1<Integer, Observable<?>>) integer -> { long delay = 2 * (integer > 10 ? 10 : integer); logger.info(": retrying conx after sleeping for " + delay + " secs"); return Observable.timer(delay, TimeUnit.SECONDS); }); private final Func1<Observable<? extends Void>, Observable<?>> repeatLogic = attempts -> attempts .zipWith(Observable.range(1, Integer.MAX_VALUE), (Func2<Void, Integer, Integer>) (t1, integer) -> integer) .flatMap((Func1<Integer, Observable<?>>) integer -> { long delay = 2 * (integer > 10 ? 10 : integer); logger.warn("On Complete received! : repeating conx after sleeping for " + delay + " secs"); return Observable.timer(delay, TimeUnit.SECONDS); }); private HttpClient<ByteBuf, ServerSentEvent> getRxnettySseClient(String hostname, int port) { return RxNetty.<ByteBuf, ServerSentEvent>newHttpClientBuilder(hostname, port) .pipelineConfigurator(PipelineConfigurators.<ByteBuf>clientSseConfigurator()) // .enableWireLogging(LogLevel.INFO) .withNoConnectionPooling().build(); } public Observable<JobSchedulingInfo> discoveryStream(final String jobId, final Func1<Observable<? extends Throwable>, Observable<?>> retryFn, final Func1<Observable<? 
extends Void>, Observable<?>> repeatFn) { return Observable.just(new MasterDescription("localhost", "127.0.0.1", serverPort, serverPort, serverPort, "/api/postjobstatus", serverPort, System.currentTimeMillis())) .retryWhen(retryFn) .switchMap(new Func1<MasterDescription, Observable<JobSchedulingInfo>>() { @Override public Observable<JobSchedulingInfo> call(MasterDescription masterDescription) { return getRxnettySseClient(masterDescription.getHostname(), masterDescription.getSchedInfoPort()) .submit(HttpClientRequest.createGet("/api/v1/jobDiscoveryStream/" + jobId + "?sendHB=true")) .flatMap(new Func1<HttpClientResponse<ServerSentEvent>, Observable<JobSchedulingInfo>>() { @Override public Observable<JobSchedulingInfo> call(HttpClientResponse<ServerSentEvent> response) { if (!HttpResponseStatus.OK.equals(response.getStatus())) { return Observable.error(new Exception(response.getStatus().reasonPhrase())); } return response.getContent() .map(new Func1<ServerSentEvent, JobSchedulingInfo>() { @Override public JobSchedulingInfo call(ServerSentEvent event) { try { return Jackson.fromJSON(event.contentAsString(), JobSchedulingInfo.class); } catch (IOException e) { throw new RuntimeException("Invalid schedInfo json: " + e.getMessage(), e); } } }) .timeout(3 * 60, TimeUnit.SECONDS) .filter(new Func1<JobSchedulingInfo, Boolean>() { @Override public Boolean call(JobSchedulingInfo schedulingInfo) { return schedulingInfo != null && !JobSchedulingInfo.HB_JobId.equals(schedulingInfo.getJobId()); } }) ; } }) ; } }) .repeatWhen(repeatFn) .retryWhen(retryFn) ; } public Observable<JobSchedulingInfo> discoveryStream(final String jobId) { return discoveryStream(jobId, retryLogic, repeatLogic); } public Observable<NamedJobInfo> namedJobInfo(final String jobName, final Func1<Observable<? extends Throwable>, Observable<?>> retryFn, final Func1<Observable<? 
extends Void>, Observable<?>> repeatFn) { return Observable.just(new MasterDescription("localhost", "127.0.0.1", serverPort, serverPort, serverPort, "/api/postjobstatus", serverPort, System.currentTimeMillis())) .filter(new Func1<MasterDescription, Boolean>() { @Override public Boolean call(MasterDescription masterDescription) { return masterDescription != null; } }) .retryWhen(retryFn) .switchMap(new Func1<MasterDescription, Observable<NamedJobInfo>>() { @Override public Observable<NamedJobInfo> call(MasterDescription masterDescription) { return getRxnettySseClient(masterDescription.getHostname(), masterDescription.getSchedInfoPort()) .submit(HttpClientRequest.createGet("/api/v1/lastSubmittedJobIdStream/" + jobName + "?sendHB=true")) .flatMap(new Func1<HttpClientResponse<ServerSentEvent>, Observable<NamedJobInfo>>() { @Override public Observable<NamedJobInfo> call(HttpClientResponse<ServerSentEvent> response) { if(!HttpResponseStatus.OK.equals(response.getStatus())) return Observable.error(new Exception(response.getStatus().reasonPhrase())); return response.getContent() .map(new Func1<ServerSentEvent, NamedJobInfo>() { @Override public NamedJobInfo call(ServerSentEvent event) { try { return Jackson.fromJSON(event.contentAsString(), NamedJobInfo.class); } catch (IOException e) { throw new RuntimeException("Invalid namedJobInfo json: " + e.getMessage(), e); } } }) .timeout(3 * 60, TimeUnit.SECONDS) .filter(new Func1<NamedJobInfo, Boolean>() { @Override public Boolean call(NamedJobInfo namedJobInfo) { return namedJobInfo != null && !JobSchedulingInfo.HB_JobId.equals(namedJobInfo.getName()); } }) ; }}) ; } }) .repeatWhen(repeatFn) .retryWhen(retryFn) ; } public Observable<NamedJobInfo> namedJobInfo(final String jobName) { return namedJobInfo(jobName, retryLogic, repeatLogic); } }
4,234
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route/v1/JobDiscoveryStreamRouteTest.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.v1; import akka.NotUsed; import akka.actor.ActorRef; import akka.http.javadsl.ConnectHttp; import akka.http.javadsl.Http; import akka.http.javadsl.ServerBinding; import akka.http.javadsl.model.HttpRequest; import akka.http.javadsl.model.HttpResponse; import akka.stream.ActorMaterializer; import akka.stream.javadsl.Flow; import com.netflix.mantis.master.scheduler.TestHelpers; import io.mantisrx.master.JobClustersManagerActor; import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandler; import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandlerAkkaImpl; import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl; import io.mantisrx.master.events.LifecycleEventPublisher; import io.mantisrx.master.events.LifecycleEventPublisherImpl; import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl; import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl; import io.mantisrx.master.jobcluster.job.JobTestHelper; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto; import io.mantisrx.master.scheduler.AgentsErrorMonitorActor; import io.mantisrx.master.scheduler.FakeMantisScheduler; import io.mantisrx.server.core.JobSchedulingInfo; import io.mantisrx.server.core.NamedJobInfo; import io.mantisrx.server.master.persistence.MantisJobStore; import io.mantisrx.server.master.scheduler.MantisScheduler; import 
org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import rx.Observable; import java.time.Duration; import java.util.concurrent.CompletionStage; import java.util.concurrent.CountDownLatch; import java.util.function.Function; public class JobDiscoveryStreamRouteTest extends RouteTestBase { private final static Logger logger = LoggerFactory.getLogger(JobDiscoveryStreamRouteTest.class); private static Thread t; private static final int SERVER_PORT = 8201; private static volatile CompletionStage<ServerBinding> binding; private ActorRef agentsErrorMonitorActor = system.actorOf(AgentsErrorMonitorActor.props()); private final TestMantisClient mantisClient = new TestMantisClient(SERVER_PORT); JobDiscoveryStreamRouteTest(){ super("JobDiscoveryRoute", SERVER_PORT); } @BeforeClass public void setup() throws Exception { JobTestHelper.deleteAllFiles(); JobTestHelper.createDirsIfRequired(); final CountDownLatch latch = new CountDownLatch(1); t = new Thread(() -> { try { // boot up server using the route as defined below final Http http = Http.get(system); final ActorMaterializer materializer = ActorMaterializer.create(system); final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl()); TestHelpers.setupMasterConfig(); ActorRef jobClustersManagerActor = system.actorOf(JobClustersManagerActor.props( new MantisJobStore(new io.mantisrx.server.master.persistence.SimpleCachedFileStorageProvider(true)), lifecycleEventPublisher), "jobClustersManager"); MantisScheduler fakeScheduler = new FakeMantisScheduler(jobClustersManagerActor); jobClustersManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize(fakeScheduler, false), ActorRef.noSender()); agentsErrorMonitorActor.tell(new 
AgentsErrorMonitorActor.InitializeAgentsErrorMonitor(fakeScheduler), ActorRef.noSender()); Duration idleTimeout = system.settings().config().getDuration("akka.http.server.idle-timeout"); logger.info("idle timeout {} sec ", idleTimeout.getSeconds()); final JobDiscoveryRouteHandler jobDiscoveryRouteHandler = new JobDiscoveryRouteHandlerAkkaImpl(jobClustersManagerActor, idleTimeout); final JobDiscoveryStreamRoute jobDiscoveryRoute = new JobDiscoveryStreamRoute(jobDiscoveryRouteHandler); final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow = jobDiscoveryRoute.createRoute(Function.identity()).flow(system, materializer); logger.info("starting test server on port {}", SERVER_PORT); binding = http.bindAndHandle(routeFlow, ConnectHttp.toHost("localhost", SERVER_PORT), materializer); latch.countDown(); } catch (Exception e) { logger.info("caught exception", e); latch.countDown(); e.printStackTrace(); } }); t.setDaemon(true); t.start(); latch.await(); } @AfterClass public void teardown() { logger.info("JobDiscoveryRouteTest teardown"); if (binding != null) { binding .thenCompose(ServerBinding::unbind) // trigger unbinding from the port .thenAccept(unbound -> system.terminate()); // and shutdown when done } t.interrupt(); } @Test public void testJobDiscoveryStreamForNonExistentJob() throws InterruptedException { // The current behavior of Mantis client is to retry non-200 responses // This test overrides the default retry/repeat behavior to test a Sched info observable would complete if the job id requested is non-existent final CountDownLatch latch = new CountDownLatch(1); Observable<JobSchedulingInfo> jobSchedulingInfoObservable = mantisClient .discoveryStream("testJobCluster-1", obs -> Observable.just(1), obs -> Observable.empty() ); jobSchedulingInfoObservable .doOnNext(x -> logger.info("onNext {}", x)) .doOnError(t -> logger.warn("onError", t)) .doOnCompleted(() -> { logger.info("onCompleted"); latch.countDown(); }) .subscribe(); latch.await(); } @Test public void 
testLastSubmittedJobIdStreamForNonExistentJob() throws InterruptedException { // The current behavior of Mantis client is to retry non-200 responses // This test overrides the default retry/repeat behavior to test a namedjob info observable would complete if the job cluster requested is non-existent final CountDownLatch latch = new CountDownLatch(1); Observable<NamedJobInfo> jobSchedulingInfoObservable = mantisClient .namedJobInfo("testJobCluster", obs -> Observable.just(1), obs -> Observable.empty() ); jobSchedulingInfoObservable .doOnNext(x -> logger.info("onNext {}", x)) .doOnError(t -> logger.warn("onError", t)) .doOnCompleted(() -> { logger.info("onCompleted"); latch.countDown(); }) .subscribe(); latch.await(); } }
4,235
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route/v1/JobsRouteTest.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.v1;

import akka.NotUsed;
import akka.actor.ActorRef;
import akka.http.javadsl.ConnectHttp;
import akka.http.javadsl.ServerBinding;
import akka.http.javadsl.model.ContentTypes;
import akka.http.javadsl.model.HttpEntities;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.http.javadsl.model.StatusCodes;
import akka.stream.javadsl.Flow;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.JsonNode;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.api.akka.payloads.JobPayloads;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.api.akka.route.MantisMasterRoute;
import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.handlers.JobRouteHandler;
import io.mantisrx.master.api.akka.route.handlers.JobRouteHandlerAkkaImpl;
import io.mantisrx.master.api.akka.route.handlers.JobStatusRouteHandler;
import io.mantisrx.master.api.akka.route.v0.AgentClusterRoute;
import io.mantisrx.master.api.akka.route.v0.JobClusterRoute;
import io.mantisrx.master.api.akka.route.v0.JobDiscoveryRoute;
import io.mantisrx.master.api.akka.route.v0.JobRoute;
import io.mantisrx.master.api.akka.route.v0.JobStatusRoute;
import io.mantisrx.master.api.akka.route.v0.MasterDescriptionRoute;
import io.mantisrx.master.events.*;
import io.mantisrx.master.jobcluster.job.JobTestHelper;
import io.mantisrx.master.scheduler.FakeMantisScheduler;
import io.mantisrx.master.JobClustersManagerActor;
import io.mantisrx.master.api.akka.payloads.JobClusterPayloads;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.vm.AgentClusterOperations;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.core.WorkerAssignments;
import io.mantisrx.server.core.master.LocalMasterMonitor;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.master.LeaderRedirectionFilter;
import io.mantisrx.server.master.LeadershipManagerLocalImpl;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import org.testng.util.Strings;

import java.io.IOException;
import java.time.Duration;
import java.util.Map;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.function.Function;

import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

/**
 * End-to-end functional tests for the v1 jobs REST routes ({@code /api/v1/jobs} and
 * {@code /api/v1/jobClusters/{name}/jobs}).
 *
 * <p>{@link #setup()} boots an in-process Akka HTTP server on {@link #SERVER_PORT} backed by a
 * real {@link JobClustersManagerActor} (file-based storage, fake scheduler); the tests then drive
 * it with real HTTP requests. Test ordering is enforced via TestNG {@code dependsOnMethods}
 * because the tests share server-side state (a job cluster and submitted jobs).
 */
public class JobsRouteTest extends RouteTestBase {
    private static final Logger logger = LoggerFactory.getLogger(JobsRouteTest.class);

    // Jackson ObjectMapper is thread-safe and expensive to construct; share one instance
    // across all response validators instead of allocating a new mapper per assertion.
    private static final ObjectMapper MAPPER = new ObjectMapper();

    private static Thread t;
    private static final int SERVER_PORT = 8204;
    private static CompletionStage<ServerBinding> binding;

    private static final String TEST_CLUSTER = "sine-function";
    private static final String TEST_JOB_ID = "sine-function-1";

    JobsRouteTest() {
        super("JobsRoute", SERVER_PORT);
    }

    /**
     * Boots the full master route stack (v0 + v1 routes) on a daemon thread and binds it to
     * {@link #SERVER_PORT}. Blocks until the server thread has reached the bind call.
     */
    @BeforeClass
    public void setup() throws Exception {
        JobTestHelper.deleteAllFiles();
        JobTestHelper.createDirsIfRequired();
        TestHelpers.setupMasterConfig();
        final CountDownLatch latch = new CountDownLatch(1);
        t = new Thread(() -> {
            try {
                // boot up server using the route as defined below
                final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl(
                        new AuditEventSubscriberLoggingImpl(),
                        new StatusEventSubscriberLoggingImpl(),
                        new WorkerEventSubscriberLoggingImpl());
                ActorRef jobClustersManagerActor = system.actorOf(
                        JobClustersManagerActor.props(
                                new MantisJobStore(
                                        new io.mantisrx.server.master.persistence.SimpleCachedFileStorageProvider(true)),
                                lifecycleEventPublisher),
                        "jobClustersManager");
                MantisScheduler fakeScheduler = new FakeMantisScheduler(jobClustersManagerActor);
                jobClustersManagerActor.tell(
                        new JobClusterManagerProto.JobClustersManagerInitialize(fakeScheduler, false),
                        ActorRef.noSender());

                final JobClusterRouteHandler jobClusterRouteHandler =
                        new JobClusterRouteHandlerAkkaImpl(jobClustersManagerActor);
                final JobRouteHandler jobRouteHandler = new JobRouteHandlerAkkaImpl(jobClustersManagerActor);

                MasterDescription masterDescription = new MasterDescription(
                        "127.0.0.1",
                        "127.0.0.1",
                        SERVER_PORT,
                        SERVER_PORT,
                        SERVER_PORT,
                        "api/postjobstatus",
                        SERVER_PORT,
                        System.currentTimeMillis());
                Duration idleTimeout = system.settings()
                        .config()
                        .getDuration("akka.http.server.idle-timeout");
                logger.info("idle timeout {} sec ", idleTimeout.getSeconds());

                final AgentClusterOperations mockAgentClusterOps = mock(AgentClusterOperations.class);
                final JobStatusRouteHandler jobStatusRouteHandler = mock(JobStatusRouteHandler.class);
                when(jobStatusRouteHandler.jobStatus(anyString())).thenReturn(Flow.create());

                final JobRoute v0JobRoute = new JobRoute(jobRouteHandler, system);
                JobDiscoveryRouteHandler jobDiscoveryRouteHandler =
                        new JobDiscoveryRouteHandlerAkkaImpl(jobClustersManagerActor, idleTimeout);
                final JobDiscoveryRoute v0JobDiscoveryRoute = new JobDiscoveryRoute(jobDiscoveryRouteHandler);
                final JobClusterRoute v0JobClusterRoute =
                        new JobClusterRoute(jobClusterRouteHandler, jobRouteHandler, system);
                final JobStatusRoute v0JobStatusRoute = new JobStatusRoute(jobStatusRouteHandler);
                final AgentClusterRoute v0AgentClusterRoute = new AgentClusterRoute(mockAgentClusterOps, system);
                final MasterDescriptionRoute v0MasterDescriptionRoute = new MasterDescriptionRoute(masterDescription);

                final JobsRoute v1JobsRoute = new JobsRoute(jobClusterRouteHandler, jobRouteHandler, system);
                final JobClustersRoute v1JobClusterRoute = new JobClustersRoute(jobClusterRouteHandler, system);
                final AgentClustersRoute v1AgentClustersRoute = new AgentClustersRoute(mockAgentClusterOps);
                final JobStatusStreamRoute v1JobStatusStreamRoute = new JobStatusStreamRoute(jobStatusRouteHandler);
                final AdminMasterRoute v1AdminMasterRoute = new AdminMasterRoute(masterDescription);
                final JobDiscoveryStreamRoute v1JobDiscoveryStreamRoute =
                        new JobDiscoveryStreamRoute(jobDiscoveryRouteHandler);
                final LastSubmittedJobIdStreamRoute v1LastSubmittedJobIdStreamRoute =
                        new LastSubmittedJobIdStreamRoute(jobDiscoveryRouteHandler);

                LocalMasterMonitor localMasterMonitor = new LocalMasterMonitor(masterDescription);
                LeadershipManagerLocalImpl leadershipMgr = new LeadershipManagerLocalImpl(masterDescription);
                leadershipMgr.setLeaderReady();
                LeaderRedirectionFilter leaderRedirectionFilter =
                        new LeaderRedirectionFilter(localMasterMonitor, leadershipMgr);

                final MantisMasterRoute app = new MantisMasterRoute(
                        leaderRedirectionFilter,
                        v0MasterDescriptionRoute,
                        v0JobClusterRoute,
                        v0JobRoute,
                        v0JobDiscoveryRoute,
                        v0JobStatusRoute,
                        v0AgentClusterRoute,
                        v1JobClusterRoute,
                        v1JobsRoute,
                        v1AdminMasterRoute,
                        v1AgentClustersRoute,
                        v1JobDiscoveryStreamRoute,
                        v1LastSubmittedJobIdStreamRoute,
                        v1JobStatusStreamRoute);

                final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow = app.createRoute()
                        .orElse(v1JobsRoute.createRoute(Function.identity()))
                        .flow(system, materializer);
                logger.info("starting test server on port {}", SERVER_PORT);
                latch.countDown();
                binding = http.bindAndHandle(
                        routeFlow,
                        ConnectHttp.toHost("localhost", SERVER_PORT),
                        materializer);
            } catch (Exception e) {
                logger.info("caught exception", e);
                latch.countDown();
                e.printStackTrace();
            }
        });
        t.setDaemon(true);
        t.start();
        latch.await();
    }

    /** Unbinds the HTTP server, terminates the actor system, and interrupts the server thread. */
    @AfterClass
    public void teardown() {
        logger.info("V1JobsRouteTest teardown");
        binding.thenCompose(ServerBinding::unbind) // trigger unbinding from the port
                .thenAccept(unbound -> system.terminate()); // and shutdown when done
        t.interrupt();
    }

    @Test
    public void cleanupExistingJobs() throws InterruptedException {
        super.deleteClusterIfExist(TEST_CLUSTER);
        assert !this.isClusterExist(TEST_CLUSTER);
    }

    @Test(dependsOnMethods = {"cleanupExistingJobs"})
    public void setupJobCluster() throws InterruptedException {
        testPost(
                getJobClustersEndpoint(),
                HttpEntities.create(ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_CREATE),
                StatusCodes.CREATED,
                null);
        assert this.isClusterExist(TEST_CLUSTER);
    }

    @Test(dependsOnMethods = {"setupJobCluster"})
    public void testJobSubmit() throws InterruptedException {
        testPost(
                getClusterJobsEndpoint(TEST_CLUSTER),
                HttpEntities.create(ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_SUBMIT),
                StatusCodes.CREATED,
                this::validateJobResponse);
    }

    @Test()
    public void testPutOnJobsEp_NotAllowed() throws InterruptedException {
        testPut(getJobsEndpoint(), StatusCodes.METHOD_NOT_ALLOWED, null);
        testPut(getClusterJobsEndpoint(TEST_CLUSTER), StatusCodes.METHOD_NOT_ALLOWED, null);
    }

    @Test()
    public void testDeleteOnJobsEp_NotAllowed() throws InterruptedException {
        testDelete(getJobsEndpoint(), StatusCodes.METHOD_NOT_ALLOWED, null);
        testDelete(getClusterJobsEndpoint(TEST_CLUSTER), StatusCodes.METHOD_NOT_ALLOWED, null);
    }

    @Test(dependsOnMethods = {"testJobSubmit"})
    public void testPostOnJobInstanceEp_NotAllowed() throws InterruptedException {
        testPost(getJobInstanceEndpoint(TEST_JOB_ID), StatusCodes.METHOD_NOT_ALLOWED, null);
        testPost(getJobInstanceEndpoint(TEST_CLUSTER, TEST_JOB_ID), StatusCodes.METHOD_NOT_ALLOWED, null);
    }

    @Test(dependsOnMethods = {"testJobSubmit"})
    public void testPutOnJobInstanceEp_NotAllowed() throws InterruptedException {
        testPut(getJobInstanceEndpoint(TEST_JOB_ID), StatusCodes.METHOD_NOT_ALLOWED, null);
        testPut(getJobInstanceEndpoint(TEST_CLUSTER, TEST_JOB_ID), StatusCodes.METHOD_NOT_ALLOWED, null);
    }

    @Test(dependsOnMethods = {"testJobSubmit"})
    public void testGetLatestJobDiscoveryInfo() throws InterruptedException {
        testGet(
                getJobClusterLatestJobDiscoveryInfoEp(TEST_CLUSTER),
                StatusCodes.OK,
                this::validateSchedulingInfo);
    }

    // NOTE(review): despite the "Get" in the name, this test issues PUTs against the action
    // sub-resources; the name is kept as-is to avoid breaking dependsOnMethods references.
    @Test(dependsOnMethods = {"testJobSubmit"})
    public void testGetOnJobInstanceActionsEp_NotAllowed() throws InterruptedException {
        for (String action : new String[]{"resubmitWorker", "quickSubmit", "scaleStage"}) {
            testPut(
                    getJobInstanceEndpoint(TEST_JOB_ID) + "/actions/" + action,
                    StatusCodes.METHOD_NOT_ALLOWED,
                    null);
            testPut(
                    getJobInstanceEndpoint(TEST_CLUSTER, TEST_JOB_ID) + "/actions/" + action,
                    StatusCodes.METHOD_NOT_ALLOWED,
                    null);
        }
    }

    @Test(dependsOnMethods = {"setupJobCluster"})
    public void testValidJobSubmitToNonExistentCluster() throws InterruptedException {
        testPost(
                getClusterJobsEndpoint("NonExistent"),
                HttpEntities.create(
                        ContentTypes.APPLICATION_JSON,
                        JobClusterPayloads.JOB_CLUSTER_SUBMIT_NonExistent),
                StatusCodes.NOT_FOUND,
                (m) -> {
                    assert m.contains("Job Cluster NonExistent doesn't exist");
                });
    }

    @Test(dependsOnMethods = {"setupJobCluster"})
    public void testInvalidJobSubmitToNonExistentCluster() throws InterruptedException {
        testPost(
                getClusterJobsEndpoint("NonExistent"),
                HttpEntities.create(ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_SUBMIT),
                StatusCodes.BAD_REQUEST,
                (m) -> {
                    assert m.contains("Cluster name specified in request payload [sine-function]" +
                            " does not match with what specified in resource endpoint [NonExistent]");
                });
    }

    @Test(dependsOnMethods = {"testJobSubmit"})
    public void testGetJobsRouteViaClusterJobsEp() throws InterruptedException {
        testGet(
                getClusterJobsEndpoint(TEST_CLUSTER),
                StatusCodes.OK,
                resp -> validateJobsListResponse(resp, 1, false));
    }

    @Test(dependsOnMethods = {"testJobSubmit"})
    public void testGetJobsRouteViaJobsEp() throws InterruptedException {
        testGet(
                getJobsEndpoint(),
                StatusCodes.OK,
                resp -> validateJobsListResponse(resp, 1, false));
    }

    @Test(dependsOnMethods = {"testJobSubmit"})
    public void testGetJobsRouteViaJobsEpCompactResp() throws InterruptedException {
        testGet(
                getJobsEndpoint() + "?compact=true",
                StatusCodes.OK,
                resp -> validateJobsListResponse(resp, 1, true));
    }

    @Test(dependsOnMethods = {"testJobSubmit"})
    public void testGetJobsRouteViaClusterJobEpCompactResp() throws InterruptedException {
        testGet(
                getClusterJobsEndpoint(TEST_CLUSTER) + "?compact=true",
                StatusCodes.OK,
                resp -> validateJobsListResponse(resp, 1, true));
    }

    @Test(dependsOnMethods = {"testJobSubmit"})
    public void testGetJobInstanceWithClusterName() throws InterruptedException {
        testGet(
                getJobInstanceEndpoint(TEST_CLUSTER, TEST_JOB_ID),
                StatusCodes.OK,
                this::validateJobDetails);
    }

    @Test(dependsOnMethods = {"testJobSubmit"})
    public void testGetJobInstanceWithoutClusterName() throws InterruptedException {
        testGet(
                getJobInstanceEndpoint(TEST_JOB_ID),
                StatusCodes.OK,
                this::validateJobDetails);
    }

    @Test(dependsOnMethods = {"testJobSubmit"})
    public void testGetNonExistentJobInstanceWithoutClusterName() throws InterruptedException {
        testGet(
                getJobInstanceEndpoint("NonExistent-1"),
                StatusCodes.NOT_FOUND,
                (m) -> {
                    assert m.contains("Job NonExistent-1 doesn't exist");
                });
    }

    @Test(dependsOnMethods = {"testJobSubmit"})
    public void testGetJobInstanceWithNonMatchingClusterName() throws InterruptedException {
        testGet(
                getJobInstanceEndpoint("NonExistent", TEST_JOB_ID),
                StatusCodes.NOT_FOUND,
                (m) -> {
                    assert m.contains("JobId [sine-function-1] exists but does not " +
                            "belong to specified cluster [NonExistent]");
                });
    }

    @Test(dependsOnMethods = {"testJobSubmit"})
    public void testGetNonExistentJobInstance() throws InterruptedException {
        testGet(
                getJobInstanceEndpoint(TEST_CLUSTER, "NonExistent-1"),
                StatusCodes.NOT_FOUND,
                (m) -> {
                    assert m.contains("Job NonExistent-1 doesn't exist");
                });
    }

    @Test(dependsOnMethods = {"testGetNonExistentJobInstance"})
    public void testJobQuickSubmit() throws InterruptedException {
        testPost(
                getJobsEndpoint() + "/actions/quickSubmit",
                HttpEntities.create(ContentTypes.APPLICATION_JSON, JobClusterPayloads.QUICK_SUBMIT),
                StatusCodes.CREATED,
                this::validateJobResponse);
    }

    @Test(dependsOnMethods = {"testJobQuickSubmit"})
    public void testNonExistentJobQuickSubmit() throws InterruptedException {
        testPost(
                getJobsEndpoint() + "/actions/quickSubmit",
                HttpEntities.create(ContentTypes.APPLICATION_JSON, JobClusterPayloads.QUICK_SUBMIT_NONEXISTENT),
                StatusCodes.NOT_FOUND,
                null);
    }

    @Test(dependsOnMethods = {"testNonExistentJobQuickSubmit"})
    public void testJobResubmitWorker() throws InterruptedException {
        testPost(
                getJobInstanceEndpoint(TEST_JOB_ID) + "/actions/resubmitWorker",
                HttpEntities.create(ContentTypes.APPLICATION_JSON, JobPayloads.RESUBMIT_WORKER),
                StatusCodes.NO_CONTENT,
                null);
    }

    @Test(dependsOnMethods = {"testJobResubmitWorker"})
    public void testNonExistentJobResubmitWorker() throws InterruptedException {
        testPost(
                getJobInstanceEndpoint("NonExistent-1") + "/actions/resubmitWorker",
                HttpEntities.create(ContentTypes.APPLICATION_JSON, JobPayloads.RESUBMIT_WORKER_NONEXISTENT),
                StatusCodes.NOT_FOUND,
                null);
    }

    @Test(dependsOnMethods = {"testNonExistentJobResubmitWorker"})
    public void testJobScaleStage() throws InterruptedException {
        testPost(
                getJobInstanceEndpoint(TEST_JOB_ID) + "/actions/scaleStage",
                HttpEntities.create(ContentTypes.APPLICATION_JSON, JobPayloads.SCALE_STAGE),
                StatusCodes.NO_CONTENT,
                null);
    }

    @Test(dependsOnMethods = {"testJobScaleStage"})
    public void testNonExistentJobScaleStage() throws InterruptedException {
        testPost(
                getJobInstanceEndpoint("NonExistent-1") + "/actions/scaleStage",
                HttpEntities.create(ContentTypes.APPLICATION_JSON, JobPayloads.SCALE_STAGE_NonExistent),
                StatusCodes.NOT_FOUND,
                null);
    }

    @Test(dependsOnMethods = {"testNonExistentJobScaleStage"})
    public void testInvalidJobScaleStage() throws InterruptedException {
        testPost(
                getJobInstanceEndpoint("NonExistent-1") + "/actions/scaleStage",
                HttpEntities.create(ContentTypes.APPLICATION_JSON, JobPayloads.SCALE_STAGE),
                StatusCodes.BAD_REQUEST,
                (m) -> {
                    assert m.contains("JobId specified in request payload [sine-function-1] " +
                            "does not match with resource uri [NonExistent-1]");
                });
    }

    @Test(dependsOnMethods = {"testInvalidJobScaleStage"})
    public void testJobKill() throws InterruptedException {
        testDelete(
                getJobInstanceEndpoint(TEST_JOB_ID) + "?user=test&reason=unittest",
                StatusCodes.ACCEPTED,
                null);
    }

    @Test(dependsOnMethods = {"testJobResubmitWorker"})
    public void testNonExistentJobKill() throws InterruptedException {
        testDelete(
                getJobInstanceEndpoint("NonExistent-1") + "?user=test&reason=unittest",
                StatusCodes.NOT_FOUND,
                null);
    }

    /** Asserts the JSON body of a job-submit response describes a freshly submitted job. */
    private void validateJobResponse(String resp) {
        try {
            assert !Strings.isNullOrEmpty(resp);
            JsonNode responseObj = MAPPER.readTree(resp);
            assert responseObj.get("jobMetadata").get("name").asText().equals(TEST_CLUSTER);
            assert responseObj.get("jobMetadata").get("jobId").asText().startsWith("sine-function-");
            assert responseObj.get("jobMetadata").get("sla") != null;
            assert responseObj.get("jobMetadata").get("labels") != null;
            assert responseObj.get("stageMetadataList") != null;
            assert responseObj.get("workerMetadataList") != null;
        } catch (IOException ex) {
            // Pass the throwable so the stack trace is preserved in the log.
            logger.error("Failed to validate job response", ex);
            assert false;
        }
    }

    /** Asserts a single job-details response matches the expanded (non-compact) list-item shape. */
    private void validateJobDetails(String resp) {
        try {
            assert !Strings.isNullOrEmpty(resp);
            JsonNode responseObj = MAPPER.readTree(resp);
            validateJobsListItem(responseObj, false);
        } catch (IOException ex) {
            logger.error("Failed to validate job details response", ex);
            assert false;
        }
    }

    /** Asserts the job discovery info describes TEST_JOB_ID with one worker in each of two stages. */
    private void validateSchedulingInfo(String s) {
        try {
            assert !Strings.isNullOrEmpty(s);
            JobSchedulingInfo jsi = Jackson.fromJSON(s, JobSchedulingInfo.class);
            assert jsi.getJobId().equals(TEST_JOB_ID);
            Map<Integer, WorkerAssignments> wa = jsi.getWorkerAssignments();
            assert wa.size() == 2;
            assert wa.containsKey(0);
            assert wa.get(0).getNumWorkers() == 1;
            assert wa.containsKey(1);
            assert wa.get(1).getNumWorkers() == 1;
        } catch (IOException e) {
            logger.error("caught unexpected exc {}", e.getMessage(), e);
            assert false;
        }
    }

    /** Asserts a jobs-list response has the expected count and per-item shape. */
    private void validateJobsListResponse(String resp, int expectedJobsCount, boolean isCompact) {
        try {
            assert !Strings.isNullOrEmpty(resp);
            JsonNode responseObj = MAPPER.readTree(resp).get("list");
            assert responseObj.size() == expectedJobsCount;
            for (int i = 0; i < expectedJobsCount; i++) {
                validateJobsListItem(responseObj.get(i), isCompact);
            }
        } catch (IOException ex) {
            logger.error("Failed to validate job response", ex);
            assert false;
        }
    }

    // NOTE(review): currently unused by any test; kept for future job-definition assertions.
    private void validateJobDefinition(JsonNode responseObj) {
        assert responseObj != null;
        assert responseObj.get("name").asText().equals(TEST_CLUSTER);
        assert responseObj.get("artifactName").asText().equals(
                "https://mantis.staging.us-east-1.prod.netflix.net/mantis-artifacts/" +
                        "mantis-examples-sine-function-0.2.9.zip");
        assert responseObj.get("parameters").size() == 2;
        assert responseObj.get("jobSla").get("durationType").asText().equals("Perpetual");
        assert responseObj.get("numberOfStages").asInt() == 2;
        assert responseObj.get("schedulingInfo") != null;
        assert responseObj.get("labels").size() == 7;
    }

    /** Asserts a single list item in either compact or fully-expanded form. */
    private void validateJobsListItem(JsonNode responseObj, boolean isCompact) {
        assert responseObj != null;
        if (isCompact) {
            // Compact items omit the nested metadata objects and inline a flat summary.
            assert responseObj.get("jobMetadata") == null;
            assert responseObj.get("stageMetadataList") == null;
            assert responseObj.get("workerMetadataList") == null;
            assert responseObj.get("submittedAt") != null;
            assert responseObj.get("user") != null;
            assert responseObj.get("type").asText().equals("Perpetual");
            assert responseObj.get("numStages").asInt() == 2;
            assert responseObj.get("numWorkers").asInt() == 2;
            assert responseObj.get("totCPUs").asInt() == 2;
            assert responseObj.get("totMemory").asInt() == 400;
            assert responseObj.get("labels").size() == 7;
            assert responseObj.get("jobId").asText().startsWith("sine-function-");
        } else {
            assert responseObj.get("jobMetadata")
                    .get("jobId")
                    .asText()
                    .startsWith("sine-function-");
            assert responseObj.get("jobMetadata").get("name").asText().equals("sine-function");
            assert responseObj.get("jobMetadata").get("jarUrl").asText().equals(
                    "https://mantis.staging.us-east-1.prod.netflix.net/mantis-artifacts/" +
                            "mantis-examples-sine-function-0.2.9.zip");
            assert responseObj.get("jobMetadata").get("numStages").asInt() == 2;
            assert responseObj.get("jobMetadata").get("parameters").size() == 2;
            assert responseObj.get("jobMetadata").get("labels").size() == 7;
            assert responseObj.get("jobMetadata") != null;
            assert responseObj.get("stageMetadataList") != null;
            assert responseObj.get("workerMetadataList") != null;
            assert responseObj.get("stageMetadataList").size() == 2;
            assert responseObj.get("workerMetadataList").size() == 2;
        }
    }
}
4,236
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route/v1/AdminMasterRouteTest.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.v1;

import akka.NotUsed;
import akka.http.javadsl.ConnectHttp;
import akka.http.javadsl.Http;
import akka.http.javadsl.ServerBinding;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.stream.ActorMaterializer;
import akka.stream.javadsl.Flow;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.jobcluster.job.JobTestHelper;
import io.mantisrx.server.core.master.MasterDescription;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;

import java.util.List;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

/**
 * Functional tests for the v1 admin master routes ({@code /api/v1/masterInfo} and
 * {@code /api/v1/masterConfigs}), served by an in-process Akka HTTP server bound to
 * {@link #ADMIN_MASTER_PORT} and backed by a fixed fake {@link MasterDescription}.
 */
public class AdminMasterRouteTest extends RouteTestBase {
    private static final Logger logger = LoggerFactory.getLogger(AdminMasterRouteTest.class);
    private static Thread t;
    private static final int ADMIN_MASTER_PORT = 8205;

    // The canonical master description the route under test must echo back.
    private static final MasterDescription fakeMasterDesc = new MasterDescription(
            "localhost",
            "127.0.0.1",
            ADMIN_MASTER_PORT,
            ADMIN_MASTER_PORT + 2,
            -1,
            "api/v1/jobs/actions/postJobStatus",
            -1,
            System.currentTimeMillis());
    private static CompletionStage<ServerBinding> binding;
    private static final AdminMasterRoute masterDescRoute;

    static {
        // Master config must be in place before the route is constructed.
        TestHelpers.setupMasterConfig();
        masterDescRoute = new AdminMasterRoute(fakeMasterDesc);
    }

    AdminMasterRouteTest() {
        // Reuse the port constant instead of duplicating the literal 8205.
        super("MasterDescriptionRouteTest", ADMIN_MASTER_PORT);
    }

    /** Binds the admin master route on a daemon thread and waits until the bind call is reached. */
    @BeforeClass
    public void setup() throws Exception {
        JobTestHelper.deleteAllFiles();
        JobTestHelper.createDirsIfRequired();
        final CountDownLatch latch = new CountDownLatch(1);
        t = new Thread(() -> {
            try {
                // boot up server using the route as defined below
                final Http http = Http.get(system);
                final ActorMaterializer materializer = ActorMaterializer.create(system);
                final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow =
                        masterDescRoute.createRoute(Function.identity()).flow(system, materializer);
                logger.info("starting test server on port {}", ADMIN_MASTER_PORT);
                latch.countDown();
                binding = http.bindAndHandle(
                        routeFlow,
                        ConnectHttp.toHost("localhost", ADMIN_MASTER_PORT),
                        materializer);
            } catch (Exception e) {
                logger.info("caught exception", e);
                latch.countDown();
                e.printStackTrace();
            }
        });
        t.setDaemon(true);
        t.start();
        latch.await();
    }

    /** Unbinds the server, terminates the actor system, and interrupts the server thread. */
    @AfterClass
    public void teardown() {
        logger.info("MasterDescriptionRouteTest teardown");
        binding
                .thenCompose(ServerBinding::unbind) // trigger unbinding from the port
                .thenAccept(unbound -> system.terminate()); // and shutdown when done
        t.interrupt();
    }

    /** Builds a full URL for the given v1 endpoint path. */
    private String masterEndpoint(final String ep) {
        return String.format("http://127.0.0.1:%d/api/v1/%s", ADMIN_MASTER_PORT, ep);
    }

    @Test
    public void testMasterInfoAPI() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
                HttpRequest.GET(masterEndpoint("masterInfo")));
        responseFuture
                .thenCompose(r -> processRespFut(r, 200))
                .whenComplete((msg, t) -> {
                    try {
                        String responseMessage = getResponseMessage(msg, t);
                        logger.info("got response {}", responseMessage);
                        MasterDescription masterDescription =
                                Jackson.fromJSON(responseMessage, MasterDescription.class);
                        logger.info("master desc ---> {}", masterDescription);
                        assertEquals(fakeMasterDesc, masterDescription);
                    } catch (Exception e) {
                        fail("unexpected error " + e.getMessage());
                    }
                    latch.countDown();
                });
        assertTrue(latch.await(2, TimeUnit.SECONDS));
    }

    @Test
    public void testMasterConfigAPI() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
                HttpRequest.GET(masterEndpoint("masterConfigs")));
        responseFuture
                .thenCompose(r -> processRespFut(r, 200))
                .whenComplete((msg, t) -> {
                    try {
                        String responseMessage = getResponseMessage(msg, t);
                        logger.info("got response {}", responseMessage);
                        List<AdminMasterRoute.Configlet> masterconfig = Jackson.fromJSON(
                                responseMessage,
                                new TypeReference<List<AdminMasterRoute.Configlet>>() {});
                        logger.info("master config ---> {}", masterconfig);
                        assertEquals(masterDescRoute.getConfigs(), masterconfig);
                    } catch (Exception e) {
                        fail("unexpected error " + e.getMessage());
                    }
                    latch.countDown();
                });
        assertTrue(latch.await(2, TimeUnit.SECONDS));
    }
}
4,237
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route/v1/AgentClustersRouteTest.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.v1;

import akka.NotUsed;
import akka.actor.ActorRef;
import akka.http.javadsl.ConnectHttp;
import akka.http.javadsl.Http;
import akka.http.javadsl.ServerBinding;
import akka.http.javadsl.model.ContentTypes;
import akka.http.javadsl.model.HttpEntities;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.stream.ActorMaterializer;
import akka.stream.javadsl.Flow;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.fenzo.AutoScaleAction;
import com.netflix.fenzo.AutoScaleRule;
import com.netflix.fenzo.VirtualMachineLease;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.JobClustersManagerActor;
import io.mantisrx.master.api.akka.payloads.AgentClusterPayloads;
import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventPublisherImpl;
import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl;
import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.scheduler.FakeMantisScheduler;
import io.mantisrx.master.scheduler.JobMessageRouterImpl;
import io.mantisrx.master.vm.AgentClusterOperations;
import io.mantisrx.master.vm.AgentClusterOperationsImpl;
import io.mantisrx.server.master.AgentClustersAutoScaler;
import io.mantisrx.server.master.persistence.IMantisStorageProvider;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.persistence.SimpleCachedFileStorageProvider;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import rx.Observer;

import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

/**
 * Functional tests for the v1 agent clusters routes ({@code /api/v1/agentClusters} and its
 * {@code /jobs} and {@code /autoScalePolicy} sub-resources). An in-process Akka HTTP server is
 * bound to {@link #serverPort}, backed by a real {@link AgentClusterOperationsImpl} over
 * file-based storage and a fake scheduler, with a dummy autoscale rule installed so the policy
 * endpoint has data to return.
 */
public class AgentClustersRouteTest extends RouteTestBase {
    private static final Logger logger = LoggerFactory.getLogger(AgentClustersRouteTest.class);
    private static Thread t;
    private static final int serverPort = 8202;

    // Lenient mapper: ignore unknown fields so response-schema additions don't break the test.
    private static final ObjectMapper mapper = new ObjectMapper().configure(
            DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

    // final: the endpoint is fixed for the lifetime of the test class.
    private static final String SERVER_ENDPOINT = String.format(
            "http://127.0.0.1:%d/api/v1/agentClusters", serverPort);
    private static CompletionStage<ServerBinding> binding;

    AgentClustersRouteTest() {
        // Reuse the port constant instead of duplicating the literal 8202.
        super("AgentClusterRoutes", serverPort);
    }

    /** Boots the agent clusters route on a daemon thread; blocks until the bind call is reached. */
    @BeforeClass
    public void setup() throws InterruptedException {
        TestHelpers.setupMasterConfig();
        final CountDownLatch latch = new CountDownLatch(1);
        t = new Thread(() -> {
            try {
                // boot up server using the route as defined below
                final Http http = Http.get(system);
                final ActorMaterializer materializer = ActorMaterializer.create(system);
                IMantisStorageProvider storageProvider = new SimpleCachedFileStorageProvider(true);
                final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl(
                        new AuditEventSubscriberLoggingImpl(),
                        new StatusEventSubscriberLoggingImpl(),
                        new WorkerEventSubscriberLoggingImpl());
                ActorRef jobClustersManagerActor = system.actorOf(
                        JobClustersManagerActor.props(
                                new MantisJobStore(storageProvider),
                                lifecycleEventPublisher),
                        "jobClustersManager");
                MantisScheduler fakeScheduler = new FakeMantisScheduler(jobClustersManagerActor);
                jobClustersManagerActor.tell(
                        new JobClusterManagerProto.JobClustersManagerInitialize(fakeScheduler, false),
                        ActorRef.noSender());
                setupDummyAgentClusterAutoScaler();
                final AgentClustersRoute agentClusterV2Route = new AgentClustersRoute(
                        new AgentClusterOperationsImpl(
                                storageProvider,
                                new JobMessageRouterImpl(jobClustersManagerActor),
                                fakeScheduler,
                                lifecycleEventPublisher,
                                "cluster"));
                final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow =
                        agentClusterV2Route.createRoute(Function.identity()).flow(system, materializer);
                logger.info("test server starting on port {}", serverPort);
                latch.countDown();
                binding = http.bindAndHandle(
                        routeFlow,
                        ConnectHttp.toHost("localhost", serverPort),
                        materializer);
            } catch (Exception e) {
                logger.info("caught exception", e);
                latch.countDown();
                e.printStackTrace();
            }
        });
        t.setDaemon(true);
        t.start();
        latch.await();
    }

    /** Unbinds the server, terminates the actor system, and interrupts the server thread. */
    @AfterClass
    public void teardown() {
        logger.info("V1AgentClusterRouteTest teardown");
        binding
                .thenCompose(ServerBinding::unbind) // trigger unbinding from the port
                .thenAccept(unbound -> system.terminate()); // and shutdown when done
        t.interrupt();
    }

    /**
     * Installs a fixed autoscale rule into the process-wide {@link AgentClustersAutoScaler}
     * singleton. Initialization is best-effort: another test in the same JVM may already have
     * initialized it, in which case the exception is logged and ignored.
     */
    private static void setupDummyAgentClusterAutoScaler() {
        final AutoScaleRule dummyAutoScaleRule = new AutoScaleRule() {
            @Override
            public String getRuleName() { return "test"; }

            @Override
            public int getMinIdleHostsToKeep() { return 1; }

            @Override
            public int getMaxIdleHostsToKeep() { return 10; }

            @Override
            public long getCoolDownSecs() { return 300; }

            @Override
            public boolean idleMachineTooSmall(VirtualMachineLease lease) { return false; }

            @Override
            public int getMinSize() { return 1; }

            @Override
            public int getMaxSize() { return 100; }
        };
        try {
            AgentClustersAutoScaler.initialize(
                    () -> new HashSet<>(Collections.singletonList(dummyAutoScaleRule)),
                    new Observer<AutoScaleAction>() {
                        @Override
                        public void onCompleted() {
                        }

                        @Override
                        public void onError(Throwable e) {
                        }

                        @Override
                        public void onNext(AutoScaleAction autoScaleAction) {
                        }
                    });
        } catch (Exception e) {
            logger.info("AgentClustersAutoScaler is already initialized by another test", e);
        }
    }

    @Test()
    public void testSetActiveAgentClusters() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
                HttpRequest.POST(SERVER_ENDPOINT)
                        .withEntity(HttpEntities.create(
                                ContentTypes.APPLICATION_JSON,
                                AgentClusterPayloads.SET_ACTIVE)));
        responseFuture
                .thenCompose(r -> processRespFut(r, 200))
                .whenComplete((msg, t) -> {
                    String responseMessage = getResponseMessage(msg, t);
                    logger.info("got response {}", responseMessage);
                    latch.countDown();
                });
        assertTrue(latch.await(10, TimeUnit.SECONDS));
    }

    @Test(dependsOnMethods = {"testSetActiveAgentClusters"})
    public void testGetJobsOnAgentClusters() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
                HttpRequest.GET(SERVER_ENDPOINT + "/jobs"));
        responseFuture
                .thenCompose(r -> processRespFut(r, 200))
                .whenComplete((msg, t) -> {
                    String responseMessage = getResponseMessage(msg, t);
                    logger.info("got response {}", responseMessage);
                    // TODO validate jobs on VM response
                    assertEquals("{}", responseMessage);
                    latch.countDown();
                });
        assertTrue(latch.await(1, TimeUnit.SECONDS));
    }

    @Test(dependsOnMethods = {"testGetJobsOnAgentClusters"})
    public void testGetAutoScalePolicy() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
                HttpRequest.GET(SERVER_ENDPOINT + "/autoScalePolicy"));
        responseFuture
                .thenCompose(r -> processRespFut(r, 200))
                .whenComplete((msg, t) -> {
                    String responseMessage = getResponseMessage(msg, t);
                    logger.info("got response {}", responseMessage);
                    try {
                        // Every returned rule must mirror the dummy rule installed in setup().
                        Map<String, AgentClusterOperations.AgentClusterAutoScaleRule> agentClusterAutoScaleRule =
                                mapper.readValue(
                                        responseMessage,
                                        new TypeReference<Map<String, AgentClusterOperations.AgentClusterAutoScaleRule>>() {
                                        });
                        agentClusterAutoScaleRule.values().forEach(autoScaleRule -> {
                            assertEquals("test", autoScaleRule.getName());
                            assertEquals(300, autoScaleRule.getCooldownSecs());
                            assertEquals(1, autoScaleRule.getMinIdle());
                            assertEquals(10, autoScaleRule.getMaxIdle());
                            assertEquals(1, autoScaleRule.getMinSize());
                            assertEquals(100, autoScaleRule.getMaxSize());
                        });
                    } catch (IOException e) {
                        logger.error("caught error", e);
                        fail("failed to deserialize response");
                    }
                    latch.countDown();
                });
        assertTrue(latch.await(1, TimeUnit.SECONDS));
    }

    @Test(dependsOnMethods = {"testGetAutoScalePolicy"})
    public void testGetActiveAgentClusters() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
                HttpRequest.GET(SERVER_ENDPOINT));
        responseFuture
                .thenCompose(r -> processRespFut(r, 200))
                .whenComplete((msg, t) -> {
                    String responseMessage = getResponseMessage(msg, t);
                    logger.info("got response {}", responseMessage);
                    assertEquals(AgentClusterPayloads.SET_ACTIVE, responseMessage);
                    latch.countDown();
                });
        assertTrue(latch.await(1, TimeUnit.SECONDS));
    }
}
4,238
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route/pagination/ListObjectTests.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.pagination; import java.util.ArrayList; import java.util.List; import java.util.Random; import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import scala.Tuple1; public class ListObjectTests { private static Random rnd = new Random(System.currentTimeMillis()); @Rule public ExpectedException thrown = ExpectedException.none(); @Test @Ignore public void testSortingByInvalidFieldName() { List<TestObject> objects = generateList(10); thrown.expect(RuntimeException.class); thrown.expectMessage("Specified sort field is invalid."); new ListObject.Builder<TestObject>() .withObjects(objects, TestObject.class) .withSortField("invalidValue") .withSortAscending(true) .build(); } @Test public void testSortingByNullFieldName() throws RuntimeException { ArrayList<TestObject> objects = generateList(10); // if not specifying sort field, the returned list should be in original order List<TestObject> list = new ListObject.Builder<TestObject>() .withObjects((List<TestObject>) objects.clone(), TestObject.class) .withSortField(null) .withSortAscending(true) .build().list; for (int i = 0; i < objects.size(); i++) { assert objects.get(i).publicValue == list.get(i).publicValue; } } @Test public void testSortingByEmptyFieldName() throws RuntimeException { ArrayList<TestObject> objects = generateList(10); // if not specifying 
sort field, the returned list should be in original order List<TestObject> list = new ListObject.Builder<TestObject>() .withObjects((List<TestObject>) objects.clone(), TestObject.class) .withSortField("") .withSortAscending(true) .build().list; for (int i = 0; i < objects.size(); i++) { assert objects.get(i).publicValue == list.get(i).publicValue; } } @Test public void testSortingByPublicValueFieldName() { List<TestObject> objects = generateList(10); List<TestObject> sortedList = new ListObject.Builder<TestObject>() .withObjects(objects, TestObject.class) .withSortField("publicValue") .withSortAscending(true) .build().list; assert sortedList.size() == objects.size(); int prevValue = sortedList.get(0).publicValue; for (int i = 1; i < sortedList.size(); i++) { assert sortedList.get(i).publicValue >= prevValue; prevValue = sortedList.get(i).publicValue; } } @Test public void testSortingByPublicValueFieldNameDescending() { List<TestObject> objects = generateList(10); List<TestObject> sortedList = new ListObject.Builder<TestObject>() .withObjects(objects, TestObject.class) .withSortField("publicValue") .withSortAscending(false) .build().list; assert sortedList.size() == objects.size(); int prevValue = sortedList.get(0).publicValue; for (int i = 1; i < sortedList.size(); i++) { assert sortedList.get(i).publicValue < prevValue; prevValue = sortedList.get(i).publicValue; } } @Test @Ignore public void testSortingByPrivateValueFieldName() { List<TestObject> objects = generateList(10); thrown.expect(RuntimeException.class); thrown.expectMessage("Cannot access sort field."); new ListObject.Builder<TestObject>() .withObjects(objects, TestObject.class) .withSortField("privateValue") .withSortAscending(true) .build(); } @Test public void testSortingByPrivateGetterValueFieldName() { List<TestObject> objects = generateList(10); List<TestObject> sortedList = new ListObject.Builder<TestObject>() .withObjects(objects, TestObject.class) .withSortField("privateGetterValue") 
.withSortAscending(true) .build().list; assert sortedList.size() == objects.size(); int prevValue = sortedList.get(0).publicValue; for (int i = 1; i < sortedList.size(); i++) { assert sortedList.get(i).publicValue >= prevValue; prevValue = sortedList.get(i).publicValue; } } @Test public void testSortingByProtectedValueFieldName() { List<TestObject> objects = generateList(10); List<TestObject> sortedList = new ListObject.Builder<TestObject>() .withObjects(objects, TestObject.class) .withSortField("protectedValue") .withSortAscending(true) .build().list; assert sortedList.size() == objects.size(); int prevValue = sortedList.get(0).publicValue; for (int i = 1; i < sortedList.size(); i++) { assert sortedList.get(i).publicValue >= prevValue; prevValue = sortedList.get(i).publicValue; } } @Test @Ignore public void testPaginationLimit() { List<TestObject> objects = generateList(10); assert (new ListObject.Builder<TestObject>() .withObjects(objects, TestObject.class) .withLimit(5) .build().list.size() == 5); } @Test @Ignore public void testPaginationInvalidLimit() { List<TestObject> objects = generateList(10); thrown.expect(IllegalStateException.class); thrown.expectMessage("limit needs to be greater than 0"); new ListObject.Builder<TestObject>() .withObjects(objects, TestObject.class) .withLimit(-1) .build().list.size(); } @Test public void testPaginationLimitAndOffset() { List<TestObject> objects = generateList(10); List<TestObject> list = new ListObject.Builder<TestObject>() .withObjects(objects, TestObject.class) .withLimit(5) .withOffset(1) .build().list; assert list.size() == 5; for (int i =0; i< 5; i++) { assert list.get(i).publicValue == objects.get(i+1).publicValue; } } @Test public void testPaginationTooBigLimitAndOffset() { List<TestObject> objects = generateList(10); List<TestObject> list = new ListObject.Builder<TestObject>() .withObjects(objects, TestObject.class) .withLimit(5) .withOffset(6) .build().list; assert list.size() == 4; for (int i =0; i< 4; i++) { 
assert list.get(i).publicValue == objects.get(i+6).publicValue; } } @Test public void testPaginationTooBigLimitAndInvalidOffset() { List<TestObject> objects = generateList(10); List<TestObject> list = new ListObject.Builder<TestObject>() .withObjects(objects, TestObject.class) .withLimit(5) .withOffset(11) .build().list; assert list.size() == 0; } @Test public void testEmptyList() { List<TestObject> objects = new ArrayList<>(); List<TestObject> list = new ListObject.Builder<TestObject>() .withObjects(objects, TestObject.class) .withOffset(0) .build().list; assert list.size() == 0; } private ArrayList<TestObject> generateList(int size) { assert size > 0; ArrayList<TestObject> list = new ArrayList<>(); for (int i = 0; i < size; i++) { list.add(new TestObject()); } return list; } public class TestObject { private int privateValue; private int privateGetterValue; public int publicValue; protected int protectedValue; public Tuple1<Integer> complexTypeField; public TestObject() { int randomVal = rnd.nextInt() % 10000; this.privateValue = randomVal; this.privateGetterValue = randomVal; this.publicValue = randomVal; this.protectedValue = randomVal; this.complexTypeField = new Tuple1<>(randomVal); } public int getPrivateGetterValue() { return this.privateGetterValue; } } }
4,239
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route/v0/TestMantisClient.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.v0; import io.mantisrx.master.api.akka.route.Jackson; import io.mantisrx.server.core.JobSchedulingInfo; import io.mantisrx.server.core.NamedJobInfo; import io.mantisrx.server.core.master.MasterDescription; import io.netty.buffer.ByteBuf; import io.netty.handler.codec.http.HttpResponseStatus; import mantis.io.reactivex.netty.RxNetty; import mantis.io.reactivex.netty.pipeline.PipelineConfigurators; import mantis.io.reactivex.netty.protocol.http.client.HttpClient; import mantis.io.reactivex.netty.protocol.http.client.HttpClientRequest; import mantis.io.reactivex.netty.protocol.http.client.HttpClientResponse; import mantis.io.reactivex.netty.protocol.http.sse.ServerSentEvent; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observable; import rx.functions.Func1; import rx.functions.Func2; import java.io.IOException; import java.util.concurrent.TimeUnit; public class TestMantisClient { private static final Logger logger = LoggerFactory.getLogger(TestMantisClient.class); private final int serverPort; public TestMantisClient(final int serverPort) { this.serverPort = serverPort; } private final Func1<Observable<? 
extends Throwable>, Observable<?>> retryLogic = attempts -> attempts .zipWith(Observable.range(1, Integer.MAX_VALUE), (Func2<Throwable, Integer, Integer>) (t1, integer) -> integer) .flatMap((Func1<Integer, Observable<?>>) integer -> { long delay = 2 * (integer > 10 ? 10 : integer); logger.info(": retrying conx after sleeping for " + delay + " secs"); return Observable.timer(delay, TimeUnit.SECONDS); }); private final Func1<Observable<? extends Void>, Observable<?>> repeatLogic = attempts -> attempts .zipWith(Observable.range(1, Integer.MAX_VALUE), (Func2<Void, Integer, Integer>) (t1, integer) -> integer) .flatMap((Func1<Integer, Observable<?>>) integer -> { long delay = 2 * (integer > 10 ? 10 : integer); logger.warn("On Complete received! : repeating conx after sleeping for " + delay + " secs"); return Observable.timer(delay, TimeUnit.SECONDS); }); private HttpClient<ByteBuf, ServerSentEvent> getRxnettySseClient(String hostname, int port) { return RxNetty.<ByteBuf, ServerSentEvent>newHttpClientBuilder(hostname, port) .pipelineConfigurator(PipelineConfigurators.<ByteBuf>clientSseConfigurator()) // .enableWireLogging(LogLevel.INFO) .withNoConnectionPooling().build(); } public Observable<JobSchedulingInfo> schedulingChanges(final String jobId, final Func1<Observable<? extends Throwable>, Observable<?>> retryFn, final Func1<Observable<? 
extends Void>, Observable<?>> repeatFn) { return Observable.just(new MasterDescription("localhost", "127.0.0.1", serverPort, serverPort, serverPort, "/api/postjobstatus", serverPort, System.currentTimeMillis())) .retryWhen(retryFn) .switchMap(new Func1<MasterDescription, Observable<JobSchedulingInfo>>() { @Override public Observable<JobSchedulingInfo> call(MasterDescription masterDescription) { return getRxnettySseClient(masterDescription.getHostname(), masterDescription.getSchedInfoPort()) .submit(HttpClientRequest.createGet("/assignmentresults/" + jobId + "?sendHB=true")) .flatMap(new Func1<HttpClientResponse<ServerSentEvent>, Observable<JobSchedulingInfo>>() { @Override public Observable<JobSchedulingInfo> call(HttpClientResponse<ServerSentEvent> response) { if (!HttpResponseStatus.OK.equals(response.getStatus())) { return Observable.error(new Exception(response.getStatus().reasonPhrase())); } return response.getContent() .map(new Func1<ServerSentEvent, JobSchedulingInfo>() { @Override public JobSchedulingInfo call(ServerSentEvent event) { try { return Jackson.fromJSON(event.contentAsString(), JobSchedulingInfo.class); } catch (IOException e) { throw new RuntimeException("Invalid schedInfo json: " + e.getMessage(), e); } } }) .timeout(3 * 60, TimeUnit.SECONDS) .filter(new Func1<JobSchedulingInfo, Boolean>() { @Override public Boolean call(JobSchedulingInfo schedulingInfo) { return schedulingInfo != null && !JobSchedulingInfo.HB_JobId.equals(schedulingInfo.getJobId()); } }) ; } }) ; } }) .repeatWhen(repeatFn) .retryWhen(retryFn) ; } public Observable<JobSchedulingInfo> schedulingChanges(final String jobId) { return schedulingChanges(jobId, retryLogic, repeatLogic); } public Observable<NamedJobInfo> namedJobInfo(final String jobName, final Func1<Observable<? extends Throwable>, Observable<?>> retryFn, final Func1<Observable<? 
extends Void>, Observable<?>> repeatFn) { return Observable.just(new MasterDescription("localhost", "127.0.0.1", serverPort, serverPort, serverPort, "/api/postjobstatus", serverPort, System.currentTimeMillis())) .filter(new Func1<MasterDescription, Boolean>() { @Override public Boolean call(MasterDescription masterDescription) { return masterDescription != null; } }) .retryWhen(retryFn) .switchMap(new Func1<MasterDescription, Observable<NamedJobInfo>>() { @Override public Observable<NamedJobInfo> call(MasterDescription masterDescription) { return getRxnettySseClient(masterDescription.getHostname(), masterDescription.getSchedInfoPort()) .submit(HttpClientRequest.createGet("/namedjobs/" + jobName + "?sendHB=true")) .flatMap(new Func1<HttpClientResponse<ServerSentEvent>, Observable<NamedJobInfo>>() { @Override public Observable<NamedJobInfo> call(HttpClientResponse<ServerSentEvent> response) { if(!HttpResponseStatus.OK.equals(response.getStatus())) return Observable.error(new Exception(response.getStatus().reasonPhrase())); return response.getContent() .map(new Func1<ServerSentEvent, NamedJobInfo>() { @Override public NamedJobInfo call(ServerSentEvent event) { try { return Jackson.fromJSON(event.contentAsString(), NamedJobInfo.class); } catch (IOException e) { throw new RuntimeException("Invalid namedJobInfo json: " + e.getMessage(), e); } } }) .timeout(3 * 60, TimeUnit.SECONDS) .filter(new Func1<NamedJobInfo, Boolean>() { @Override public Boolean call(NamedJobInfo namedJobInfo) { return namedJobInfo != null && !JobSchedulingInfo.HB_JobId.equals(namedJobInfo.getName()); } }) ; }}) ; } }) .repeatWhen(repeatFn) .retryWhen(retryFn) ; } public Observable<NamedJobInfo> namedJobInfo(final String jobName) { return namedJobInfo(jobName, retryLogic, repeatLogic); } }
4,240
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route/v0/JobRouteTest.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.v0; import akka.NotUsed; import akka.actor.ActorRef; import akka.actor.ActorSystem; import akka.http.javadsl.ConnectHttp; import akka.http.javadsl.Http; import akka.http.javadsl.ServerBinding; import akka.http.javadsl.model.ContentTypes; import akka.http.javadsl.model.HttpCharsets; import akka.http.javadsl.model.HttpEntities; import akka.http.javadsl.model.HttpEntity; import akka.http.javadsl.model.HttpMethods; import akka.http.javadsl.model.HttpRequest; import akka.http.javadsl.model.HttpResponse; import akka.http.javadsl.model.MediaType; import akka.http.javadsl.model.MediaTypes; import akka.stream.ActorMaterializer; import akka.stream.javadsl.Flow; import akka.util.ByteString; import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference; import com.netflix.mantis.master.scheduler.TestHelpers; import io.mantisrx.master.JobClustersManagerActor; import io.mantisrx.master.api.akka.payloads.JobClusterPayloads; import io.mantisrx.master.api.akka.payloads.JobPayloads; import io.mantisrx.master.api.akka.route.Jackson; import io.mantisrx.master.api.akka.route.MantisMasterRoute; import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandler; import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandler; import io.mantisrx.master.api.akka.route.handlers.JobRouteHandler; import 
io.mantisrx.master.api.akka.route.handlers.JobStatusRouteHandler; import io.mantisrx.master.api.akka.route.proto.JobClusterProtoAdapter; import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandlerAkkaImpl; import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandlerAkkaImpl; import io.mantisrx.master.api.akka.route.handlers.JobRouteHandlerAkkaImpl; import io.mantisrx.master.api.akka.route.v1.AdminMasterRoute; import io.mantisrx.master.api.akka.route.v1.AgentClustersRoute; import io.mantisrx.master.api.akka.route.v1.JobClustersRoute; import io.mantisrx.master.api.akka.route.v1.JobDiscoveryStreamRoute; import io.mantisrx.master.api.akka.route.v1.JobStatusStreamRoute; import io.mantisrx.master.api.akka.route.v1.JobsRoute; import io.mantisrx.master.api.akka.route.v1.LastSubmittedJobIdStreamRoute; //import io.mantisrx.master.api.proto.JobArchivedWorkersResponse; import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl; import io.mantisrx.master.events.LifecycleEventPublisher; import io.mantisrx.master.events.LifecycleEventPublisherImpl; import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl; import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl; import io.mantisrx.master.jobcluster.job.JobTestHelper; import io.mantisrx.master.jobcluster.job.MantisJobMetadataView; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto; import io.mantisrx.master.scheduler.FakeMantisScheduler; import io.mantisrx.master.vm.AgentClusterOperations; import io.mantisrx.runtime.MantisJobDurationType; import io.mantisrx.runtime.MantisJobState; import io.mantisrx.server.core.JobSchedulingInfo; import io.mantisrx.server.core.NamedJobInfo; import io.mantisrx.server.core.WorkerAssignments; import io.mantisrx.server.core.master.LocalMasterMonitor; import io.mantisrx.server.core.master.MasterDescription; import io.mantisrx.server.master.LeaderRedirectionFilter; import io.mantisrx.server.master.LeadershipManagerLocalImpl; import 
io.mantisrx.server.master.http.api.CompactJobInfo; import io.mantisrx.server.master.persistence.MantisJobStore; import io.mantisrx.server.master.scheduler.MantisScheduler; import io.mantisrx.server.master.store.MantisStageMetadataWritable; import io.mantisrx.server.master.store.MantisWorkerMetadataWritable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import rx.Observable; import java.io.IOException; import java.time.Duration; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.concurrent.CompletionStage; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Matchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class JobRouteTest { private final static Logger logger = LoggerFactory.getLogger(JobRouteTest.class); private final ActorMaterializer materializer = ActorMaterializer.create(system); private final Http http = Http.get(system); private static Thread t; private static final int serverPort = 8203; private static final int targetEndpointPort = serverPort; private final TestMantisClient mantisClient = new TestMantisClient(serverPort); private CompletionStage<String> processRespFut( final HttpResponse r, final Optional<Integer> expectedStatusCode) { logger.info("headers {} {}", r.getHeaders(), r.status()); expectedStatusCode.ifPresent(sc -> assertEquals(sc.intValue(), r.status().intValue())); assert (r.getHeader("Access-Control-Allow-Origin").isPresent()); assertEquals("*", 
r.getHeader("Access-Control-Allow-Origin").get().value()); CompletionStage<HttpEntity.Strict> strictEntity = r.entity().toStrict(1000, materializer); return strictEntity.thenCompose(s -> s.getDataBytes() .runFold( ByteString.emptyByteString(), (acc, b) -> acc.concat(b), materializer) .thenApply(s2 -> s2.utf8String()) ); } private CompletionStage<String> processRespFutWithoutHeadersCheck( final HttpResponse r, final Optional<Integer> expectedStatusCode) { logger.info("headers {} {}", r.getHeaders(), r.status()); expectedStatusCode.ifPresent(sc -> assertEquals(sc.intValue(), r.status().intValue())); CompletionStage<HttpEntity.Strict> strictEntity = r.entity().toStrict(1000, materializer); return strictEntity.thenCompose(s -> s.getDataBytes() .runFold( ByteString.emptyByteString(), (acc, b) -> acc.concat(b), materializer) .thenApply(s2 -> s2.utf8String()) ); } private String getResponseMessage(final String msg, final Throwable t) { if (t != null) { logger.error("got err ", t); t.printStackTrace(); fail(t.getMessage()); } else { return msg; } return ""; } private static CompletionStage<ServerBinding> binding; private static ActorSystem system = ActorSystem.create("JobRoutes"); @BeforeClass public static void setup() throws Exception { JobTestHelper.deleteAllFiles(); JobTestHelper.createDirsIfRequired(); final CountDownLatch latch = new CountDownLatch(1); TestHelpers.setupMasterConfig(); t = new Thread(() -> { try { // boot up server using the route as defined below final Http http = Http.get(system); final ActorMaterializer materializer = ActorMaterializer.create(system); // SimpleCachedFileStorageProvider simpleCachedFileStorageProvider = new SimpleCachedFileStorageProvider(); // new File("/tmp/MantisSpool/namedJobs").mkdirs(); // IMantisStorageProvider storageProvider = new MantisStorageProviderAdapter(simpleCachedFileStorageProvider); final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl( new AuditEventSubscriberLoggingImpl(), new 
StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl()); ActorRef jobClustersManagerActor = system.actorOf(JobClustersManagerActor.props( new MantisJobStore(new io.mantisrx.server.master.persistence.SimpleCachedFileStorageProvider( true)), lifecycleEventPublisher), "jobClustersManager"); MantisScheduler fakeScheduler = new FakeMantisScheduler(jobClustersManagerActor); jobClustersManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize( fakeScheduler, false), ActorRef.noSender()); final JobClusterRouteHandler jobClusterRouteHandler = new JobClusterRouteHandlerAkkaImpl( jobClustersManagerActor); final JobRouteHandler jobRouteHandler = new JobRouteHandlerAkkaImpl( jobClustersManagerActor); MasterDescription masterDescription = new MasterDescription( "127.0.0.1", "127.0.0.1", serverPort, serverPort, serverPort, "api/postjobstatus", serverPort, System.currentTimeMillis()); Duration idleTimeout = system.settings() .config() .getDuration("akka.http.server.idle-timeout"); logger.info("idle timeout {} sec ", idleTimeout.getSeconds()); final JobDiscoveryRouteHandler jobDiscoveryRouteHandler = new JobDiscoveryRouteHandlerAkkaImpl( jobClustersManagerActor, idleTimeout); final MasterDescriptionRoute masterDescriptionRoute = new MasterDescriptionRoute( masterDescription); final JobRoute v0JobRoute = new JobRoute(jobRouteHandler, system); final JobDiscoveryRoute v0JobDiscoveryRoute = new JobDiscoveryRoute( jobDiscoveryRouteHandler); final JobClusterRoute v0JobClusterRoute = new JobClusterRoute( jobClusterRouteHandler, jobRouteHandler, system); final JobClustersRoute v1JobClusterRoute = new JobClustersRoute( jobClusterRouteHandler, system); final JobsRoute v1JobsRoute = new JobsRoute( jobClusterRouteHandler, jobRouteHandler, system); final AdminMasterRoute v1AdminMasterRoute = new AdminMasterRoute(masterDescription); final JobStatusRouteHandler jobStatusRouteHandler = mock(JobStatusRouteHandler.class); 
// --- continuation of setup(): route wiring inside the server-bootstrap thread ---
// Stub the job-status handler so any status-stream request yields an empty flow,
// then assemble the remaining v0/v1 routes and bind the full master route tree.
when(jobStatusRouteHandler.jobStatus(anyString())).thenReturn(Flow.create());
final JobStatusRoute v0JobStatusRoute = new JobStatusRoute(jobStatusRouteHandler);
final AgentClusterOperations mockAgentClusterOps = mock(AgentClusterOperations.class);
final AgentClusterRoute v0AgentClusterRoute = new AgentClusterRoute(
    mockAgentClusterOps,
    system);
final AgentClustersRoute v1AgentClusterRoute = new AgentClustersRoute(
    mockAgentClusterOps);
final JobDiscoveryStreamRoute v1JobDiscoveryStreamRoute =
    new JobDiscoveryStreamRoute(jobDiscoveryRouteHandler);
final LastSubmittedJobIdStreamRoute v1LastSubmittedJobIdStreamRoute =
    new LastSubmittedJobIdStreamRoute(jobDiscoveryRouteHandler);
final JobStatusStreamRoute v1JobStatusStreamRoute =
    new JobStatusStreamRoute(jobStatusRouteHandler);
// Single-node leadership: mark this instance leader-ready so the redirection
// filter serves requests locally instead of redirecting to another master.
LocalMasterMonitor localMasterMonitor = new LocalMasterMonitor(masterDescription);
LeadershipManagerLocalImpl leadershipMgr = new LeadershipManagerLocalImpl(
    masterDescription);
leadershipMgr.setLeaderReady();
LeaderRedirectionFilter leaderRedirectionFilter = new LeaderRedirectionFilter(
    localMasterMonitor,
    leadershipMgr);
final MantisMasterRoute app = new MantisMasterRoute(
    leaderRedirectionFilter,
    masterDescriptionRoute,
    v0JobClusterRoute,
    v0JobRoute,
    v0JobDiscoveryRoute,
    v0JobStatusRoute,
    v0AgentClusterRoute,
    v1JobClusterRoute,
    v1JobsRoute,
    v1AdminMasterRoute,
    v1AgentClusterRoute,
    v1JobDiscoveryStreamRoute,
    v1LastSubmittedJobIdStreamRoute,
    v1JobStatusStreamRoute);
final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow = app.createRoute()
    .flow(system, materializer);
logger.info("starting test server on port {}", serverPort);
binding = http.bindAndHandle(
    routeFlow,
    ConnectHttp.toHost("localhost", serverPort),
    materializer);
latch.countDown();
} catch (Exception e) {
    logger.info("caught exception", e);
    // Count down even on failure so setup() does not hang awaiting the latch.
    latch.countDown();
    e.printStackTrace();
}
});
t.setDaemon(true);
t.start();
latch.await();
// Brief pause so the freshly bound server is ready before tests issue requests.
Thread.sleep(100);
}

/** Unbinds the test HTTP server, terminates the actor system, and stops the bootstrap thread. */
@AfterClass
public static void teardown() {
    logger.info("JobRouteTest teardown");
    binding
        .thenCompose(ServerBinding::unbind) // trigger unbinding from the port
        .thenAccept(unbound -> system.terminate()); // and shutdown when done
    t.interrupt();
}

/** Builds a v0 named-job (job cluster) API URL for the given endpoint suffix. */
private String jobClusterAPIEndpoint(final String endpoint) {
    return String.format("http://127.0.0.1:%d/api/namedjob/%s", targetEndpointPort, endpoint);
}

/** Builds a v0 jobs API URL for the given endpoint suffix. */
private String jobAPIEndpoint(final String endpoint) {
    return String.format("http://127.0.0.1:%d/api/jobs/%s", targetEndpointPort, endpoint);
}

/**
 * First test in the dependency chain: best-effort disable + delete of any
 * pre-existing 'sine-function' cluster so later tests start from a clean slate.
 * Status codes are not asserted here (Optional.empty()) because the cluster
 * may legitimately not exist yet.
 */
@Test
public void cleanupExistingJobs() throws InterruptedException {
    // Disable cluster to terminate all running jobs
    final CountDownLatch latch = new CountDownLatch(1);
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.POST(jobClusterAPIEndpoint("disable"))
            .withMethod(HttpMethods.POST)
            .withEntity(HttpEntities.create(
                ContentTypes.APPLICATION_JSON,
                JobClusterPayloads.JOB_CLUSTER_DISABLE)));
    responseFuture
        .thenCompose(r -> processRespFut(r, Optional.empty()))
        .whenComplete((r, t) -> {
            String responseMessage = getResponseMessage(r, t);
            logger.info("got response {}", responseMessage);
            latch.countDown();
        });
    assertTrue(latch.await(2, TimeUnit.SECONDS));

    // Then delete the (now disabled) cluster; the body is drained manually
    // because no particular status code is expected.
    final CountDownLatch latch2 = new CountDownLatch(1);
    final CompletionStage<HttpResponse> respF = http.singleRequest(
        HttpRequest.POST(jobClusterAPIEndpoint("delete"))
            .withEntity(HttpEntities.create(
                ContentTypes.APPLICATION_JSON,
                JobClusterPayloads.JOB_CLUSTER_DELETE)));
    respF.thenCompose(r -> {
        CompletionStage<HttpEntity.Strict> strictEntity = r.entity()
            .toStrict(1000, materializer);
        return strictEntity.thenCompose(s -> s.getDataBytes()
            .runFold(
                ByteString.emptyByteString(),
                (acc, b) -> acc.concat(b),
                materializer)
            .thenApply(s2 -> s2.utf8String()));
    }).whenComplete((msg, t) -> {
        String responseMessage = getResponseMessage(msg, t);
        logger.info("got response {}", responseMessage);
        latch2.countDown();
    });
    assertTrue(latch2.await(1, TimeUnit.SECONDS));
}

/** Creates the 'sine-function' job cluster that all subsequent tests operate on. */
@Test(dependsOnMethods = {"cleanupExistingJobs"})
public void setupJobCluster() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.POST(jobClusterAPIEndpoint("create"))
            .withMethod(HttpMethods.POST)
            .withEntity(HttpEntities.create(
                ContentTypes.APPLICATION_JSON,
                JobClusterPayloads.JOB_CLUSTER_CREATE)));
    responseFuture
        .thenCompose(r -> processRespFut(r, Optional.of(200)))
        .whenComplete((r, t) -> {
            String responseMessage = getResponseMessage(r, t);
            logger.info("got response {}", responseMessage);
            assertEquals("sine-function created", responseMessage);
            latch.countDown();
        });
    assertTrue(latch.await(2, TimeUnit.SECONDS));
}

/** Submits a job to the cluster; expects the first job id 'sine-function-1' back. */
@Test(dependsOnMethods = {"setupJobCluster"})
public void testJobSubmit() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.POST(String.format(
            "http://127.0.0.1:%d/api/submit",
            targetEndpointPort))
            .withEntity(HttpEntities.create(
                ContentTypes.APPLICATION_JSON,
                JobClusterPayloads.JOB_CLUSTER_SUBMIT)));
    responseFuture
        .thenCompose(r -> processRespFut(r, Optional.of(200)))
        .whenComplete((msg, t) -> {
            String responseMessage = getResponseMessage(msg, t);
            logger.info("got response {}", responseMessage);
            assertEquals("sine-function-1", responseMessage);
            latch.countDown();
        });
    assertTrue(latch.await(10, TimeUnit.SECONDS));
}

/** Lists active job ids for the cluster and verifies the single submitted job's metadata. */
@Test(dependsOnMethods = {"testJobSubmit"})
public void testJobClusterGetJobIds() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.GET(jobClusterAPIEndpoint("listJobIds/sine-function?jobState=Active")));
    responseFuture
        .thenCompose(r -> processRespFut(r, Optional.of(200)))
        .whenComplete((msg, t) -> {
            try {
                String responseMessage = getResponseMessage(msg, t);
                logger.info("got response {}", responseMessage);
                List<JobClusterProtoAdapter.JobIdInfo> jobIdInfos = Jackson.fromJSON(
                    responseMessage,
                    new TypeReference<List<JobClusterProtoAdapter.JobIdInfo>>() {
                    });
                logger.info("jobInfos---> {}", jobIdInfos);
                assertEquals(1, jobIdInfos.size());
                JobClusterProtoAdapter.JobIdInfo jobIdInfo = jobIdInfos.get(0);
                assertEquals("sine-function-1", jobIdInfo.getJobId());
                //assertEquals("0.1.39 2018-03-13 09:40:53", jobIdInfo.getVersion());
                assertEquals("", jobIdInfo.getTerminatedAt());
                assertEquals("nmahilani", jobIdInfo.getUser());
                // The job may still be transitioning, so either state is acceptable.
                assertTrue(jobIdInfo.getState().equals(MantisJobState.Accepted)
                    || jobIdInfo.getState().equals(MantisJobState.Launched));
            } catch (Exception e) {
                fail("unexpected error " + e.getMessage());
            }
            latch.countDown();
        });
    assertTrue(latch.await(2, TimeUnit.SECONDS));
}

/** Fetches the full job list and verifies job/stage/worker metadata of the one running job. */
@Test(dependsOnMethods = {"testJobClusterGetJobIds"})
public void testJobClusterGetJobsList() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.GET(jobAPIEndpoint("list")));
    responseFuture
        .thenCompose(r -> processRespFut(r, Optional.of(200)))
        .whenComplete((msg, t) -> {
            String responseMessage = getResponseMessage(msg, t);
            logger.info("got response---> {}", responseMessage);
            List<MantisJobMetadataView> jobInfos = Collections.emptyList();
            try {
                jobInfos = Jackson.fromJSON(
                    responseMessage,
                    new TypeReference<List<MantisJobMetadataView>>() {
                    });
            } catch (IOException e) {
                logger.error("failed to deser json {}", responseMessage, e);
                fail("job list deser failed");
            }
            logger.info("jobInfos---> {}", jobInfos);
            assertEquals(1, jobInfos.size());
            MantisJobMetadataView mjm = jobInfos.get(0);
            assertEquals(mjm.getJobMetadata().getJobId(), "sine-function-1");
            assertEquals(mjm.getJobMetadata().getName(), "sine-function");
            assertTrue(mjm.getStageMetadataList().size() > 0);
            MantisStageMetadataWritable msm = mjm.getStageMetadataList().get(0);
            assertEquals(1, msm.getNumWorkers());
            assertTrue(mjm.getWorkerMetadataList().size() > 0);
            MantisWorkerMetadataWritable mwm = mjm.getWorkerMetadataList().get(0);
            assertEquals("sine-function-1", mwm.getJobId());
            assertEquals(false, mwm.getCluster().isPresent());
            latch.countDown();
        });
    assertTrue(latch.await(2, TimeUnit.SECONDS));
}

/** Fetches a single job's detail view and verifies the same metadata as the list endpoint. */
@Test(dependsOnMethods = {"testJobClusterGetJobsList"})
public void testJobClusterGetJobDetail() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.GET(jobAPIEndpoint("list/sine-function-1")));
    responseFuture
        .thenCompose(r -> processRespFut(r, Optional.of(200)))
        .whenComplete((msg, t) -> {
            String responseMessage = getResponseMessage(msg, t);
            logger.info("got response---> {}", responseMessage);
            MantisJobMetadataView mjm = null;
            try {
                mjm = Jackson.fromJSON(responseMessage, MantisJobMetadataView.class);
            } catch (IOException e) {
                logger.error("failed to deser json {}", responseMessage, e);
                fail("job info deser failed");
            }
            logger.info("jobInfo---> {}", mjm);
            assertNotNull(mjm);
            assertEquals(mjm.getJobMetadata().getJobId(), "sine-function-1");
            assertEquals(mjm.getJobMetadata().getName(), "sine-function");
            assertTrue(mjm.getStageMetadataList().size() > 0);
            MantisStageMetadataWritable msm = mjm.getStageMetadataList().get(0);
            assertEquals(1, msm.getNumWorkers());
            assertTrue(mjm.getWorkerMetadataList().size() > 0);
            MantisWorkerMetadataWritable mwm = mjm.getWorkerMetadataList().get(0);
            assertEquals("sine-function-1", mwm.getJobId());
            assertEquals(false, mwm.getCluster().isPresent());
            latch.countDown();
        });
    assertTrue(latch.await(2, TimeUnit.SECONDS));
}

/** Verifies the compact job listing (list?compact=true) including aggregate counts and state summary. */
@Test(dependsOnMethods = {"testJobClusterGetJobDetail"})
public void testJobClusterGetJobsCompact() throws InterruptedException {
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.GET(jobAPIEndpoint("list?compact=true")));
    try {
        responseFuture
            .thenCompose(r -> processRespFut(r, Optional.of(200)))
            .whenComplete((msg, t) -> {
                String responseMessage = getResponseMessage(msg, t);
                logger.info("got response {}", responseMessage);
                List<CompactJobInfo> jobIdInfos = Collections.emptyList();
                try {
                    jobIdInfos = Jackson.fromJSON(
                        responseMessage,
                        new TypeReference<List<CompactJobInfo>>() {
                        });
                } catch (IOException e) {
                    logger.error(
                        "failed to get CompactJobInfos from json response {}",
                        responseMessage,
                        e);
                    fail("compactJobInfo deser failed");
                }
                logger.info("got jobIdInfos {}", jobIdInfos);
                assertEquals(1, jobIdInfos.size());
                CompactJobInfo jobInfo = jobIdInfos.get(0);
                assertEquals("sine-function-1", jobInfo.getJobId());
                assertEquals("nmahilani", jobInfo.getUser());
                assertEquals(7, jobInfo.getLabels().size());
                assertEquals(2, jobInfo.getNumStages());
                assertEquals(2, jobInfo.getNumWorkers());
                assertTrue(jobInfo.getState().equals(MantisJobState.Accepted)
                    || jobInfo.getState().equals(MantisJobState.Launched));
                assertEquals(2.0, jobInfo.getTotCPUs(), 0.0);
                // TODO total memory is 400 for old master, 2048 for new master
                //assertEquals(400.0, jobInfo.getTotMemory(), 0.0);
                assertEquals(MantisJobDurationType.Perpetual, jobInfo.getType());
                // Both workers should be in the same lifecycle state, whichever
                // point of the startup sequence they have reached.
                assertTrue(Collections.singletonMap("Started", 2)
                        .equals(jobInfo.getStatesSummary())
                    || Collections.singletonMap("StartInitiated", 2)
                        .equals(jobInfo.getStatesSummary())
                    || Collections.singletonMap("Launched", 2)
                        .equals(jobInfo.getStatesSummary())
                    || Collections.singletonMap("Accepted", 2)
                        .equals(jobInfo.getStatesSummary()));
            }).toCompletableFuture()
            .get(2, TimeUnit.SECONDS);
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    } catch (TimeoutException e) {
        throw new RuntimeException(e);
    }
}

/** Streams last-submitted-job-id info via the Mantis client and verifies name and job id. */
@Test(dependsOnMethods = {"testJobClusterGetJobsCompact"})
public void testNamedJobInfoStream() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final String jobCluster = "sine-function";
    Observable<NamedJobInfo> namedJobInfo = mantisClient.namedJobInfo(jobCluster);
    namedJobInfo
        .doOnNext(lastSubmittedJobId -> {
            logger.info(
                "namedJobInfo {} {}",
                lastSubmittedJobId.getName(),
                lastSubmittedJobId.getJobId());
            try {
                lastSubmittedJobId.getName();
                assertEquals("sine-function", lastSubmittedJobId.getName());
                assertEquals("sine-function-1", lastSubmittedJobId.getJobId());
            } catch (Exception e) {
                logger.error("caught exception", e);
                // org.testng.Assert is used here (not the JUnit asserts imported
                // statically) so the original exception can be attached.
                org.testng.Assert.fail(
                    "testNamedJobInfoStream test failed with exception " + e.getMessage(),
                    e);
            }
            latch.countDown();
        })
        .doOnError(t -> logger.warn("onError", t))
        .doOnCompleted(() -> logger.info("onCompleted"))
        .doAfterTerminate(() -> latch.countDown())
        .subscribe();
    latch.await();
}

/** Streams scheduling-info changes for the job and verifies the worker assignments of both stages. */
@Test(dependsOnMethods = {"testNamedJobInfoStream"})
public void testSchedulingInfo() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final String jobId = "sine-function-1";
    //        final AtomicBoolean flag = new AtomicBoolean(false);
    Observable<JobSchedulingInfo> jobSchedulingInfoObservable = mantisClient.schedulingChanges(
        jobId);
    jobSchedulingInfoObservable
        .map(schedInfo -> {
            logger.info("schedInfo {}", schedInfo);
            try {
                assertEquals(jobId, schedInfo.getJobId());
                Map<Integer, WorkerAssignments> wa = schedInfo.getWorkerAssignments();
                assertEquals(2, wa.size());
                // 1 worker in stage 0
                assertEquals(1, wa.get(0).getHosts().size());
                assertEquals(0, wa.get(0).getHosts().get(1).getWorkerIndex());
                assertEquals(1, wa.get(0).getHosts().get(1).getWorkerNumber());
                assertEquals(
                    MantisJobState.Started,
                    wa.get(0).getHosts().get(1).getState());
                // 1 worker in stage 1
                assertEquals(1, wa.get(1).getHosts().size());
                assertEquals(0, wa.get(1).getHosts().get(2).getWorkerIndex());
                assertEquals(2, wa.get(1).getHosts().get(2).getWorkerNumber());
                assertEquals(
                    MantisJobState.Started,
                    wa.get(1).getHosts().get(2).getState());
                //                        if (flag.compareAndSet(false, true)) {
                //                            testJobResubmitWorker();
                //                        }
            } catch (Exception e) {
                logger.error("caught exception", e);
                org.testng.Assert.fail(
                    "testSchedulingInfo test failed with exception " + e.getMessage(),
                    e);
            }
            latch.countDown();
            return schedInfo;
        })
        .take(1)
        .doOnError(t -> logger.warn("onError", t))
        .doOnCompleted(() -> logger.info("onCompleted"))
        .doAfterTerminate(() -> latch.countDown())
        .subscribe();
    latch.await();
}

/** Resubmits worker 2 of the job and verifies the confirmation message. */
@Test(dependsOnMethods = {"testSchedulingInfo"})
public void testJobResubmitWorker() throws InterruptedException {
    // Grace period before resubmitting — presumably to let the worker settle; TODO confirm.
    Thread.sleep(3000);
    final CountDownLatch latch = new CountDownLatch(1);
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.POST(jobAPIEndpoint(JobRoute.RESUBMIT_WORKER_ENDPOINT))
            .withMethod(HttpMethods.POST)
            .withEntity(HttpEntities.create(
                ContentTypes.APPLICATION_JSON,
                JobPayloads.RESUBMIT_WORKER)));
    responseFuture
        .thenCompose(r -> processRespFut(r, Optional.of(200)))
        .whenComplete((r, t) -> {
            String responseMessage = getResponseMessage(r, t);
            logger.info("got response {}", responseMessage);
            assertTrue(responseMessage.startsWith(
                "Worker 2 of job sine-function-1 resubmitted"));
            latch.countDown();
        });
    assertTrue(latch.await(2, TimeUnit.SECONDS));
}

/** After the resubmit, verifies the archived (replaced) worker's recorded metadata. */
@Test(dependsOnMethods = {"testJobResubmitWorker"})
public void testJobClusterGetJobArchivedWorkersList() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.GET(jobAPIEndpoint("archived/sine-function-1")));
    responseFuture
        .thenCompose(r -> processRespFut(r, Optional.of(200)))
        .whenComplete((msg, t) -> {
            String responseMessage = getResponseMessage(msg, t);
            logger.info("############################got response---> {}", responseMessage);
            //JobArchivedWorkersResponse resp = null;
            //JobArchivedWorkersResponse.Builder builder = JobArchivedWorkersResponse.newBuilder();
            List<MantisWorkerMetadataWritable> workers = Collections.emptyList();
            try {
                // JsonFormat.parser().ignoringUnknownFields().merge(responseMessage, builder);
                workers = Jackson.fromJSON(
                    responseMessage,
                    new TypeReference<List<MantisWorkerMetadataWritable>>() {
                    });
                // resp = builder.build();
            } catch (IOException e) {
                logger.error("failed to deser json {}", responseMessage, e);
                fail("archived workers list deser failed");
            }
            logger.info("archived workers ---> {}", workers);
            assertEquals(1, workers.size());
            MantisWorkerMetadataWritable worker = workers.get(0);
            // WorkerMetadata worker = resp.getWorkers(0);
            // assertEquals(1, resp.getWorkersCount());
            assertEquals("sine-function-1", worker.getJobId());
            logger.info("no of ports " + worker.getNumberOfPorts());
            assertEquals(5, worker.getNumberOfPorts());
            logger.info("stage num " + worker.getStageNum());
            assertEquals(1, worker.getStageNum());
            logger.info("Reason " + worker.getReason().name());
            //assertEquals("Relaunched", worker.getReason().name());
            logger.info("state " + worker.getState().name());
            assertEquals("Failed", worker.getState().name());
            logger.info("index " + worker.getWorkerIndex());
            assertEquals(0, worker.getWorkerIndex());
            logger.info("worker no " + worker.getWorkerNumber());
            assertEquals(2, worker.getWorkerNumber());
            logger.info("resubmit cnt " + worker.getTotalResubmitCount());
            assertEquals(0, worker.getTotalResubmitCount());
            latch.countDown();
        });
    assertTrue(latch.await(2, TimeUnit.SECONDS));
}

/** Scales stage 1 of the job to 3 workers and verifies the confirmation message. */
@Test(dependsOnMethods = {"testJobClusterGetJobArchivedWorkersList"})
public void testJobClusterScaleStage() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.POST(jobAPIEndpoint(JobRoute.SCALE_STAGE_ENDPOINT))
            .withMethod(HttpMethods.POST)
            .withEntity(HttpEntities.create(
                ContentTypes.APPLICATION_JSON,
                JobPayloads.SCALE_STAGE)));
    responseFuture
        .thenCompose(r -> processRespFut(r, Optional.of(200)))
        .whenComplete((r, t) -> {
            String responseMessage = getResponseMessage(r, t);
            logger.info("got response {}", responseMessage);
            assertEquals("Scaled stage 1 to 3 workers", responseMessage);
            latch.countDown();
        });
    assertTrue(latch.await(1, TimeUnit.SECONDS));
}

/**
 * Posts a plain-text worker status report and verifies it is forwarded.
 * Uses the headers-check-free response processor since this endpoint does not
 * set the CORS header the other endpoints are asserted to carry.
 */
@Test(dependsOnMethods = {"testJobClusterScaleStage"})
public void testJobStatus() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.POST(String.format(
            "http://127.0.0.1:%d/api/postjobstatus",
            targetEndpointPort))
            .withMethod(HttpMethods.POST)
            .withEntity(
                ContentTypes.create(
                    MediaTypes.TEXT_PLAIN,
                    HttpCharsets.ISO_8859_1),
                JobPayloads.JOB_STATUS));
    responseFuture
        .thenCompose(r -> processRespFutWithoutHeadersCheck(r, Optional.of(200)))
        .whenComplete((r, t) -> {
            String responseMessage = getResponseMessage(r, t);
            logger.info("got response '{}'", responseMessage);
            assertEquals("forwarded worker status", responseMessage);
            latch.countDown();
        });
    assertTrue(latch.await(1, TimeUnit.SECONDS));
}

/** Kills the job and verifies the JSON-array confirmation response. */
@Test(dependsOnMethods = {"testJobStatus"})
public void testJobKill() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.POST(jobAPIEndpoint(JobRoute.KILL_ENDPOINT))
            .withMethod(HttpMethods.POST)
            .withEntity(
                ContentTypes.create(MediaTypes.APPLICATION_X_WWW_FORM_URLENCODED)
                , JobPayloads.KILL_JOB));
    responseFuture
        .thenCompose(r -> processRespFut(r, Optional.of(200)))
        .whenComplete((r, t) -> {
            String responseMessage = getResponseMessage(r, t);
            logger.info("got response {}", responseMessage);
            assertEquals("[\"sine-function-1 Killed\"]", responseMessage.trim());
            latch.countDown();
        });
    assertTrue(latch.await(1, TimeUnit.SECONDS));
}

/** Disables the cluster (terminating any remaining jobs) as part of end-of-suite cleanup. */
@Test(dependsOnMethods = {"testJobKill"})
public void testJobClusterDisable() throws InterruptedException {
    // Disable cluster to terminate all running jobs
    final CountDownLatch latch = new CountDownLatch(1);
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.POST(jobClusterAPIEndpoint("disable"))
            .withMethod(HttpMethods.POST)
            .withEntity(HttpEntities.create(
                ContentTypes.APPLICATION_JSON,
                JobClusterPayloads.JOB_CLUSTER_DISABLE)));
    responseFuture
        .thenCompose(r -> processRespFut(r, Optional.of(200)))
        .whenComplete((r, t) -> {
            String responseMessage = getResponseMessage(r, t);
            logger.info("got response {}", responseMessage);
            assertEquals("sine-function disabled", responseMessage);
            latch.countDown();
        });
    assertTrue(latch.await(1, TimeUnit.SECONDS));
}

/** Deletes the (disabled) cluster — final step of the dependency chain. */
@Test(dependsOnMethods = {"testJobClusterDisable"})
public void testJobClusterDelete() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        HttpRequest.POST(jobClusterAPIEndpoint("delete"))
            .withEntity(HttpEntities.create(
                ContentTypes.APPLICATION_JSON,
                JobClusterPayloads.JOB_CLUSTER_DELETE)));
    responseFuture
        .thenCompose(r -> processRespFut(r, Optional.of(200)))
        .whenComplete((msg, t) -> {
            String responseMessage = getResponseMessage(msg, t);
            logger.info("got response {}", responseMessage);
            assertEquals("sine-function deleted", responseMessage);
            latch.countDown();
        });
    assertTrue(latch.await(1, TimeUnit.SECONDS));
}
}
4,241
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route/v0/JobClusterRouteTest.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.v0; import akka.NotUsed; import akka.actor.ActorRef; import akka.actor.ActorSystem; import akka.http.javadsl.ConnectHttp; import akka.http.javadsl.Http; import akka.http.javadsl.ServerBinding; import akka.http.javadsl.model.ContentTypes; import akka.http.javadsl.model.HttpCharsets; import akka.http.javadsl.model.HttpEntities; import akka.http.javadsl.model.HttpEntity; import akka.http.javadsl.model.HttpMethods; import akka.http.javadsl.model.HttpRequest; import akka.http.javadsl.model.HttpResponse; import akka.http.javadsl.model.MediaTypes; import akka.stream.ActorMaterializer; import akka.stream.javadsl.Flow; import akka.util.ByteString; import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference; import com.netflix.mantis.master.scheduler.TestHelpers; import io.mantisrx.master.JobClustersManagerActor; import io.mantisrx.master.api.akka.payloads.JobClusterPayloads; import io.mantisrx.master.api.akka.route.Jackson; import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandler; import io.mantisrx.master.api.akka.route.handlers.JobClusterRouteHandlerAkkaImpl; import io.mantisrx.master.api.akka.route.handlers.JobRouteHandler; import io.mantisrx.master.api.akka.route.handlers.JobRouteHandlerAkkaImpl; import io.mantisrx.master.api.akka.route.proto.JobClusterProtoAdapter; import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl; 
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventPublisherImpl;
import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl;
import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl;
import io.mantisrx.master.jobcluster.MantisJobClusterMetadataView;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.scheduler.FakeMantisScheduler;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.persistence.SimpleCachedFileStorageProvider;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

/**
 * Integration tests for the v0 job cluster (named-job) routes. A standalone
 * Akka-HTTP server backed by a JobClustersManagerActor (with a fake scheduler
 * and file-based storage) is bound once for the class; the tests form a
 * TestNG dependsOnMethods chain exercising create/disable/enable/update/list
 * operations against the 'sine-function' cluster.
 */
public class JobClusterRouteTest {
    private final static Logger logger = LoggerFactory.getLogger(JobClusterRouteTest.class);
    private final ActorMaterializer materializer = ActorMaterializer.create(system);
    private final Http http = Http.get(system);
    // Bootstrap thread that binds the test server; interrupted in teardown().
    private static Thread t;
    private static final int serverPort = 8301;

    /**
     * Asserts the status code and the permissive CORS header on the response,
     * then drains the entity and returns its UTF-8 body.
     */
    private CompletionStage<String> processRespFut(final HttpResponse r,
                                                   final int expectedStatusCode) {
        logger.info("headers {} {}", r.getHeaders(), r.status());
        assertEquals(expectedStatusCode, r.status().intValue());
        assert(r.getHeader("Access-Control-Allow-Origin").isPresent());
        assertEquals("*", r.getHeader("Access-Control-Allow-Origin").get().value());

        CompletionStage<HttpEntity.Strict> strictEntity = r.entity().toStrict(1000, materializer);
        return strictEntity.thenCompose(s ->
            s.getDataBytes()
                .runFold(ByteString.emptyByteString(), (acc, b) -> acc.concat(b), materializer)
                .thenApply(s2 -> s2.utf8String())
        );
    }

    /** Fails the calling test if the completion carried an error; otherwise returns the body. */
    private String getResponseMessage(final String msg, final Throwable t) {
        if (t != null) {
            logger.error("got err ", t);
            fail(t.getMessage());
        } else {
            return msg;
        }
        return "";
    }

    private static CompletionStage<ServerBinding> binding;
    private static ActorSystem system = ActorSystem.create("JobClusterRoutes");

    /**
     * Binds the v0 JobClusterRoute on a daemon thread backed by a
     * JobClustersManagerActor with a fake scheduler and a temp file store.
     */
    @BeforeClass
    public static void setup() throws Exception {
        TestHelpers.setupMasterConfig();
        final CountDownLatch latch = new CountDownLatch(1);
        t = new Thread(() -> {
            try {
                // boot up server using the route as defined below
                final Http http = Http.get(system);
                final ActorMaterializer materializer = ActorMaterializer.create(system);
                final LifecycleEventPublisher lifecycleEventPublisher =
                    new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(),
                        new StatusEventSubscriberLoggingImpl(),
                        new WorkerEventSubscriberLoggingImpl());

                ActorRef jobClustersManagerActor = system.actorOf(
                    JobClustersManagerActor.props(
                        new MantisJobStore(new SimpleCachedFileStorageProvider(true)),
                        lifecycleEventPublisher),
                    "jobClustersManager");
                MantisScheduler fakeScheduler = new FakeMantisScheduler(jobClustersManagerActor);
                jobClustersManagerActor.tell(
                    new JobClusterManagerProto.JobClustersManagerInitialize(fakeScheduler, false),
                    ActorRef.noSender());

                final JobClusterRouteHandler jobClusterRouteHandler =
                    new JobClusterRouteHandlerAkkaImpl(jobClustersManagerActor);
                final JobRouteHandler jobRouteHandler =
                    new JobRouteHandlerAkkaImpl(jobClustersManagerActor);

                final JobClusterRoute app =
                    new JobClusterRoute(jobClusterRouteHandler, jobRouteHandler, system);
                final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow =
                    app.createRoute(Function.identity()).flow(system, materializer);
                logger.info("starting test server on port {}", serverPort);
                binding = http.bindAndHandle(routeFlow,
                    ConnectHttp.toHost("localhost", serverPort),
                    materializer);
                latch.countDown();
            } catch (Exception e) {
                logger.info("caught exception", e);
                // Count down even on failure so setup() does not hang awaiting the latch.
                latch.countDown();
                e.printStackTrace();
            }
        });
        t.setDaemon(true);
        t.start();
        latch.await();
    }

    /** Unbinds the test HTTP server, terminates the actor system, and stops the bootstrap thread. */
    @AfterClass
    public static void teardown() {
        logger.info("V0JobClusterRouteTest teardown");
        binding
            .thenCompose(ServerBinding::unbind) // trigger unbinding from the port
            .thenAccept(unbound -> system.terminate()); // and shutdown when done
        t.interrupt();
    }

    /** Builds a v0 named-job API URL on the locally bound test server. */
    private String namedJobAPIEndpoint(final String endpoint) {
        return String.format("http://127.0.0.1:%d/api/namedjob/%s", serverPort, endpoint);
    }

    /** First link of the dependency chain; the delete-based cleanup is currently disabled. */
    @Test
    public void cleanupExistingJobs() throws InterruptedException {
        //        final CountDownLatch latch = new CountDownLatch(1);
        //        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
        //            HttpRequest.POST(namedJobAPIEndpoint("delete"))
        //                .withEntity(HttpEntities.create(ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_DELETE)));
        //        responseFuture.thenCompose(r -> {
        //            CompletionStage<HttpEntity.Strict> strictEntity = r.entity().toStrict(1000, materializer);
        //            return strictEntity.thenCompose(s ->
        //                s.getDataBytes()
        //                    .runFold(ByteString.empty(), (acc, b) -> acc.concat(b), materializer)
        //                    .thenApply(s2 -> s2.utf8String()));
        //        }).whenComplete((msg, t) -> {
        //            String responseMessage = getResponseMessage(msg, t);
        //            logger.info("got response {}", responseMessage);
        //            latch.countDown();
        //        });
        //        assertTrue(latch.await(1, TimeUnit.SECONDS));
    }

    /** Creates the 'sine-function' cluster used by all subsequent tests. */
    @Test(dependsOnMethods = { "cleanupExistingJobs" })
    public void testJobClusterCreate() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.POST(namedJobAPIEndpoint("create"))
                .withMethod(HttpMethods.POST)
                .withEntity(ContentTypes.create(MediaTypes.APPLICATION_X_WWW_FORM_URLENCODED),
                    JobClusterPayloads.JOB_CLUSTER_CREATE));
        responseFuture
            .thenCompose(r -> processRespFut(r, 200))
            .whenComplete((r, t) -> {
                String responseMessage = getResponseMessage(r, t);
                logger.info("got response {}", responseMessage);
                assertEquals("sine-function created", responseMessage);
                latch.countDown();
            });
        assertTrue(latch.await(3, TimeUnit.SECONDS));
    }

    /** A second create for the same cluster must fail with a 500 and a JSON error body. */
    @Test(dependsOnMethods = { "testJobClusterCreate" })
    public void testDuplicateJobClusterCreateFails() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.POST(namedJobAPIEndpoint("create"))
                .withMethod(HttpMethods.POST)
                .withEntity(ContentTypes.create(MediaTypes.APPLICATION_X_WWW_FORM_URLENCODED),
                    JobClusterPayloads.JOB_CLUSTER_CREATE));
        responseFuture
            .thenCompose(r -> processRespFut(r, 500))
            .whenComplete((r, t) -> {
                String responseMessage = getResponseMessage(r, t);
                logger.info("got response {}", responseMessage);
                assertTrue(responseMessage.startsWith("{\"error\":"));
                latch.countDown();
            });
        assertTrue(latch.await(2, TimeUnit.SECONDS));
    }

    /** Disables the cluster and verifies the confirmation message. */
    @Test(dependsOnMethods = { "testDuplicateJobClusterCreateFails" })
    public void testJobClusterDisable() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.POST(namedJobAPIEndpoint("disable"))
                .withMethod(HttpMethods.POST)
                .withEntity(ContentTypes.create(MediaTypes.APPLICATION_X_WWW_FORM_URLENCODED),
                    JobClusterPayloads.JOB_CLUSTER_DISABLE));
        responseFuture
            .thenCompose(r -> processRespFut(r, 200))
            .whenComplete((r, t) -> {
                String responseMessage = getResponseMessage(r, t);
                logger.info("got response {}", responseMessage);
                assertEquals("sine-function disabled", responseMessage);
                latch.countDown();
            });
        assertTrue(latch.await(1, TimeUnit.SECONDS));
    }

    /**
     * Re-enables the cluster. NOTE(review): the request reuses the
     * JOB_CLUSTER_DISABLE payload — presumably both payloads only carry the
     * cluster name and user; confirm against JobClusterPayloads.
     */
    @Test(dependsOnMethods = { "testJobClusterDisable" })
    public void testJobClusterEnable() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.POST(namedJobAPIEndpoint("enable"))
                .withMethod(HttpMethods.POST)
                .withEntity(ContentTypes.create(MediaTypes.APPLICATION_X_WWW_FORM_URLENCODED),
                    JobClusterPayloads.JOB_CLUSTER_DISABLE));
        responseFuture
            .thenCompose(r -> processRespFut(r, 200))
            .whenComplete((r, t) -> {
                String responseMessage = getResponseMessage(r, t);
                logger.info("got response {}", responseMessage);
                assertEquals("sine-function enabled", responseMessage);
                latch.countDown();
            });
        assertTrue(latch.await(1, TimeUnit.SECONDS));
    }

    /** Quick-updates the cluster artifact (skipping job submit) and verifies the confirmation. */
    @Test(dependsOnMethods = { "testJobClusterEnable" })
    public void testJobClusterUpdateArtifact() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.POST(namedJobAPIEndpoint("quickupdate"))
                .withEntity(ContentTypes.create(MediaTypes.APPLICATION_X_WWW_FORM_URLENCODED),
                    JobClusterPayloads.JOB_CLUSTER_QUICK_UPDATE_AND_SKIP_SUBMIT));
        responseFuture
            .thenCompose(r -> processRespFut(r, 200))
            .whenComplete((msg, t) -> {
                String responseMessage = getResponseMessage(msg, t);
                logger.info("got response {}", responseMessage);
                assertEquals("sine-function artifact updated", responseMessage);
                latch.countDown();
            });
        assertTrue(latch.await(1, TimeUnit.SECONDS));
    }

    /** Updates the cluster's SLA and verifies the confirmation message. */
    @Test(dependsOnMethods = { "testJobClusterUpdateArtifact" })
    public void testJobClusterUpdateSLA() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.POST(namedJobAPIEndpoint("updatesla"))
                .withEntity(ContentTypes.create(MediaTypes.APPLICATION_X_WWW_FORM_URLENCODED),
                    JobClusterPayloads.JOB_CLUSTER_UPDATE_SLA));
        responseFuture
            .thenCompose(r -> processRespFut(r, 200))
            .whenComplete((msg, t) -> {
                String responseMessage = getResponseMessage(msg, t);
                logger.info("got response {}", responseMessage);
                assertEquals("sine-function SLA updated", responseMessage);
                latch.countDown();
            });
        assertTrue(latch.await(10, TimeUnit.SECONDS));
    }

    /** Updates the cluster's labels and verifies the confirmation message. */
    @Test(dependsOnMethods = { "testJobClusterUpdateSLA" })
    public void testJobClusterUpdateLabels() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.POST(namedJobAPIEndpoint("updatelabels"))
                .withEntity(ContentTypes.create(MediaTypes.APPLICATION_X_WWW_FORM_URLENCODED),
                    JobClusterPayloads.JOB_CLUSTER_UPDATE_LABELS));
        responseFuture
            .thenCompose(r -> processRespFut(r, 200))
            .whenComplete((msg, t) -> {
                String responseMessage = getResponseMessage(msg, t);
                logger.info("got response {}", responseMessage);
                assertEquals("sine-function labels updated", responseMessage);
                latch.countDown();
            });
        assertTrue(latch.await(1, TimeUnit.SECONDS));
    }

    /** Updates the cluster's worker-migration strategy and verifies the confirmation. */
    @Test(dependsOnMethods = { "testJobClusterUpdateLabels" })
    public void testJobClusterUpdateMigrateStrategy() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.POST(namedJobAPIEndpoint("migratestrategy"))
                .withEntity(ContentTypes.create(MediaTypes.APPLICATION_X_WWW_FORM_URLENCODED),
                    JobClusterPayloads.MIGRATE_STRATEGY_UPDATE));
        responseFuture
            .thenCompose(r -> processRespFut(r, 200))
            .whenComplete((msg, t) -> {
                String responseMessage = getResponseMessage(msg, t);
                logger.info("got response {}", responseMessage);
                assertEquals("sine-function worker migration config updated", responseMessage);
                latch.countDown();
            });
        assertTrue(latch.await(1, TimeUnit.SECONDS));
    }

    /** Quick-submit without a prior submission must fail with a 400 explaining why. */
    @Test(dependsOnMethods = { "testJobClusterUpdateMigrateStrategy" })
    public void testJobClusterQuickSubmit() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.POST(namedJobAPIEndpoint("quicksubmit"))
                .withEntity(ContentTypes.create(MediaTypes.APPLICATION_X_WWW_FORM_URLENCODED),
                    JobClusterPayloads.QUICK_SUBMIT));
        responseFuture
            .thenCompose(r -> processRespFut(r, 400))
            .whenComplete((msg, t) -> {
                String responseMessage = getResponseMessage(msg, t);
                logger.info("got response {}", responseMessage);
                assertTrue(responseMessage.contains("Job Definition could not retrieved from a previous submission (There may not be a previous submission)"));
                latch.countDown();
            });
        assertTrue(latch.await(1, TimeUnit.SECONDS));
    }

    /**
     * Lists job clusters repeatedly and verifies the response is stable across
     * iterations (same single 'sine-function' cluster, identical payload).
     */
    @Test(dependsOnMethods = { "testJobClusterQuickSubmit" })
    public void testJobClustersList() throws InterruptedException {
        int numIter = 10;
        final CountDownLatch latch = new CountDownLatch(numIter);
        AtomicReference<String> prevResp = new AtomicReference<>(null);
        for (int i =0; i < numIter; i++) {
            final CompletionStage<HttpResponse> responseFuture2 = http.singleRequest(
                HttpRequest.GET(namedJobAPIEndpoint("list")));
            responseFuture2
                .thenCompose(r -> processRespFut(r, 200))
                .whenComplete((msg, t) -> {
                    String responseMessage = getResponseMessage(msg, t);
                    logger.info("got response {}", responseMessage);
                    try {
                        List<MantisJobClusterMetadataView> jobClusters =
                            Jackson.fromJSON(responseMessage,
                                new TypeReference<List<MantisJobClusterMetadataView>>() {
                                });
                        assertEquals(1, jobClusters.size());
                        MantisJobClusterMetadataView jobCluster = jobClusters.get(0);
                        assertEquals("sine-function", jobCluster.getName());
                    } catch (IOException e) {
                        fail("failed to parse response message " + e.getMessage());
                    }
                    if (prevResp.get() != null) {
                        assertEquals(prevResp.get(), responseMessage);
                    }
                    prevResp.set(responseMessage);
                    latch.countDown();
                });
        }
        assertTrue(latch.await(2, TimeUnit.SECONDS));
    }

    /** Fetches the cluster's detail view and verifies its jars/scheduling-info metadata. */
    @Test(dependsOnMethods = { "testJobClustersList" })
    public void testJobClusterGetDetail() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.GET(namedJobAPIEndpoint("list/sine-function")));
        responseFuture
            .thenCompose(r -> processRespFut(r, 200))
            .whenComplete((msg, t) -> {
                String responseMessage = getResponseMessage(msg, t);
                logger.info("got response {}", responseMessage);
                try {
                    List<MantisJobClusterMetadataView> jobClusters =
                        Jackson.fromJSON(responseMessage,
                            new TypeReference<List<MantisJobClusterMetadataView>>() {});
                    assertEquals(1, jobClusters.size());
                    MantisJobClusterMetadataView jc = jobClusters.get(0);
                    assertEquals("sine-function", jc.getName());
                    // TODO fix Jars list
                    assertEquals(2, jc.getJars().size());
                    assertEquals(1, jc.getJars().get(0).getSchedulingInfo().getStages().size());
                    assertEquals(1, jc.getJars().get(0).getSchedulingInfo().getStages().get(1).getNumberOfInstances());
                    assertEquals(true, jc.getJars().get(0).getSchedulingInfo().getStages().get(1).getScalable());
                    assertEquals("sine-function", jc.getName());
                } catch (Exception e) {
                    logger.error("failed to deser json {}", responseMessage, e);
                    fail("failed to deser json "+responseMessage);
                }
                latch.countDown();
            });
        assertTrue(latch.await(2, TimeUnit.SECONDS));
    }

    /** Lists job ids for the cluster; no job was ever submitted, so the list is empty. */
    @Test(dependsOnMethods = { "testJobClusterGetDetail" })
    public void testJobClusterGetJobIds() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.GET(namedJobAPIEndpoint("listJobIds/sine-function")));
        responseFuture
            .thenCompose(r -> processRespFut(r, 200))
            .whenComplete((msg, t) -> {
                try {
                    String responseMessage = getResponseMessage(msg, t);
                    logger.info("got response {}", responseMessage);
                    List<JobClusterProtoAdapter.JobIdInfo> jobIdInfos =
                        Jackson.fromJSON(responseMessage,
                            new TypeReference<List<JobClusterProtoAdapter.JobIdInfo>>() {
                            });
                    assertEquals(0, jobIdInfos.size());
                } catch (Exception e) {
                    fail("unexpected error "+ e.getMessage());
                }
                latch.countDown();
            });
        assertTrue(latch.await(2, TimeUnit.SECONDS));
    }

    @Test(dependsOnMethods = { "testJobClusterGetJobIds" })
    public void testJobClusterGetAllJobIds() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final
CompletionStage<HttpResponse> responseFuture = http.singleRequest( HttpRequest.GET(namedJobAPIEndpoint("listJobIds"))); responseFuture .thenCompose(r -> processRespFut(r, 400)) .whenComplete((msg, t) -> { String responseMessage = getResponseMessage(msg, t); logger.info("got response {}", responseMessage); assertEquals("Specify the Job cluster name '/api/namedjob/listJobIds/<JobClusterName>' to list the job Ids", responseMessage); latch.countDown(); }); assertTrue(latch.await(1, TimeUnit.SECONDS)); } @Test(dependsOnMethods = { "testJobClusterGetAllJobIds" }) public void testJobClusterDisable2() throws InterruptedException { // Disable cluster to terminate all running jobs final CountDownLatch latch = new CountDownLatch(1); final CompletionStage<HttpResponse> responseFuture = http.singleRequest( HttpRequest.POST(namedJobAPIEndpoint("disable")) .withMethod(HttpMethods.POST) .withEntity(HttpEntities.create(ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_DISABLE))); responseFuture .thenCompose(r -> processRespFut(r, 200)) .whenComplete((r, t) -> { String responseMessage = getResponseMessage(r, t); logger.info("got response {}", responseMessage); assertEquals("sine-function disabled", responseMessage); latch.countDown(); }); assertTrue(latch.await(1, TimeUnit.SECONDS)); } @Test(dependsOnMethods = { "testJobClusterDisable2" }) public void testJobClusterDelete() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); final CompletionStage<HttpResponse> responseFuture = http.singleRequest( HttpRequest.POST(namedJobAPIEndpoint("delete")) .withEntity(HttpEntities.create(ContentTypes.APPLICATION_JSON, JobClusterPayloads.JOB_CLUSTER_DELETE))); responseFuture .thenCompose(r -> processRespFut(r, 200)) .whenComplete((msg, t) -> { String responseMessage = getResponseMessage(msg, t); logger.info("got response {}", responseMessage); assertEquals("sine-function deleted", responseMessage); latch.countDown(); }); assertTrue(latch.await(1, 
TimeUnit.SECONDS)); } }
4,242
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route/v0/JobStatusRouteTest.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.v0; import akka.NotUsed; import akka.actor.ActorRef; import akka.actor.ActorSystem; import akka.http.javadsl.ConnectHttp; import akka.http.javadsl.ConnectionContext; import akka.http.javadsl.Http; import akka.http.javadsl.ServerBinding; import akka.http.javadsl.model.HttpRequest; import akka.http.javadsl.model.HttpResponse; import akka.http.javadsl.model.ws.Message; import akka.http.javadsl.model.ws.WebSocketRequest; import akka.http.javadsl.settings.ClientConnectionSettings; import akka.http.javadsl.settings.WebSocketSettings; import akka.stream.ActorMaterializer; import akka.stream.javadsl.Flow; import akka.stream.javadsl.Sink; import akka.stream.javadsl.Source; import akka.util.ByteString; import io.mantisrx.master.JobClustersManagerActor; import io.mantisrx.master.api.akka.route.handlers.JobStatusRouteHandler; import io.mantisrx.master.api.akka.route.handlers.JobStatusRouteHandlerAkkaImpl; import io.mantisrx.master.events.*; import io.mantisrx.master.jobcluster.job.JobTestHelper; import io.mantisrx.master.jobcluster.job.worker.WorkerState; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto; import io.mantisrx.master.scheduler.AgentsErrorMonitorActor; import io.mantisrx.master.scheduler.FakeMantisScheduler; import io.mantisrx.server.core.domain.WorkerId; import io.mantisrx.server.master.persistence.MantisJobStore; import 
io.mantisrx.server.master.scheduler.MantisScheduler; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import java.util.Optional; import java.util.concurrent.CompletionStage; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; public class JobStatusRouteTest { private final static Logger logger = LoggerFactory.getLogger(JobStatusRouteTest.class); private final ActorMaterializer materializer = ActorMaterializer.create(system); private final Http http = Http.get(system); private static Thread t; private static final int serverPort = 8207; private static CompletionStage<ServerBinding> binding; private static ActorSystem system = ActorSystem.create("JobStatusRoute"); private static ActorRef agentsErrorMonitorActor = system.actorOf(AgentsErrorMonitorActor.props()); private static ActorRef statusEventBrokerActor = system.actorOf(StatusEventBrokerActor.props(agentsErrorMonitorActor)); @BeforeClass public static void setup() throws Exception { JobTestHelper.deleteAllFiles(); JobTestHelper.createDirsIfRequired(); final CountDownLatch latch = new CountDownLatch(1); t = new Thread(() -> { try { // boot up server using the route as defined below final Http http = Http.get(system); final ActorMaterializer materializer = ActorMaterializer.create(system); final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl()); ActorRef jobClustersManagerActor = system.actorOf(JobClustersManagerActor.props( new MantisJobStore(new io.mantisrx.server.master.persistence.SimpleCachedFileStorageProvider(true)), lifecycleEventPublisher), "jobClustersManager"); MantisScheduler fakeScheduler = new FakeMantisScheduler(jobClustersManagerActor); jobClustersManagerActor.tell(new 
JobClusterManagerProto.JobClustersManagerInitialize(fakeScheduler, false), ActorRef.noSender()); agentsErrorMonitorActor.tell(new AgentsErrorMonitorActor.InitializeAgentsErrorMonitor(fakeScheduler), ActorRef.noSender()); final JobStatusRouteHandler jobStatusRouteHandler = new JobStatusRouteHandlerAkkaImpl(system, statusEventBrokerActor); final JobStatusRoute jobStatusRoute = new JobStatusRoute(jobStatusRouteHandler); final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow = jobStatusRoute.createRoute(Function.identity()).flow(system, materializer); logger.info("starting test server on port {}", serverPort); latch.countDown(); binding = http.bindAndHandle(routeFlow, ConnectHttp.toHost("localhost", serverPort), materializer); } catch (Exception e) { logger.info("caught exception", e); latch.countDown(); e.printStackTrace(); } }); t.setDaemon(true); t.start(); latch.await(); } @AfterClass public static void teardown() { logger.info("JobStatusRouteTest teardown"); binding .thenCompose(ServerBinding::unbind) // trigger unbinding from the port .thenAccept(unbound -> system.terminate()); // and shutdown when done t.interrupt(); } // @Test // @Ignore public void testJobStatus() throws InterruptedException { Flow<Message, Message, NotUsed> clientFlow = Flow.fromSinkAndSource(Sink.foreach(x -> System.out.println("client got " + x.asTextMessage().getStrictText())), Source.empty()); ClientConnectionSettings defaultSettings = ClientConnectionSettings.create(system); AtomicInteger pingCounter = new AtomicInteger(); WebSocketSettings customWebsocketSettings = defaultSettings.getWebsocketSettings() .withPeriodicKeepAliveData(() -> ByteString.fromString(String.format("debug-%d", pingCounter.incrementAndGet())) ); ClientConnectionSettings customSettings = defaultSettings.withWebsocketSettings(customWebsocketSettings); http.singleWebSocketRequest( WebSocketRequest.create("ws://127.0.0.1:8207/job/status/sine-function-1"), clientFlow, ConnectionContext.noEncryption(), 
Optional.empty(), customSettings, system.log(), materializer ); while (pingCounter.get() != 2) { statusEventBrokerActor.tell(new LifecycleEventsProto.WorkerStatusEvent( LifecycleEventsProto.StatusEvent.StatusEventType.INFO, "test message", 1, WorkerId.fromId("sine-function-1-worker-0-2").get(), WorkerState.Started), ActorRef.noSender()); Thread.sleep(2000); } } }
4,243
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route/v0/JobDiscoveryRouteTest.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.v0; import akka.NotUsed; import akka.actor.ActorRef; import akka.actor.ActorSystem; import akka.http.javadsl.ConnectHttp; import akka.http.javadsl.Http; import akka.http.javadsl.ServerBinding; import akka.http.javadsl.model.HttpRequest; import akka.http.javadsl.model.HttpResponse; import akka.stream.ActorMaterializer; import akka.stream.javadsl.Flow; import com.netflix.mantis.master.scheduler.TestHelpers; import io.mantisrx.master.JobClustersManagerActor; import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandler; import io.mantisrx.master.api.akka.route.handlers.JobDiscoveryRouteHandlerAkkaImpl; import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl; import io.mantisrx.master.events.LifecycleEventPublisher; import io.mantisrx.master.events.LifecycleEventPublisherImpl; import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl; import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl; import io.mantisrx.master.jobcluster.job.JobTestHelper; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto; import io.mantisrx.master.scheduler.AgentsErrorMonitorActor; import io.mantisrx.master.scheduler.FakeMantisScheduler; import io.mantisrx.server.core.JobSchedulingInfo; import io.mantisrx.server.core.NamedJobInfo; import io.mantisrx.server.master.persistence.MantisJobStore; import 
io.mantisrx.server.master.scheduler.MantisScheduler; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import rx.Observable; import java.time.Duration; import java.util.concurrent.CompletionStage; import java.util.concurrent.CountDownLatch; import java.util.function.Function; public class JobDiscoveryRouteTest { private final static Logger logger = LoggerFactory.getLogger(JobDiscoveryRouteTest.class); private final ActorMaterializer materializer = ActorMaterializer.create(system); private final Http http = Http.get(system); private static Thread t; private static final int serverPort = 8217; private static volatile CompletionStage<ServerBinding> binding; private static ActorSystem system = ActorSystem.create("JobDiscoveryRoute"); private static ActorRef agentsErrorMonitorActor = system.actorOf(AgentsErrorMonitorActor.props()); private final TestMantisClient mantisClient = new TestMantisClient(serverPort); @BeforeClass public static void setup() throws Exception { JobTestHelper.deleteAllFiles(); JobTestHelper.createDirsIfRequired(); final CountDownLatch latch = new CountDownLatch(1); t = new Thread(() -> { try { // boot up server using the route as defined below final Http http = Http.get(system); final ActorMaterializer materializer = ActorMaterializer.create(system); final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl()); TestHelpers.setupMasterConfig(); ActorRef jobClustersManagerActor = system.actorOf(JobClustersManagerActor.props( new MantisJobStore(new io.mantisrx.server.master.persistence.SimpleCachedFileStorageProvider(true)), lifecycleEventPublisher), "jobClustersManager"); MantisScheduler fakeScheduler = new FakeMantisScheduler(jobClustersManagerActor); 
jobClustersManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize(fakeScheduler, false), ActorRef.noSender()); agentsErrorMonitorActor.tell(new AgentsErrorMonitorActor.InitializeAgentsErrorMonitor(fakeScheduler), ActorRef.noSender()); Duration idleTimeout = system.settings().config().getDuration("akka.http.server.idle-timeout"); logger.info("idle timeout {} sec ", idleTimeout.getSeconds()); final JobDiscoveryRouteHandler jobDiscoveryRouteHandler = new JobDiscoveryRouteHandlerAkkaImpl(jobClustersManagerActor, idleTimeout); final JobDiscoveryRoute jobDiscoveryRoute = new JobDiscoveryRoute(jobDiscoveryRouteHandler); final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow = jobDiscoveryRoute.createRoute(Function.identity()).flow(system, materializer); logger.info("starting test server on port {}", serverPort); binding = http.bindAndHandle(routeFlow, ConnectHttp.toHost("localhost", serverPort), materializer); latch.countDown(); } catch (Exception e) { logger.info("caught exception", e); latch.countDown(); e.printStackTrace(); } }); t.setDaemon(true); t.start(); latch.await(); } @AfterClass public static void teardown() { logger.info("JobDiscoveryRouteTest teardown"); if (binding != null) { binding .thenCompose(ServerBinding::unbind) // trigger unbinding from the port .thenAccept(unbound -> system.terminate()); // and shutdown when done } t.interrupt(); } @Test public void testSchedulingInfoStreamForNonExistentJob() throws InterruptedException { // The current behavior of Mantis client is to retry non-200 responses // This test overrides the default retry/repeat behavior to test a Sched info observable would complete if the job id requested is non-existent final CountDownLatch latch = new CountDownLatch(1); Observable<JobSchedulingInfo> jobSchedulingInfoObservable = mantisClient .schedulingChanges("testJobCluster-1", obs -> Observable.just(1), obs -> Observable.empty() ); jobSchedulingInfoObservable .doOnNext(x -> logger.info("onNext {}", x)) 
.doOnError(t -> logger.warn("onError", t)) .doOnCompleted(() -> { logger.info("onCompleted"); latch.countDown(); }) .subscribe(); latch.await(); } @Test public void testNamedJobInfoStreamForNonExistentJob() throws InterruptedException { // The current behavior of Mantis client is to retry non-200 responses // This test overrides the default retry/repeat behavior to test a namedjob info observable would complete if the job cluster requested is non-existent final CountDownLatch latch = new CountDownLatch(1); Observable<NamedJobInfo> jobSchedulingInfoObservable = mantisClient .namedJobInfo("testJobCluster", obs -> Observable.just(1), obs -> Observable.empty() ); jobSchedulingInfoObservable .doOnNext(x -> logger.info("onNext {}", x)) .doOnError(t -> logger.warn("onError", t)) .doOnCompleted(() -> { logger.info("onCompleted"); latch.countDown(); }) .subscribe(); latch.await(); } }
4,244
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route/v0/AgentClusterRouteTest.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.v0; import akka.NotUsed; import akka.actor.ActorRef; import akka.actor.ActorSystem; import akka.http.javadsl.ConnectHttp; import akka.http.javadsl.Http; import akka.http.javadsl.ServerBinding; import akka.http.javadsl.model.HttpEntity; import akka.http.javadsl.model.HttpRequest; import akka.http.javadsl.model.HttpResponse; import akka.stream.ActorMaterializer; import akka.stream.javadsl.Flow; import akka.util.ByteString; import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference; import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature; import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.fenzo.AutoScaleAction; import com.netflix.fenzo.AutoScaleRule; import com.netflix.fenzo.VirtualMachineLease; import io.mantisrx.master.JobClustersManagerActor; import io.mantisrx.master.api.akka.payloads.AgentClusterPayloads; import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl; import io.mantisrx.master.events.LifecycleEventPublisher; import io.mantisrx.master.events.LifecycleEventPublisherImpl; import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl; import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto; import io.mantisrx.master.scheduler.FakeMantisScheduler; import 
io.mantisrx.master.scheduler.JobMessageRouterImpl; import io.mantisrx.master.vm.AgentClusterOperations; import io.mantisrx.master.vm.AgentClusterOperationsImpl; import io.mantisrx.server.master.AgentClustersAutoScaler; import io.mantisrx.server.master.persistence.IMantisStorageProvider; import io.mantisrx.server.master.persistence.MantisJobStore; import io.mantisrx.server.master.persistence.SimpleCachedFileStorageProvider; import io.mantisrx.server.master.scheduler.MantisScheduler; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import rx.Observer; import java.io.IOException; import java.util.Collections; import java.util.HashSet; import java.util.Map; import java.util.concurrent.CompletionStage; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.function.Function; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; public class AgentClusterRouteTest { private final static Logger logger = LoggerFactory.getLogger(AgentClusterRouteTest.class); private final ActorMaterializer materializer = ActorMaterializer.create(system); private final Http http = Http.get(system); private static Thread t; private static final int serverPort = 8209; private static final ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); // private static final AgentClusterOperations agentClusterOperations = mock(AgentClusterOperations.class); private CompletionStage<String> processRespFut(final HttpResponse r, final int expectedStatusCode) { logger.info("headers {} {}", r.getHeaders(), r.status()); assertEquals(expectedStatusCode, r.status().intValue()); assert(r.getHeader("Access-Control-Allow-Origin").isPresent()); assertEquals("*", r.getHeader("Access-Control-Allow-Origin").get().value()); 
CompletionStage<HttpEntity.Strict> strictEntity = r.entity().toStrict(1000, materializer); return strictEntity.thenCompose(s -> s.getDataBytes() .runFold(ByteString.emptyByteString(), (acc, b) -> acc.concat(b), materializer) .thenApply(s2 -> s2.utf8String()) ); } private String getResponseMessage(final String msg, final Throwable t) { if (t != null) { logger.error("got err ", t); fail(t.getMessage()); } else { return msg; } return ""; } private static CompletionStage<ServerBinding> binding; private static ActorSystem system = ActorSystem.create("AgentClusterRoutes"); @BeforeClass public static void setup() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); t = new Thread(() -> { try { // boot up server using the route as defined below final Http http = Http.get(system); final ActorMaterializer materializer = ActorMaterializer.create(system); IMantisStorageProvider storageProvider = new SimpleCachedFileStorageProvider(true); final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl()); ActorRef jobClustersManagerActor = system.actorOf( JobClustersManagerActor.props(new MantisJobStore(storageProvider), lifecycleEventPublisher), "jobClustersManager"); MantisScheduler fakeScheduler = new FakeMantisScheduler(jobClustersManagerActor); jobClustersManagerActor.tell( new JobClusterManagerProto.JobClustersManagerInitialize(fakeScheduler, false), ActorRef.noSender()); setupDummyAgentClusterAutoScaler(); final AgentClusterRoute v0AgentClusterRoute = new AgentClusterRoute( new AgentClusterOperationsImpl(storageProvider, new JobMessageRouterImpl(jobClustersManagerActor), fakeScheduler, lifecycleEventPublisher, "cluster"), system); final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow = v0AgentClusterRoute.createRoute(Function.identity()).flow(system, materializer); logger.info("test server starting on 
port {}", serverPort); binding = http.bindAndHandle(routeFlow, ConnectHttp.toHost("localhost", serverPort), materializer); latch.countDown(); } catch (Exception e) { logger.info("caught exception", e); latch.countDown(); e.printStackTrace(); } }); t.setDaemon(true); t.start(); latch.await(); } @AfterClass public static void teardown() { logger.info("V0AgentClusterRouteTest teardown"); binding .thenCompose(ServerBinding::unbind) // trigger unbinding from the port .thenAccept(unbound -> system.terminate()); // and shutdown when done t.interrupt(); } private String agentClusterEndpoint(final String endpoint) { return String.format("http://127.0.0.1:%d/api/vm/activevms/%s", serverPort, endpoint); } private static void setupDummyAgentClusterAutoScaler() { final AutoScaleRule dummyAutoScaleRule = new AutoScaleRule() { @Override public String getRuleName() { return "test"; } @Override public int getMinIdleHostsToKeep() { return 1; } @Override public int getMaxIdleHostsToKeep() { return 10; } @Override public long getCoolDownSecs() { return 300; } @Override public boolean idleMachineTooSmall(VirtualMachineLease lease) { return false; } @Override public int getMinSize() { return 1; } @Override public int getMaxSize() { return 100; } }; try { AgentClustersAutoScaler.initialize(() -> new HashSet<>(Collections.singletonList(dummyAutoScaleRule)), new Observer<AutoScaleAction>() { @Override public void onCompleted() { } @Override public void onError(Throwable e) { } @Override public void onNext(AutoScaleAction autoScaleAction) { } }); } catch (Exception e) { logger.info("AgentClustersAutoScaler is already initialized by another test", e); } } @Test() public void testSetActiveVMs() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); final CompletionStage<HttpResponse> responseFuture = http.singleRequest( HttpRequest.POST(agentClusterEndpoint(AgentClusterRoute.SETACTIVE)) .withEntity(AgentClusterPayloads.SET_ACTIVE)); responseFuture .thenCompose(r -> 
processRespFut(r, 200)) .whenComplete((msg, t) -> { String responseMessage = getResponseMessage(msg, t); logger.info("got response {}", responseMessage); latch.countDown(); }); assertTrue(latch.await(10, TimeUnit.SECONDS)); } @Test(dependsOnMethods = {"testSetActiveVMs"}) public void testGetJobsOnVMs() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); final CompletionStage<HttpResponse> responseFuture = http.singleRequest( HttpRequest.GET(agentClusterEndpoint(AgentClusterRoute.LISTJOBSONVMS))); responseFuture .thenCompose(r -> processRespFut(r, 200)) .whenComplete((msg, t) -> { String responseMessage = getResponseMessage(msg, t); logger.info("got response {}", responseMessage); // TODO validate jobs on VM response assertEquals("{}", responseMessage); latch.countDown(); }); assertTrue(latch.await(1, TimeUnit.SECONDS)); } @Test(dependsOnMethods = {"testGetJobsOnVMs"}) public void testGetAgentClustersList() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); final CompletionStage<HttpResponse> responseFuture = http.singleRequest( HttpRequest.GET(agentClusterEndpoint(AgentClusterRoute.LISTAGENTCLUSTERS))); responseFuture .thenCompose(r -> processRespFut(r, 200)) .whenComplete((msg, t) -> { String responseMessage = getResponseMessage(msg, t); logger.info("got response {}", responseMessage); try { Map<String, AgentClusterOperations.AgentClusterAutoScaleRule> agentClusterAutoScaleRule = mapper.readValue(responseMessage, new TypeReference<Map<String, AgentClusterOperations.AgentClusterAutoScaleRule>>() {}); agentClusterAutoScaleRule.values().forEach(autoScaleRule -> { assertEquals("test", autoScaleRule.getName()); assertEquals(300, autoScaleRule.getCooldownSecs()); assertEquals(1, autoScaleRule.getMinIdle()); assertEquals(10, autoScaleRule.getMaxIdle()); assertEquals(1, autoScaleRule.getMinSize()); assertEquals(100, autoScaleRule.getMaxSize()); }); } catch (IOException e) { logger.error("caught error", e); 
fail("failed to deserialize response"); } // assertEquals("{}", responseMessage); latch.countDown(); }); assertTrue(latch.await(1, TimeUnit.SECONDS)); } @Test(dependsOnMethods = {"testGetAgentClustersList"}) public void testGetActiveAgentClusters() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); final CompletionStage<HttpResponse> responseFuture = http.singleRequest( HttpRequest.GET(agentClusterEndpoint(AgentClusterRoute.LISTACTIVE))); responseFuture .thenCompose(r -> processRespFut(r, 200)) .whenComplete((msg, t) -> { String responseMessage = getResponseMessage(msg, t); logger.info("got response {}", responseMessage); assertEquals(AgentClusterPayloads.SET_ACTIVE, responseMessage); latch.countDown(); }); assertTrue(latch.await(1, TimeUnit.SECONDS)); } }
4,245
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route/v0/MasterDescriptionRouteTest.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.route.v0;

import akka.NotUsed;
import akka.actor.ActorSystem;
import akka.http.javadsl.ConnectHttp;
import akka.http.javadsl.Http;
import akka.http.javadsl.ServerBinding;
import akka.http.javadsl.model.HttpEntity;
import akka.http.javadsl.model.HttpRequest;
import akka.http.javadsl.model.HttpResponse;
import akka.stream.ActorMaterializer;
import akka.stream.javadsl.Flow;
import akka.util.ByteString;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.jobcluster.job.JobTestHelper;
import io.mantisrx.server.core.master.MasterDescription;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;

import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

/**
 * Integration test for {@code MasterDescriptionRoute}: boots a real Akka HTTP server
 * on a background daemon thread and issues GET requests against the /api/masterinfo
 * and /api/masterconfig endpoints, verifying the JSON payloads round-trip back to
 * the fake {@link MasterDescription} the route was constructed with.
 */
public class MasterDescriptionRouteTest {
    private final static Logger logger = LoggerFactory.getLogger(MasterDescriptionRouteTest.class);
    // Instance fields reference the static `system` declared below; statics are
    // initialized before any instance is constructed, so this ordering is safe.
    private final ActorMaterializer materializer = ActorMaterializer.create(system);
    private final Http http = Http.get(system);
    private static Thread t;
    private static final int serverPort = 8205;
    // Requests are issued against the same port the test server binds.
    private static final int targetEndpointPort = serverPort;

    // Fake master description served by the route; derived ports are offsets from
    // targetEndpointPort so they never collide with the API port itself.
    private static final MasterDescription fakeMasterDesc = new MasterDescription(
        "localhost",
        "127.0.0.1", targetEndpointPort, targetEndpointPort + 2,
        targetEndpointPort + 4, "api/postjobstatus",
        targetEndpointPort + 6, System.currentTimeMillis());

    /**
     * Asserts CORS header and (optionally) status code on the response, then drains
     * the entity bytes into a UTF-8 string.
     *
     * @param r                  the HTTP response to inspect
     * @param expectedStatusCode status code to assert when present
     * @return future completing with the full response body as a string
     */
    private CompletionStage<String> processRespFut(final HttpResponse r, final Optional<Integer> expectedStatusCode) {
        logger.info("headers {} {}", r.getHeaders(), r.status());
        expectedStatusCode.ifPresent(sc -> assertEquals(sc.intValue(), r.status().intValue()));
        assert(r.getHeader("Access-Control-Allow-Origin").isPresent());
        assertEquals("*", r.getHeader("Access-Control-Allow-Origin").get().value());

        // Materialize the (possibly chunked) entity fully before folding the bytes.
        CompletionStage<HttpEntity.Strict> strictEntity = r.entity().toStrict(1000, materializer);
        return strictEntity.thenCompose(s ->
            s.getDataBytes()
                .runFold(ByteString.emptyByteString(), (acc, b) -> acc.concat(b), materializer)
                .thenApply(s2 -> s2.utf8String())
        );
    }

    /**
     * Unwraps a whenComplete (msg, throwable) pair: fails the test if the throwable
     * is set, otherwise returns the message. The trailing return "" is only reached
     * after fail() throws, to satisfy the compiler.
     */
    private String getResponseMessage(final String msg, final Throwable t) {
        if (t != null) {
            logger.error("got err ", t);
            fail(t.getMessage());
        } else {
            return msg;
        }
        return "";
    }

    private static CompletionStage<ServerBinding> binding;
    private static ActorSystem system = ActorSystem.create("MasterDescriptionRouteTest");
    private static final MasterDescriptionRoute masterDescRoute;
    static {
        // Master config must be in place before the route object is constructed.
        TestHelpers.setupMasterConfig();
        masterDescRoute = new MasterDescriptionRoute(fakeMasterDesc);
    }

    /**
     * Starts the HTTP server on a daemon thread; the latch guarantees the tests do
     * not start issuing requests before bindAndHandle has been initiated.
     * NOTE(review): the latch counts down just before bindAndHandle, so the bind is
     * initiated but not necessarily complete when tests begin — presumably the
    * 2-second await in each test absorbs that race; confirm if flaky.
     */
    @BeforeClass
    public static void setup() throws Exception {
        JobTestHelper.deleteAllFiles();
        JobTestHelper.createDirsIfRequired();
        final CountDownLatch latch = new CountDownLatch(1);
        t = new Thread(() -> {
            try {
                // boot up server using the route as defined below
                final Http http = Http.get(system);
                final ActorMaterializer materializer = ActorMaterializer.create(system);
                final Flow<HttpRequest, HttpResponse, NotUsed> routeFlow =
                    masterDescRoute.createRoute(Function.identity()).flow(system, materializer);
                logger.info("starting test server on port {}", serverPort);
                latch.countDown();
                binding = http.bindAndHandle(routeFlow, ConnectHttp.toHost("localhost", serverPort), materializer);
            } catch (Exception e) {
                logger.info("caught exception", e);
                // Count down even on failure so setup() cannot hang forever.
                latch.countDown();
                e.printStackTrace();
            }
        });
        t.setDaemon(true);
        t.start();
        latch.await();
    }

    /** Unbinds the server port and then shuts down the actor system. */
    @AfterClass
    public static void teardown() {
        logger.info("MasterDescriptionRouteTest teardown");
        binding
            .thenCompose(ServerBinding::unbind) // trigger unbinding from the port
            .thenAccept(unbound -> system.terminate()); // and shutdown when done
        t.interrupt();
    }

    /** Builds the full URL for an API endpoint on the local test server. */
    private String masterEndpoint(final String ep) {
        return String.format("http://127.0.0.1:%d/api/%s", targetEndpointPort, ep);
    }

    /**
     * GET /api/masterinfo must return the fake MasterDescription, JSON-round-tripped.
     */
    @Test
    public void testMasterInfoAPI() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.GET(masterEndpoint("masterinfo")));
        responseFuture
            .thenCompose(r -> processRespFut(r, Optional.of(200)))
            .whenComplete((msg, t) -> {
                try {
                    String responseMessage = getResponseMessage(msg, t);
                    logger.info("got response {}", responseMessage);
                    MasterDescription masterDescription = Jackson.fromJSON(responseMessage, MasterDescription.class);
                    logger.info("master desc ---> {}", masterDescription);
                    assertEquals(fakeMasterDesc, masterDescription);
                } catch (Exception e) {
                    fail("unexpected error "+ e.getMessage());
                }
                // Always release the latch so the awaiting test thread can proceed.
                latch.countDown();
            });
        assertTrue(latch.await(2, TimeUnit.SECONDS));
    }

    /**
     * GET /api/masterconfig must return the route's configlet list, JSON-round-tripped.
     */
    @Test
    public void testMasterConfigAPI() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final CompletionStage<HttpResponse> responseFuture = http.singleRequest(
            HttpRequest.GET(masterEndpoint("masterconfig")));
        responseFuture
            .thenCompose(r -> processRespFut(r, Optional.of(200)))
            .whenComplete((msg, t) -> {
                try {
                    String responseMessage = getResponseMessage(msg, t);
                    logger.info("got response {}", responseMessage);
                    List<MasterDescriptionRoute.Configlet> masterconfig =
                        Jackson.fromJSON(responseMessage, new TypeReference<List<MasterDescriptionRoute.Configlet>>() {});
                    logger.info("master config ---> {}", masterconfig);
                    assertEquals(masterDescRoute.getConfigs(), masterconfig);
                } catch (Exception e) {
                    fail("unexpected error "+ e.getMessage());
                }
                latch.countDown();
            });
        assertTrue(latch.await(2, TimeUnit.SECONDS));
    }
}
4,246
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/route/utils/JobRouteUtilsTest.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.api.akka.route.utils; import io.mantisrx.master.jobcluster.job.JobState; import io.mantisrx.master.jobcluster.job.worker.WorkerState; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto; import org.junit.Test; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import static org.junit.Assert.assertEquals; public class JobRouteUtilsTest { @Test public void testListJobRequest() { Map<String, List<String>> params = new HashMap<>(); params.put(JobRouteUtils.QUERY_PARAM_LIMIT, Arrays.asList("10")); params.put(JobRouteUtils.QUERY_PARAM_JOB_STATE, Arrays.asList("Active")); params.put(JobRouteUtils.QUERY_PARAM_STAGE_NUM, Arrays.asList("1")); params.put(JobRouteUtils.QUERY_PARAM_WORKER_INDEX, Arrays.asList("11")); params.put(JobRouteUtils.QUERY_PARAM_WORKER_NUM, Arrays.asList("233")); params.put(JobRouteUtils.QUERY_PARAM_WORKER_STATE, Arrays.asList("Terminal")); params.put(JobRouteUtils.QUERY_PARAM_ACTIVE_ONLY, Arrays.asList("False")); params.put(JobRouteUtils.QUERY_PARAM_LABELS_QUERY, Arrays.asList("lab1=v1,lab3=v3")); params.put(JobRouteUtils.QUERY_PARAM_LABELS_OPERAND, Arrays.asList("and")); JobClusterManagerProto.ListJobsRequest listJobsRequest = JobRouteUtils.createListJobsRequest(params, Optional.of(".*abc.*"), true); assertEquals(10, 
listJobsRequest.getCriteria().getLimit().get().intValue()); assertEquals(JobState.MetaState.Active, listJobsRequest.getCriteria().getJobState().get()); assertEquals(1, listJobsRequest.getCriteria().getStageNumberList().get(0).intValue()); assertEquals(11, listJobsRequest.getCriteria().getWorkerIndexList().get(0).intValue()); assertEquals(233, listJobsRequest.getCriteria().getWorkerNumberList().get(0).intValue()); assertEquals(1, listJobsRequest.getCriteria().getWorkerStateList().size()); assertEquals(WorkerState.MetaState.Terminal, listJobsRequest.getCriteria().getWorkerStateList().get(0)); assertEquals(false, listJobsRequest.getCriteria().getActiveOnly().get()); assertEquals(2, listJobsRequest.getCriteria().getMatchingLabels().size()); assertEquals("lab1", listJobsRequest.getCriteria().getMatchingLabels().get(0).getName()); assertEquals("v1", listJobsRequest.getCriteria().getMatchingLabels().get(0).getValue()); assertEquals("lab3", listJobsRequest.getCriteria().getMatchingLabels().get(1).getName()); assertEquals("v3", listJobsRequest.getCriteria().getMatchingLabels().get(1).getValue()); assertEquals("and", listJobsRequest.getCriteria().getLabelsOperand().get()); assertEquals(".*abc.*", listJobsRequest.getCriteria().getMatchingRegex().get()); } @Test public void testListJobRequestDefaults() { JobClusterManagerProto.ListJobsRequest listJobsRequest2 = JobRouteUtils.createListJobsRequest(new HashMap<>(), Optional.empty(), true); assertEquals(false, listJobsRequest2.getCriteria().getLimit().isPresent()); assertEquals(false, listJobsRequest2.getCriteria().getJobState().isPresent()); assertEquals(0, listJobsRequest2.getCriteria().getStageNumberList().size()); assertEquals(0, listJobsRequest2.getCriteria().getWorkerIndexList().size()); assertEquals(0, listJobsRequest2.getCriteria().getWorkerNumberList().size()); assertEquals(0, listJobsRequest2.getCriteria().getWorkerStateList().size()); assertEquals(true, listJobsRequest2.getCriteria().getActiveOnly().get()); 
assertEquals(0, listJobsRequest2.getCriteria().getMatchingLabels().size()); assertEquals(false, listJobsRequest2.getCriteria().getLabelsOperand().isPresent()); assertEquals(false, listJobsRequest2.getCriteria().getMatchingRegex().isPresent()); } }
4,247
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/payloads/JobPayloads.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.payloads;

/**
 * Canned JSON request bodies used by the job-route API tests. Each constant is an
 * exact payload string; pairs with a *_NONEXISTENT / *_NonExistent variant target a
 * job id that does not exist, to exercise the error paths.
 */
public class JobPayloads {

    /** Resubmit worker 2 of job sine-function-1. */
    public static final String RESUBMIT_WORKER =
        "{"
            + "\"JobId\": \"sine-function-1\","
            + "\"user\": \"JobRouteTest\","
            + "\"workerNumber\": 2,"
            + "\"reason\": \"test worker resubmit\"}";

    /** Resubmit request against a job id that does not exist. */
    public static final String RESUBMIT_WORKER_NONEXISTENT =
        "{"
            + "\"JobId\": \"NonExistent-1\","
            + "\"user\": \"JobRouteTest\","
            + "\"workerNumber\": 2,"
            + "\"reason\": \"test worker resubmit\"}";

    /** Scale stage 1 of sine-function-1 to 3 workers. */
    public static final String SCALE_STAGE =
        "{"
            + "\"JobId\": \"sine-function-1\","
            + "\"NumWorkers\": 3,"
            + "\"StageNumber\": 1,"
            + "\"Reason\": \"test stage scaling\"}";

    /** Scale request against a job id that does not exist. */
    public static final String SCALE_STAGE_NonExistent =
        "{"
            + "\"JobId\": \"NonExistent-1\","
            + "\"NumWorkers\": 3,"
            + "\"StageNumber\": 1,"
            + "\"Reason\": \"test stage scaling\"}";

    /** Kill job sine-function-1. */
    public static final String KILL_JOB =
        "{"
            + "\"JobId\": \"sine-function-1\","
            + "\"user\": \"JobRouteTest\","
            + "\"reason\": \"test job kill\"}";

    /** Kill request against a job id that does not exist. */
    public static final String KILL_JOB_NonExistent =
        "{"
            + "\"JobId\": \"NonExistent-1\","
            + "\"user\": \"JobRouteTest\","
            + "\"reason\": \"test job kill\"}";

    /** A heartbeat job-status report for worker 2 of sine-function-1 (payload JSON is nested, hence the double escaping). */
    public static final String JOB_STATUS =
        "{\"jobId\":\"sine-function-1\",\"status\":{\"jobId\":\"sine-function-1\",\"stageNum\":1,\"workerIndex\":0,\"workerNumber\":2,\"type\":\"HEARTBEAT\",\"message\":\"heartbeat\",\"state\":\"Noop\",\"hostname\":null,\"timestamp\":1525813363585,\"reason\":\"Normal\",\"payloads\":[{\"type\":\"SubscriptionState\",\"data\":\"false\"},{\"type\":\"IncomingDataDrop\",\"data\":\"{\\\"onNextCount\\\":0,\\\"droppedCount\\\":0}\"}]}}";
}
4,248
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/payloads/JobClusterPayloads.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.payloads;

// Canned JSON request bodies for job-cluster API tests. Constants whose name contains
// NONEXISTENT / NonExistent target cluster "NonExistent" to exercise error paths.
// NOTE: these strings are compared/parsed by tests — do not reformat or "fix" the JSON.
public class JobClusterPayloads {

    // Create the "sine-function" job cluster: one scalable stage, CPU scaling strategy,
    // standard labels, percentage-based worker migration config.
    public static final String JOB_CLUSTER_CREATE = "{\"jobDefinition\":{\"name\":\"sine-function\",\"user\":\"nmahilani\",\"jobJarFileLocation\":\"https://mantis.staging.us-east-1.prod.netflix.net/mantis-artifacts/mantis-examples-sine-function-0.2.9.zip\"," +
        "\"version\":\"0.2.9 2018-05-29 16:12:56\",\"schedulingInfo\":{\"stages\":{" +
        "\"1\":{\"numberOfInstances\":\"1\",\"machineDefinition\":{\"cpuCores\":\"1\",\"memoryMB\":\"1024\",\"diskMB\":\"1024\",\"networkMbps\":\"128\",\"numPorts\":\"1\"},\"scalable\":true," +
        "\"scalingPolicy\":{\"stage\":1,\"min\":\"1\",\"max\":\"10\",\"increment\":\"2\",\"decrement\":\"1\",\"coolDownSecs\":\"600\"," +
        "\"strategies\":{\"CPU\":{\"reason\":\"CPU\",\"scaleDownBelowPct\":\"15\",\"scaleUpAbovePct\":\"75\",\"rollingCount\":{\"count\":\"12\",\"of\":\"20\"}}},\"enabled\":true},\"softConstraints\":[],\"hardConstraints\":[]}}}," +
        "\"parameters\":[],\"labels\":[{\"name\":\"_mantis.user\",\"value\":\"nmahilani\"},{\"name\":\"_mantis.ownerEmail\",\"value\":\"nmahilani@netflix.com\"},{\"name\":\"_mantis.jobType\",\"value\":\"other\"},{\"name\":\"_mantis.criticality\",\"value\":\"low\"},{\"name\":\"_mantis.artifact.version\",\"value\":\"0.2.9\"}]," +
        "\"migrationConfig\":{\"strategy\":\"PERCENTAGE\",\"configString\":\"{\\\"percentToMove\\\":25,\\\"intervalMs\\\":60000}\"},\"slaMin\":\"0\",\"slaMax\":\"0\",\"cronSpec\":null,\"cronPolicy\":\"KEEP_EXISTING\",\"isReadyForJobMaster\":true}," +
        "\"owner\":{\"contactEmail\":\"nmahilani@netflix.com\",\"description\":\"\",\"name\":\"Nick Mahilani\",\"repo\":\"\",\"teamName\":\"\"}}";

    // Same cluster definition as CREATE but with a new version string — a valid update.
    public static final String JOB_CLUSTER_VALID_UPDATE = "{\"jobDefinition\":{\"name\":\"sine-function\",\"user\":\"nmahilani\",\"jobJarFileLocation\":\"https://mantis.staging.us-east-1.prod.netflix.net/mantis-artifacts/mantis-examples-sine-function-0.2.9.zip\"," +
        "\"version\":\"0.2.9 2018-05-29 new version\",\"schedulingInfo\":{\"stages\":{" +
        "\"1\":{\"numberOfInstances\":\"1\",\"machineDefinition\":{\"cpuCores\":\"1\",\"memoryMB\":\"1024\",\"diskMB\":\"1024\",\"networkMbps\":\"128\",\"numPorts\":\"1\"},\"scalable\":true," +
        "\"scalingPolicy\":{\"stage\":1,\"min\":\"1\",\"max\":\"10\",\"increment\":\"2\",\"decrement\":\"1\",\"coolDownSecs\":\"600\"," +
        "\"strategies\":{\"CPU\":{\"reason\":\"CPU\",\"scaleDownBelowPct\":\"15\",\"scaleUpAbovePct\":\"75\",\"rollingCount\":{\"count\":\"12\",\"of\":\"20\"}}},\"enabled\":true},\"softConstraints\":[],\"hardConstraints\":[]}}}," +
        "\"parameters\":[],\"labels\":[{\"name\":\"_mantis.user\",\"value\":\"nmahilani\"},{\"name\":\"_mantis.ownerEmail\",\"value\":\"nmahilani@netflix.com\"},{\"name\":\"_mantis.jobType\",\"value\":\"other\"},{\"name\":\"_mantis.criticality\",\"value\":\"low\"},{\"name\":\"_mantis.artifact.version\",\"value\":\"0.2.9\"}]," +
        "\"migrationConfig\":{\"strategy\":\"PERCENTAGE\",\"configString\":\"{\\\"percentToMove\\\":25,\\\"intervalMs\\\":60000}\"},\"slaMin\":\"0\",\"slaMax\":\"0\",\"cronSpec\":null,\"cronPolicy\":\"KEEP_EXISTING\",\"isReadyForJobMaster\":true}," +
        "\"owner\":{\"contactEmail\":\"nmahilani@netflix.com\",\"description\":\"\",\"name\":\"Nick Mahilani\",\"repo\":\"\",\"teamName\":\"\"}}";

    // Update targeting a cluster name that does not exist — expected to be rejected.
    public static final String JOB_CLUSTER_INVALID_UPDATE = "{\"jobDefinition\":{\"name\":\"NonExistent\",\"user\":\"nmahilani\",\"jobJarFileLocation\":\"https://mantis.staging.us-east-1.prod.netflix.net/mantis-artifacts/mantis-examples-sine-function-0.2.9.zip\"," +
        "\"version\":\"0.2.9 2018-05-29 new version\",\"schedulingInfo\":{\"stages\":{" +
        "\"1\":{\"numberOfInstances\":\"1\",\"machineDefinition\":{\"cpuCores\":\"1\",\"memoryMB\":\"1024\",\"diskMB\":\"1024\",\"networkMbps\":\"128\",\"numPorts\":\"1\"},\"scalable\":true," +
        "\"scalingPolicy\":{\"stage\":1,\"min\":\"1\",\"max\":\"10\",\"increment\":\"2\",\"decrement\":\"1\",\"coolDownSecs\":\"600\"," +
        "\"strategies\":{\"CPU\":{\"reason\":\"CPU\",\"scaleDownBelowPct\":\"15\",\"scaleUpAbovePct\":\"75\",\"rollingCount\":{\"count\":\"12\",\"of\":\"20\"}}},\"enabled\":true},\"softConstraints\":[],\"hardConstraints\":[]}}}," +
        "\"parameters\":[],\"labels\":[{\"name\":\"_mantis.user\",\"value\":\"nmahilani\"},{\"name\":\"_mantis.ownerEmail\",\"value\":\"nmahilani@netflix.com\"},{\"name\":\"_mantis.jobType\",\"value\":\"other\"},{\"name\":\"_mantis.criticality\",\"value\":\"low\"},{\"name\":\"_mantis.artifact.version\",\"value\":\"0.2.9\"}]," +
        "\"migrationConfig\":{\"strategy\":\"PERCENTAGE\",\"configString\":\"{\\\"percentToMove\\\":25,\\\"intervalMs\\\":60000}\"},\"slaMin\":\"0\",\"slaMax\":\"0\",\"cronSpec\":null,\"cronPolicy\":\"KEEP_EXISTING\",\"isReadyForJobMaster\":true}," +
        "\"owner\":{\"contactEmail\":\"nmahilani@netflix.com\",\"description\":\"\",\"name\":\"Nick Mahilani\",\"repo\":\"\",\"teamName\":\"\"}}";

    // Delete / disable / enable requests for the sine-function cluster (and error-path variants).
    public static final String JOB_CLUSTER_DELETE = "{\n" +
        " \"name\": \"sine-function\",\n" +
        " \"user\": \"test\"}";

    public static final String JOB_CLUSTER_DISABLE = "{\n" +
        " \"name\": \"sine-function\",\n" +
        " \"user\": \"test\"}";

    public static final String JOB_CLUSTER_DISABLE_NONEXISTENT = "{\n" +
        " \"name\": \"NonExistent\",\n" +
        " \"user\": \"test\"}";

    public static final String JOB_CLUSTER_ENABLE = "{\n" +
        " \"name\": \"sine-function\",\n" +
        " \"user\": \"test\"}";

    public static final String JOB_CLUSTER_ENABLE_NONEXISTENT = "{\n" +
        " \"name\": \"NonExistent\",\n" +
        " \"user\": \"test\"}";

    // Quick-update to a new artifact version with skipsubmit=true (no job submitted).
    public static final String JOB_CLUSTER_QUICK_UPDATE_AND_SKIP_SUBMIT = "\n" +
        "{\"name\":\"sine-function\",\"version\":\"0.1.39 2018-03-13 09:40:53\",\"url\":\"https://mantis.staging.us-east-1.prod.netflix.net/mantis-artifacts/mantis-examples-sine-function-0.1.39.zip\",\"skipsubmit\":true,\"user\":\"nmahilani\"}";

    public static final String JOB_CLUSTER_QUICK_UPDATE_AND_SKIP_SUBMIT_NON_EXISTENT = "\n" +
        "{\"name\":\"NonExistent\",\"version\":\"0.1.39 2018-03-13 09:40:53\",\"url\":\"https://mantis.staging.us-east-1.prod.netflix.net/mantis-artifacts/mantis-examples-sine-function-0.1.39.zip\",\"skipsubmit\":true,\"user\":\"nmahilani\"}";

    // SLA update: min 0 / max 1 instances, no cron.
    public static final String JOB_CLUSTER_UPDATE_SLA = "{\"user\":\"nmahilani\",\"name\":\"sine-function\",\"min\":\"0\",\"max\":\"1\",\"cronspec\":\"\",\"cronpolicy\":\"KEEP_EXISTING\",\"forceenable\":false}";

    public static final String JOB_CLUSTER_UPDATE_SLA_NONEXISTENT = "{\"user\":\"nmahilani\",\"name\":\"NonExistent\",\"min\":\"0\",\"max\":\"1\",\"cronspec\":\"\",\"cronpolicy\":\"KEEP_EXISTING\",\"forceenable\":false}";

    // Replace the cluster's label set.
    public static final String JOB_CLUSTER_UPDATE_LABELS = "{\"name\":\"sine-function\",\"labels\":[{\"name\":\"_mantis.criticality\",\"value\":\"low\"},{\"name\":\"_mantis.dataOrigin\",\"value\":\"none\"}],\"user\":\"nmahilani\"}";

    public static final String JOB_CLUSTER_UPDATE_LABELS_NONEXISTENT = "{\"name\":\"NonExistent\",\"labels\":[{\"name\":\"_mantis.criticality\",\"value\":\"low\"},{\"name\":\"_mantis.dataOrigin\",\"value\":\"none\"}],\"user\":\"nmahilani\"}";

    // Update worker-migration strategy (configString is itself escaped JSON).
    public static final String MIGRATE_STRATEGY_UPDATE = "{\"name\":\"sine-function\",\"migrationConfig\":{\"strategy\":\"PERCENTAGE\",\"configString\":\"{\\\"percentToMove\\\":99,\\\"intervalMs\\\":10000}\"},\"user\":\"nmahilani\"}";

    public static final String MIGRATE_STRATEGY_UPDATE_NONEXISTENT = "{\"name\":\"NonExistent\",\"migrationConfig\":{\"strategy\":\"PERCENTAGE\",\"configString\":\"{\\\"percentToMove\\\":99,\\\"intervalMs\\\":10000}\"},\"user\":\"nmahilani\"}";

    // Quick-submit a job using the cluster's latest artifact.
    public static final String QUICK_SUBMIT = "{\"name\":\"sine-function\",\"user\":\"nmahilani\",\"jobSla\":{\"durationType\":\"Perpetual\",\"runtimeLimitSecs\":\"0\",\"minRuntimeSecs\":\"0\",\"userProvidedType\":\"\"}}";

    public static final String QUICK_SUBMIT_NONEXISTENT = "{\"name\":\"NonExistent\",\"user\":\"nmahilani\",\"jobSla\":{\"durationType\":\"Perpetual\",\"runtimeLimitSecs\":\"0\",\"minRuntimeSecs\":\"0\",\"userProvidedType\":\"\"}}";

    // Full job submit with an explicit two-stage scheduling info (stage 0 job master + scalable stage 1).
    public static final String JOB_CLUSTER_SUBMIT = "{\"name\":\"sine-function\",\"user\":\"nmahilani\",\"jobJarFileLocation\":\"\",\"version\":\"0.2.9 2018-05-29 16:12:56\"," +
        "\"jobSla\":{\"durationType\":\"Perpetual\",\"runtimeLimitSecs\":\"0\",\"minRuntimeSecs\":\"0\",\"userProvidedType\":\"\"}," +
        "\"schedulingInfo\":{\"stages\":{\"0\":{\"numberOfInstances\":1,\"machineDefinition\":{\"cpuCores\":1,\"memoryMB\":200,\"diskMB\":1024,\"networkMbps\":128,\"numPorts\":\"1\"},\"scalable\":false}," +
        "\"1\":{\"numberOfInstances\":1,\"machineDefinition\":{\"cpuCores\":1,\"memoryMB\":200,\"diskMB\":1024,\"networkMbps\":128,\"numPorts\":\"1\"},\"scalable\":true," +
        "\"scalingPolicy\":{\"stage\":1,\"min\":1,\"max\":10,\"increment\":2,\"decrement\":1,\"coolDownSecs\":600," +
        "\"strategies\":{\"CPU\":{\"reason\":\"CPU\",\"scaleDownBelowPct\":15,\"scaleUpAbovePct\":75,\"rollingCount\":{\"count\":12,\"of\":20}}},\"enabled\":true}," +
        "\"softConstraints\":[\"M4Cluster\"],\"hardConstraints\":[]}}},\"parameters\":[{\"name\":\"useRandom\",\"value\":\"True\"}, {\"name\":\"periodInSeconds\",\"value\":2}],\"isReadyForJobMaster\":true}";

    public static final String JOB_CLUSTER_SUBMIT_NonExistent = "{\"name\":\"NonExistent\",\"user\":\"nmahilani\",\"jobJarFileLocation\":\"\",\"version\":\"0.2.9 2018-05-29 16:12:56\"," +
        "\"jobSla\":{\"durationType\":\"Perpetual\",\"runtimeLimitSecs\":\"0\",\"minRuntimeSecs\":\"0\",\"userProvidedType\":\"\"}," +
        "\"schedulingInfo\":{\"stages\":{\"0\":{\"numberOfInstances\":1,\"machineDefinition\":{\"cpuCores\":1,\"memoryMB\":200,\"diskMB\":1024,\"networkMbps\":128,\"numPorts\":\"1\"},\"scalable\":false}," +
        "\"1\":{\"numberOfInstances\":1,\"machineDefinition\":{\"cpuCores\":1,\"memoryMB\":200,\"diskMB\":1024,\"networkMbps\":128,\"numPorts\":\"1\"},\"scalable\":true," +
        "\"scalingPolicy\":{\"stage\":1,\"min\":1,\"max\":10,\"increment\":2,\"decrement\":1,\"coolDownSecs\":600," +
        "\"strategies\":{\"CPU\":{\"reason\":\"CPU\",\"scaleDownBelowPct\":15,\"scaleUpAbovePct\":75,\"rollingCount\":{\"count\":12,\"of\":20}}},\"enabled\":true}," +
        "\"softConstraints\":[\"M4Cluster\"],\"hardConstraints\":[]}}},\"parameters\":[{\"name\":\"useRandom\",\"value\":\"True\"}, {\"name\":\"periodInSeconds\",\"value\":2}],\"isReadyForJobMaster\":true}";
}
4,249
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/api/akka/payloads/AgentClusterPayloads.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.api.akka.payloads;

/**
 * Canned request payloads for agent-cluster API tests.
 */
public class AgentClusterPayloads {

    /** JSON array of agent cluster names to mark as active. */
    public static final String SET_ACTIVE =
        "[\"mantistestagent-main-ec2-1\",\"mantistestagent-main-ec2-2\"]";
}
4,250
0
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/test/java/io/mantisrx/master/events/WorkerRegistryV2Test.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.events; import akka.actor.ActorRef; import akka.actor.ActorSystem; import akka.testkit.javadsl.TestKit; import io.mantisrx.shaded.com.google.common.collect.Lists; import com.netflix.mantis.master.scheduler.TestHelpers; import io.mantisrx.master.jobcluster.WorkerInfoListHolder; import io.mantisrx.master.jobcluster.job.IMantisStageMetadata; import io.mantisrx.master.jobcluster.job.JobState; import io.mantisrx.master.jobcluster.job.JobTestHelper; import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata; import io.mantisrx.master.jobcluster.job.worker.JobWorker; import io.mantisrx.master.jobcluster.job.worker.WorkerState; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto; import io.mantisrx.master.jobcluster.proto.JobClusterProto; import io.mantisrx.runtime.MachineDefinition; import io.mantisrx.runtime.descriptor.SchedulingInfo; import io.mantisrx.runtime.descriptor.StageScalingPolicy; import io.mantisrx.server.core.JobCompletedReason; import io.mantisrx.server.core.domain.WorkerId; import io.mantisrx.server.master.domain.JobId; import io.mantisrx.server.master.persistence.IMantisStorageProvider; import io.mantisrx.server.master.persistence.MantisJobStore; import io.mantisrx.server.master.persistence.exceptions.InvalidJobException; import io.mantisrx.server.master.scheduler.MantisScheduler; import 
io.mantisrx.server.master.scheduler.WorkerRegistry; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import java.util.*; import java.util.concurrent.*; import static io.mantisrx.master.events.LifecycleEventsProto.StatusEvent.StatusEventType.INFO; import static io.mantisrx.master.jobcluster.job.worker.MantisWorkerMetadataImpl.MANTIS_SYSTEM_ALLOCATED_NUM_PORTS; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS; import static junit.framework.TestCase.assertTrue; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; public class WorkerRegistryV2Test { static ActorSystem system; private static TestKit probe; private static MantisJobStore jobStore; private static IMantisStorageProvider storageProvider; private static final String user = "mantis"; @BeforeClass public static void setup() { system = ActorSystem.create(); probe = new TestKit(system); // JobTestHelper.createDirsIfRequired(); TestHelpers.setupMasterConfig(); // storageProvider = new MantisStorageProviderAdapter(new io.mantisrx.server.master.store.SimpleCachedFileStorageProvider(), eventPublisher); // jobStore = new MantisJobStore(storageProvider); } @AfterClass public static void tearDown() { //((SimpleCachedFileStorageProvider)storageProvider).deleteAllFiles(); //JobTestHelper.deleteAllFiles(); TestKit.shutdownActorSystem(system); system = null; } @Test public void testGetRunningCount() { WorkerRegistryV2 workerRegistryV2 = new WorkerRegistryV2(); initRegistryWithWorkers(workerRegistryV2,"testGetRunningCount-1", 5); assertEquals(5, workerRegistryV2.getNumRunningWorkers()); } @Test public void testIsWorkerValid() { JobId jId = new JobId("testIsWorkerValid",1); WorkerRegistryV2 workerRegistryV2 = new WorkerRegistryV2(); initRegistryWithWorkers(workerRegistryV2,"testIsWorkerValid-1", 5); for(int i=0; i<5; i++) { 
assertTrue(workerRegistryV2.isWorkerValid(new WorkerId(jId.getId(),i,i+5))); } } @Test public void testGetAllRunningWorkers() { WorkerRegistryV2 workerRegistryV2 = new WorkerRegistryV2(); initRegistryWithWorkers(workerRegistryV2,"testGetAllRunningWorkers-1", 5); Set<WorkerId> allRunningWorkers = workerRegistryV2.getAllRunningWorkers(); assertEquals(5, allRunningWorkers.size()); } @Test public void testGetSlaveIdMappings() { WorkerRegistryV2 workerRegistryV2 = new WorkerRegistryV2(); initRegistryWithWorkers(workerRegistryV2,"testGetSlaveIdMappings-1", 5); Map<WorkerId, String> workerIdToSlaveIdMap = workerRegistryV2.getAllRunningWorkerSlaveIdMappings(); assertEquals(5, workerIdToSlaveIdMap.size()); for(int i=0; i<5; i++) { assertEquals("slaveId-"+i, workerIdToSlaveIdMap.get(new WorkerId("testGetSlaveIdMappings-1",i,i+5))); } } @Test public void testGetAcceptedAt() { WorkerRegistryV2 workerRegistryV2 = new WorkerRegistryV2(); initRegistryWithWorkers(workerRegistryV2,"testGetAcceptedAt-1", 5); Optional<Long> acceptedAt = workerRegistryV2.getAcceptedAt(new WorkerId("testGetAcceptedAt-1", 0, 5)); assertTrue(acceptedAt.isPresent()); assertEquals(new Long(0), acceptedAt.get()); // try an invalid worker acceptedAt = workerRegistryV2.getAcceptedAt(new WorkerId("testGetAcceptedAt-1",10,1)); assertFalse(acceptedAt.isPresent()); } @Test public void testJobCompleteCleanup() { WorkerRegistryV2 workerRegistryV2 = new WorkerRegistryV2(); JobId jobId = new JobId("testJobCompleteCleanup", 1); initRegistryWithWorkers(workerRegistryV2, "testJobCompleteCleanup-1", 5); assertEquals(5, workerRegistryV2.getNumRunningWorkers()); workerRegistryV2.process(new LifecycleEventsProto.JobStatusEvent(INFO, "job shutdown", jobId, JobState.Failed)); assertEquals(0, workerRegistryV2.getNumRunningWorkers()); } @Test public void testJobScaleUp() throws Exception, InvalidJobException, io.mantisrx.runtime.command.InvalidJobException { WorkerRegistryV2 workerRegistryV2 = new WorkerRegistryV2(); 
LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new DummyWorkerEventSubscriberImpl(workerRegistryV2)); Map<StageScalingPolicy.ScalingReason, StageScalingPolicy.Strategy> smap = new HashMap<>(); smap.put(StageScalingPolicy.ScalingReason.CPU, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.CPU, 0.5, 0.75, null)); smap.put(StageScalingPolicy.ScalingReason.DataDrop, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.DataDrop, 0.0, 2.0, null)); SchedulingInfo sInfo = new SchedulingInfo.Builder() .numberOfStages(1) .multiWorkerScalableStageWithConstraints(1, new MachineDefinition(1.0,1.0,1.0,3), Lists.newArrayList(), Lists.newArrayList(), new StageScalingPolicy(1, 0, 10, 1, 1, 0, smap)) .build(); String clusterName = "testJobScaleUp"; MantisScheduler schedulerMock = mock(MantisScheduler.class); MantisJobStore jobStoreMock = mock(MantisJobStore.class); ActorRef jobActor = JobTestHelper.submitSingleStageScalableJob(system,probe, clusterName, sInfo, schedulerMock, jobStoreMock, eventPublisher); assertEquals(2, workerRegistryV2.getNumRunningWorkers()); // send scale up request jobActor.tell(new JobClusterManagerProto.ScaleStageRequest(clusterName+"-1", 1, 2, "", ""), probe.getRef()); JobClusterManagerProto.ScaleStageResponse scaleResp = probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class); System.out.println("ScaleupResp " + scaleResp.message); assertEquals(SUCCESS, scaleResp.responseCode); assertEquals(2,scaleResp.getActualNumWorkers()); JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe,jobActor,clusterName+"-1",0,new WorkerId(clusterName+"-1",1,3)); jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("user", new JobId(clusterName,1)),probe.getRef()); JobClusterManagerProto.GetJobDetailsResponse resp = probe.expectMsgClass(JobClusterManagerProto.GetJobDetailsResponse.class); Map<Integer, ? 
extends IMantisStageMetadata> stageMetadata = resp.getJobMetadata().get().getStageMetadata(); assertEquals(2, stageMetadata.get(1).getAllWorkers().size()); int cnt = 0; for(int i=0; i<50; i++) { cnt++; if(workerRegistryV2.getNumRunningWorkers() == 3) { break; } } assertTrue(cnt < 50); } @Test public void testJobScaleDown() throws Exception { WorkerRegistryV2 workerRegistryV2 = new WorkerRegistryV2(); LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new DummyWorkerEventSubscriberImpl(workerRegistryV2)); Map<StageScalingPolicy.ScalingReason, StageScalingPolicy.Strategy> smap = new HashMap<>(); smap.put(StageScalingPolicy.ScalingReason.CPU, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.CPU, 0.5, 0.75, null)); smap.put(StageScalingPolicy.ScalingReason.DataDrop, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.DataDrop, 0.0, 2.0, null)); SchedulingInfo sInfo = new SchedulingInfo.Builder() .numberOfStages(1) .multiWorkerScalableStageWithConstraints(2, new MachineDefinition(1.0,1.0,1.0,3), Lists.newArrayList(), Lists.newArrayList(), new StageScalingPolicy(1, 0, 10, 1, 1, 0, smap)) .build(); String clusterName = "testJobScaleDown"; MantisScheduler schedulerMock = mock(MantisScheduler.class); MantisJobStore jobStoreMock = mock(MantisJobStore.class); ActorRef jobActor = JobTestHelper.submitSingleStageScalableJob(system,probe, clusterName, sInfo, schedulerMock, jobStoreMock, eventPublisher); assertEquals(3, workerRegistryV2.getNumRunningWorkers()); // send scale down request jobActor.tell(new JobClusterManagerProto.ScaleStageRequest(clusterName+"-1",1, 1, "", ""), probe.getRef()); JobClusterManagerProto.ScaleStageResponse scaleResp = probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class); System.out.println("ScaleDownResp " + scaleResp.message); assertEquals(SUCCESS, scaleResp.responseCode); 
assertEquals(1,scaleResp.getActualNumWorkers()); jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("user", new JobId(clusterName,1)),probe.getRef()); JobClusterManagerProto.GetJobDetailsResponse resp = probe.expectMsgClass(JobClusterManagerProto.GetJobDetailsResponse.class); Map<Integer, ? extends IMantisStageMetadata> stageMetadata = resp.getJobMetadata().get().getStageMetadata(); assertEquals(1, stageMetadata.get(1).getAllWorkers().size()); int cnt = 0; for(int i=0; i<50; i++) { cnt++; if(workerRegistryV2.getNumRunningWorkers() == 2) { break; } } assertTrue(cnt < 50); // assertEquals(2, WorkerRegistryV2.INSTANCE.getNumRunningWorkers()); } @Test public void testJobShutdown() { WorkerRegistryV2 workerRegistryV2 = new WorkerRegistryV2(); LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new DummyWorkerEventSubscriberImpl(workerRegistryV2)); Map<StageScalingPolicy.ScalingReason, StageScalingPolicy.Strategy> smap = new HashMap<>(); smap.put(StageScalingPolicy.ScalingReason.CPU, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.CPU, 0.5, 0.75, null)); smap.put(StageScalingPolicy.ScalingReason.DataDrop, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.DataDrop, 0.0, 2.0, null)); SchedulingInfo sInfo = new SchedulingInfo.Builder() .numberOfStages(1) .multiWorkerScalableStageWithConstraints(1, new MachineDefinition(1.0,1.0,1.0,3), Lists.newArrayList(), Lists.newArrayList(), new StageScalingPolicy(1, 0, 10, 1, 1, 0, smap)) .build(); String clusterName = "testJobShutdown"; MantisScheduler schedulerMock = mock(MantisScheduler.class); MantisJobStore jobStoreMock = mock(MantisJobStore.class); try { ActorRef jobActor = JobTestHelper.submitSingleStageScalableJob(system,probe, clusterName, sInfo, schedulerMock, jobStoreMock, eventPublisher); assertEquals(2, workerRegistryV2.getNumRunningWorkers()); jobActor.tell(new 
JobClusterProto.KillJobRequest( new JobId(clusterName,1), "test reason", JobCompletedReason.Normal, "nj", probe.getRef()), probe.getRef()); probe.expectMsgClass(JobClusterProto.KillJobResponse.class); Thread.sleep(1000); int cnt = 0; for(int i=0; i<100; i++) { cnt++; if(workerRegistryV2.getNumRunningWorkers() == 0) { break; } } assertTrue(cnt < 100); // assertEquals(0, WorkerRegistryV2.INSTANCE.getNumRunningWorkers()); } catch (Exception e) { e.printStackTrace(); } } // @Test public void multiThreadAccessTest() { WorkerRegistryV2 workerRegistryV2 = new WorkerRegistryV2(); CountDownLatch latch = new CountDownLatch(1); List<Writer> writerList = generateWriters(workerRegistryV2,4, latch); TotalWorkerCountReader reader = new TotalWorkerCountReader(workerRegistryV2, latch); ExecutorService fixedThreadPoolExecutor = Executors.newFixedThreadPool(5); try { Future<Integer> maxCountSeen = fixedThreadPoolExecutor.submit(reader); fixedThreadPoolExecutor.invokeAll(writerList); int expectedCount = workerRegistryV2.getNumRunningWorkers(); System.out.println("Actual no of workers " + workerRegistryV2.getNumRunningWorkers()); int maxSeenCount = maxCountSeen.get(); System.out.println("Max Count seen " + maxCountSeen.get()); assertEquals(expectedCount, maxSeenCount); } catch (InterruptedException e) { fail(); e.printStackTrace(); } catch (ExecutionException e) { fail(); e.printStackTrace(); } } List<Writer> generateWriters(WorkerEventSubscriber subscriber, int count, CountDownLatch latch) { List<Writer> writerList = new ArrayList<>(); for(int i=0; i<count ;i++) { JobId jId = new JobId("multiThreadAccessTest" + i, 1); Writer writer1 = new Writer(subscriber, jId, 10, latch); writerList.add(writer1); } return writerList; } private void initRegistryWithWorkers(WorkerRegistryV2 workerRegistryV2, String jobId, int noOfWorkers) { LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new 
NoOpWorkerEventSubscriberImpl()); JobId jId = JobId.fromId(jobId).get(); List<IMantisWorkerMetadata> workerMetadataList = new ArrayList<>(); for(int i=0; i<noOfWorkers; i++) { JobWorker jb = new JobWorker.Builder() .withAcceptedAt(i) .withJobId(jId) .withSlaveID("slaveId-" + i) .withState(WorkerState.Launched) .withWorkerIndex(i) .withWorkerNumber(i+5) .withStageNum(1) .withNumberOfPorts(1 + MANTIS_SYSTEM_ALLOCATED_NUM_PORTS) .withLifecycleEventsPublisher(eventPublisher) .build(); workerMetadataList.add(jb.getMetadata()); } LifecycleEventsProto.WorkerListChangedEvent workerListChangedEvent = new LifecycleEventsProto.WorkerListChangedEvent(new WorkerInfoListHolder(jId, workerMetadataList)); workerRegistryV2.process(workerListChangedEvent); } class DummyWorkerEventSubscriberImpl implements WorkerEventSubscriber { WorkerEventSubscriber workerRegistry; public DummyWorkerEventSubscriberImpl(WorkerEventSubscriber wr) { this.workerRegistry = wr; } @Override public void process(LifecycleEventsProto.WorkerListChangedEvent event) { workerRegistry.process(event); } @Override public void process(LifecycleEventsProto.JobStatusEvent statusEvent) { workerRegistry.process(statusEvent); } } class NoOpWorkerEventSubscriberImpl implements WorkerEventSubscriber { @Override public void process(LifecycleEventsProto.WorkerListChangedEvent event) { } @Override public void process(LifecycleEventsProto.JobStatusEvent statusEvent) { } } class Writer implements Callable<Void> { private final int noOfWorkers; private final JobId jobId; WorkerEventSubscriber subscriber; CountDownLatch latch; public Writer(WorkerEventSubscriber subscriber, JobId jobId, int totalWorkerCount, CountDownLatch latch) { this.subscriber = subscriber; this.jobId = jobId; this.noOfWorkers = totalWorkerCount; this.latch = latch; } @Override public Void call() throws Exception { LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new 
StatusEventSubscriberLoggingImpl(), new NoOpWorkerEventSubscriberImpl()); List<IMantisWorkerMetadata> workerMetadataList = new ArrayList<>(); for(int i=0; i<noOfWorkers; i++) { JobWorker jb = new JobWorker.Builder() .withAcceptedAt(i) .withJobId(jobId) .withSlaveID("slaveId-" + i) .withState(WorkerState.Launched) .withWorkerIndex(i) .withWorkerNumber(i+5) .withStageNum(1) .withNumberOfPorts(1 + MANTIS_SYSTEM_ALLOCATED_NUM_PORTS) .withLifecycleEventsPublisher(eventPublisher) .build(); workerMetadataList.add(jb.getMetadata()); } latch.await(); for(int j =1; j<=noOfWorkers; j++) { LifecycleEventsProto.WorkerListChangedEvent workerListChangedEvent = new LifecycleEventsProto.WorkerListChangedEvent(new WorkerInfoListHolder(jobId, workerMetadataList.subList(0, j))); subscriber.process(workerListChangedEvent); } // for(int j =noOfWorkers-1; j>0; j--) { // LifecycleEventsProto.WorkerListChangedEvent workerListChangedEvent = new LifecycleEventsProto.WorkerListChangedEvent(new WorkerInfoListHolder(jobId, workerMetadataList.subList(0, j))); // subscriber.process(workerListChangedEvent); // } return null; } } class TotalWorkerCountReader implements Callable<Integer> { private final WorkerRegistry registry; private final CountDownLatch latch; public TotalWorkerCountReader(WorkerRegistryV2 registry, CountDownLatch latch) { this.registry = registry; this.latch = latch; } @Override public Integer call() throws Exception { int max = 0; latch.countDown(); for(int i=0; i<100; i++) { int cnt = registry.getNumRunningWorkers(); System.out.println("Total Cnt " + cnt); if(cnt > max) { max = cnt; } } return max; } } }
4,251
0
Create_ds/mantis-control-plane/server/src/test/java/com/netflix/mantis/master
Create_ds/mantis-control-plane/server/src/test/java/com/netflix/mantis/master/scheduler/TestHelpers.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.mantis.master.scheduler;

import com.netflix.fenzo.VirtualMachineLease;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.master.jobcluster.job.MantisJobMetadataImpl;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.server.core.domain.JobMetadata;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.config.StaticPropertiesConfigurationFactory;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.mesos.VirtualMachineLeaseMesosImpl;
import io.mantisrx.server.master.scheduler.ScheduleRequest;
import org.apache.mesos.Protos;

import java.util.Collections;
import java.util.Optional;
import java.util.Properties;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

/**
 * Shared fixtures for scheduler unit tests: Mockito-backed Mesos leases,
 * fake {@link ScheduleRequest}s, and a static master configuration.
 */
public class TestHelpers {

    /**
     * Builds a Mockito mock of a Mesos resource lease whose getters return the
     * supplied resource values, backed by a real {@link Protos.Offer} so code
     * that inspects the raw offer also works.
     *
     * @param id          lease/offer id returned by {@code getId()}
     * @param hostname    hostname reported by the lease and the offer
     * @param vmId        slave/VM id reported by the lease and the offer
     * @param cpuCores    CPU cores advertised by the lease
     * @param memoryMB    memory (MB) advertised by the lease
     * @param networkMbps network bandwidth advertised by the lease
     * @param diskMB      disk (MB) advertised by the lease
     * @param range       single port range advertised by the lease
     * @return a fully stubbed lease mock
     */
    public static VirtualMachineLeaseMesosImpl createMockLease(final String id,
                                                               final String hostname,
                                                               final String vmId,
                                                               final double cpuCores,
                                                               final double memoryMB,
                                                               final double networkMbps,
                                                               final double diskMB,
                                                               final VirtualMachineLease.Range range) {
        final VirtualMachineLeaseMesosImpl lease = mock(VirtualMachineLeaseMesosImpl.class);
        when(lease.hostname()).thenReturn(hostname);
        when(lease.getId()).thenReturn(id);
        when(lease.cpuCores()).thenReturn(cpuCores);
        when(lease.diskMB()).thenReturn(diskMB);
        when(lease.networkMbps()).thenReturn(networkMbps);
        when(lease.memoryMB()).thenReturn(memoryMB);
        // no agent attributes are simulated
        when(lease.getAttributeMap()).thenReturn(Collections.emptyMap());
        when(lease.getVMID()).thenReturn(vmId);
        when(lease.portRanges()).thenReturn(Collections.singletonList(range));
        // Real protobuf offer so callers that unwrap the underlying Mesos offer see
        // consistent id/hostname/slave-id values.
        final Protos.Offer offer = Protos.Offer.newBuilder().setId(Protos.OfferID.newBuilder().setValue(id).build())
            .setFrameworkId(Protos.FrameworkID.newBuilder().setValue("TestFramework").build())
            .setHostname(hostname)
            .setSlaveId(Protos.SlaveID.newBuilder().setValue(vmId).build())
            .build();
        when(lease.getOffer()).thenReturn(offer);
        return lease;
    }

    /**
     * Creates a minimal, perpetual, single-worker {@link ScheduleRequest} for
     * the given worker id and machine definition.
     *
     * <p>NOTE(review): any exception during construction is printed and the
     * method returns {@code null}, which will surface later as an NPE at the
     * call site — callers should treat a null return as a test setup failure.
     *
     * @param workerId          identity of the worker being scheduled
     * @param stageNum          stage number the worker belongs to
     * @param numStages         total number of stages in the job
     * @param machineDefinition resources requested for the worker
     * @return the fake request, or {@code null} if building it failed
     */
    public static ScheduleRequest createFakeScheduleRequest(final WorkerId workerId,
                                                            final int stageNum,
                                                            final int numStages,
                                                            final MachineDefinition machineDefinition) {
        try {
            JobDefinition jobDefinition = new JobDefinition.Builder()
                .withArtifactName("jar")
                .withSchedulingInfo(new SchedulingInfo(Collections.singletonMap(0,
                    new StageSchedulingInfo(1, machineDefinition, Collections.emptyList(), Collections.emptyList(), null, false))
                ))
                .withJobSla(new JobSla(0, 0, null, MantisJobDurationType.Perpetual, null))
                .build();

            IMantisJobMetadata mantisJobMetadata = new MantisJobMetadataImpl.Builder()
                .withJobId(JobId.fromId(workerId.getJobId()).get())
                .withJobDefinition(jobDefinition)
                .build();

            return new ScheduleRequest(
                workerId,
                stageNum,
                numStages,
                new JobMetadata(mantisJobMetadata.getJobId().getId(),
                    mantisJobMetadata.getJobJarUrl(),
                    mantisJobMetadata.getTotalStages(),
                    mantisJobMetadata.getUser(),
                    mantisJobMetadata.getSchedulingInfo(),
                    mantisJobMetadata.getParameters(),
                    mantisJobMetadata.getSubscriptionTimeoutSecs(),
                    mantisJobMetadata.getMinRuntimeSecs()
                ),
                mantisJobMetadata.getSla().get().getDurationType(),
                machineDefinition,
                Collections.emptyList(),
                Collections.emptyList(),
                0, Optional.empty()
            );
        } catch (Exception e) {
            e.printStackTrace();
            return null;
        }
    }

    /**
     * Initializes {@link ConfigurationProvider} with a static, local-mode
     * master configuration suitable for unit tests (noop storage provider,
     * disabled stage-assignment refresh, short mesos lease expiry).
     */
    public static void setupMasterConfig() {
        final Properties props = new Properties();
        props.setProperty("mantis.master.consoleport", "8080");
        props.setProperty("mantis.master.apiport", "7070");
        props.setProperty("mantis.master.metrics.port", "7102");
        props.setProperty("mantis.master.apiportv2", "7075");
        props.setProperty("mantis.master.schedInfoPort", "7076");
        props.setProperty("mantis.master.workqueuelength", "100");
        // no-op storage so tests never touch persistent state
        props.setProperty("mantis.master.storageProvider", "io.mantisrx.server.master.store.NoopStorageProvider");
        props.setProperty("mantis.master.api.status.path", "api/postjobstatus");
        props.setProperty("mantis.master.mesos.failover.timeout.ms", "1000.0");
        props.setProperty("mantis.worker.executor.name", "Mantis Worker Executor");
        props.setProperty("mantis.localmode", "true");
        props.setProperty("mantis.zookeeper.connectionTimeMs", "1000");
        props.setProperty("mantis.zookeeper.connection.retrySleepMs", "100");
        props.setProperty("mantis.zookeeper.connection.retryCount", "3");
        // connect string is never dialed in local mode; kept for config completeness
        props.setProperty("mantis.zookeeper.connectString", "ec2-50-19-255-1.compute-1.amazonaws.com:2181,ec2-54-235-159-245.compute-1.amazonaws.com:2181,ec2-50-19-255-97.compute-1.amazonaws.com:2181,ec2-184-73-152-248.compute-1.amazonaws.com:2181,ec2-50-17-247-179.compute-1.amazonaws.com:2181");
        props.setProperty("mantis.zookeeper.root", "/mantis/master");
        props.setProperty("mantis.zookeeper.leader.election.path", "/hosts");
        props.setProperty("mantis.zookeeper.leader.announcement.path", "/leader");
        props.setProperty("mesos.master.location", "127.0.0.1:5050");
        props.setProperty("mesos.worker.executorscript", "startup.sh");
        props.setProperty("mesos.worker.installDir", "/tmp/mantisWorkerInstall");
        props.setProperty("mantis.master.framework.name", "MantisFramework");
        props.setProperty("mesos.worker.timeoutSecondsToReportStart", "5");
        props.setProperty("mesos.lease.offer.expiry.secs", "1");
        // -1 disables the periodic stage-assignment refresh during tests
        props.setProperty("mantis.master.stage.assignment.refresh.interval.ms", "-1");

        ConfigurationProvider.initialize(new StaticPropertiesConfigurationFactory(props));
    }

//    public static MantisSchedulerFenzoImpl createMantisScheduler(final VMResourceManager vmResourceManager,
//                                                                 final JobMessageRouter jobMessageRouter,
//                                                                 final WorkerRegistry workerRegistry,
//                                                                 final AgentClustersAutoScaler agentClustersAutoScaler) {
//        final ClusterResourceMetricReporter metricReporterMock = mock(ClusterResourceMetricReporter.class);
//
//        final SchedulingResultHandler schedulingResultHandler =
//            new SchedulingResultHandler(vmResourceManager, jobMessageRouter, workerRegistry);
//        TestHelpers.setupMasterConfig();
//        final MantisSchedulerFenzoImpl mantisScheduler = new MantisSchedulerFenzoImpl(vmResourceManager,
//            schedulingResultHandler,
//            metricReporterMock,
//            agentClustersAutoScaler,
//            workerRegistry);
//        mantisScheduler.start();
//        return mantisScheduler;
//    }
}
4,252
0
Create_ds/mantis-control-plane/server/src/test/java/com/netflix/mantis/master
Create_ds/mantis-control-plane/server/src/test/java/com/netflix/mantis/master/scheduler/MantisSchedulerFenzoImplTest.java
//package com.netflix.mantis.master.scheduler; // //import com.netflix.fenzo.VirtualMachineLease; //import com.netflix.mantis.master.JobMessageRouter; //import com.netflix.mantis.master.WorkerRegistry; //import com.netflix.mantis.master.jobcluster.job.worker.events.WorkerLaunchFailed; //import com.netflix.mantis.master.jobcluster.job.worker.events.WorkerLaunched; //import com.netflix.mantis.master.jobcluster.job.worker.events.WorkerUnscheduleable; //import com.netflix.mantis.master.resourcemgmt.VMResourceManager; // // //import io.mantisrx.runtime.MachineDefinition; //import io.mantisrx.server.master.AgentClustersAutoScaler; //import io.mantisrx.server.master.LaunchTaskException; //import io.mantisrx.server.master.config.ConfigurationProvider; //import io.mantisrx.server.master.domain.JobId; //import io.mantisrx.server.master.domain.WorkerId; //import io.mantisrx.server.master.domain.WorkerPorts; //import io.mantisrx.server.master.mesos.VirtualMachineLeaseMesosImpl; //import org.junit.Test; // //import java.net.MalformedURLException; //import java.util.Arrays; //import java.util.Collections; //import java.util.Optional; //import java.util.function.Consumer; // //import static org.mockito.Mockito.*; // //public class MantisSchedulerFenzoImplTest { // final VMResourceManager vmResourceManagerMock = mock(VMResourceManager.class); // final JobMessageRouter jobMessageRouterMock = mock(JobMessageRouter.class); // final WorkerRegistry workerRegistryMock = mock(WorkerRegistry.class); // final AgentClustersAutoScaler agentClustersAutoScalerMock = mock(AgentClustersAutoScaler.class); // // public void runTestCase(final MantisSchedulerFenzoImpl mantisSchedulerFenzo, // final Consumer<MantisSchedulerFenzoImpl> consumer) { // consumer.accept(mantisSchedulerFenzo); // mantisSchedulerFenzo.shutdown(); // } // // @Test // public void testWorkerLaunchSuccess() throws MalformedURLException { // final JobId jobId = JobId.fromId("TestJobCluster-1").get(); // final WorkerId workerId = 
new WorkerId(jobId, 1, 2); // final String fakeHostname = "127.0.0.1"; // final String fakeVMId = "VM_ID"; // // VirtualMachineLeaseMesosImpl leaseMock = TestHelpers.createMockLease("lease_id", fakeHostname, fakeVMId, 4.0, // 12000, 1024, 1024, new VirtualMachineLease.Range(15000, 15010)); // //// jobLocatorMock.locateJob(jobId); //// when(jobLocatorMock.locateJob(jobId)).thenReturn(jobManagerMock); // // when(workerRegistryMock.getAcceptedAt(workerId)).thenReturn(Optional.empty()); // final MantisSchedulerFenzoImpl schedulerFenzo = TestHelpers.createMantisScheduler(vmResourceManagerMock, jobMessageRouterMock, workerRegistryMock, agentClustersAutoScalerMock); // // runTestCase(schedulerFenzo, mantisScheduler -> { // // mantisScheduler.addOffers(Arrays.asList(leaseMock)); // ScheduleRequest fakeScheduleRequest = TestHelpers.createFakeScheduleRequest(workerId, 0, 1, new MachineDefinition(2, 1024, 128, 1024, 4)); // mantisScheduler.scheduleWorker(fakeScheduleRequest); // WorkerPorts expectedAssignedPorts = new WorkerPorts(Arrays.asList(15000, 15001, 15002)); // WorkerLaunched expectedLaunchedEvent = new WorkerLaunched(workerId, fakeHostname, fakeVMId, expectedAssignedPorts); // verify(jobMessageRouterMock, timeout(1_000).times(1)).routeWorkerEvent(expectedLaunchedEvent); // verifyNoMoreInteractions(jobMessageRouterMock); // }); // } // // // // @Test // public void testWorkerLaunchFailed() throws MalformedURLException { // final JobId jobId = JobId.fromId("TestJobCluster-1").get(); // final WorkerId workerId = new WorkerId(jobId, 1, 2); // final String fakeHostname = "127.0.0.1"; // final String fakeVMId = "VM_ID"; // WorkerPorts workerPorts = new WorkerPorts(Arrays.asList(15000, 15001, 15002)); // // VirtualMachineLeaseMesosImpl leaseMock = TestHelpers.createMockLease("lease_id", fakeHostname, fakeVMId, 4.0, // 12000, 1024, 1024, new VirtualMachineLease.Range(15000, 15010)); // // // when(jobLocatorMock.locateJob(jobId)).thenReturn(jobManagerMock); // 
when(workerRegistryMock.getAcceptedAt(workerId)).thenReturn(Optional.empty()); // // ScheduleRequest fakeScheduleRequest = TestHelpers.createFakeScheduleRequest(workerId, 0, 1, new MachineDefinition(2, 1024, 128, 1024, 4)); // // // Simulate Mesos launch failure, should trigger a WorkerLaunched event followed by a WorkerLaunchFailed event // when(vmResourceManagerMock.launchTasks(Arrays.asList(new LaunchTaskRequest(fakeScheduleRequest, workerPorts)), Arrays.asList(leaseMock))) // .thenReturn(Collections.singletonMap(fakeScheduleRequest, new LaunchTaskException("fake exception", new IllegalStateException()))); // // final MantisSchedulerFenzoImpl schedulerFenzo = TestHelpers.createMantisScheduler(vmResourceManagerMock, jobMessageRouterMock, workerRegistryMock, agentClustersAutoScalerMock); // // runTestCase(schedulerFenzo, mantisScheduler -> { // // mantisScheduler.addOffers(Arrays.asList(leaseMock)); // mantisScheduler.scheduleWorker(fakeScheduleRequest); // WorkerPorts expectedAssignedPorts = new WorkerPorts(Arrays.asList(15000, 15001, 15002)); // WorkerLaunched expectedLaunchedEvent = new WorkerLaunched(workerId, fakeHostname, fakeVMId, expectedAssignedPorts); // WorkerLaunchFailed expectedLaunchFailedEvent = new WorkerLaunchFailed(workerId, String.format("%s failed due to fake exception", workerId.toString())); // verify(jobMessageRouterMock, timeout(1_000).times(1)).routeWorkerEvent(expectedLaunchedEvent); // verify(jobMessageRouterMock, timeout(1_000).times(1)).routeWorkerEvent(expectedLaunchFailedEvent); // }); // } // // // @Test // public void testWorkerUnscheduleable() throws MalformedURLException { // final JobId jobId = JobId.fromId("TestJobCluster-1").get(); // final WorkerId workerId = new WorkerId(jobId, 1, 2); // final String fakeHostname = "127.0.0.1"; // final String fakeVMId = "VM_ID"; // final int requestedMemoryMB = 1024; // final int memoryFromResourceOffer = requestedMemoryMB / 2; // // VirtualMachineLeaseMesosImpl leaseMock = 
TestHelpers.createMockLease("lease_id", fakeHostname, fakeVMId, 4.0, // memoryFromResourceOffer, 1024, 1024, new VirtualMachineLease.Range(15000, 15010)); // // JobManager jobManagerMock = mock(JobManager.class); // //when(jobLocatorMock.locateJob(jobId)).thenReturn(jobManagerMock); // when(workerRegistryMock.getAcceptedAt(workerId)).thenReturn(Optional.empty()); // // final MantisSchedulerFenzoImpl schedulerFenzo = TestHelpers.createMantisScheduler(vmResourceManagerMock, jobMessageRouterMock, workerRegistryMock, agentClustersAutoScalerMock); // // runTestCase(schedulerFenzo, mantisScheduler -> { // mantisScheduler.addOffers(Arrays.asList(leaseMock)); // ScheduleRequest fakeScheduleRequest = TestHelpers.createFakeScheduleRequest(workerId, 0, 1, new MachineDefinition(2, requestedMemoryMB, 128, 1024, 4)); // mantisScheduler.scheduleWorker(fakeScheduleRequest); // WorkerUnscheduleable expectedWorkerEvent = new WorkerUnscheduleable(workerId); // verify(jobMessageRouterMock, timeout(1_000).times(1)).routeWorkerEvent(expectedWorkerEvent); // verifyNoMoreInteractions(jobMessageRouterMock); // }); // } // // @Test // public void testLeaseRejectedAfterOfferExpiry() { // final String fakeHostname = "127.0.0.1"; // final String fakeVMId = "VM_ID"; // // VirtualMachineLeaseMesosImpl leaseMock = TestHelpers.createMockLease("lease_id", fakeHostname, fakeVMId, 4.0, // 12000, 1024, 1024, new VirtualMachineLease.Range(15000, 15010)); // // final MantisSchedulerFenzoImpl schedulerFenzo = TestHelpers.createMantisScheduler(vmResourceManagerMock, jobMessageRouterMock, workerRegistryMock, agentClustersAutoScalerMock); // // runTestCase(schedulerFenzo, mantisScheduler -> { // // mantisScheduler.addOffers(Arrays.asList(leaseMock)); // try { // Thread.sleep(ConfigurationProvider.getConfig().getMesosLeaseOfferExpirySecs()*1000 + 50); // } catch (InterruptedException e) { // e.printStackTrace(); // } // // verifyZeroInteractions(jobMessageRouterMock); // 
verifyZeroInteractions(workerRegistryMock); // verify(vmResourceManagerMock, timeout(10_000).times(1)).rejectLease(leaseMock); // verifyNoMoreInteractions(vmResourceManagerMock); // }); // } //}
4,253
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/MantisAuditLogEvent.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master; public class MantisAuditLogEvent { private final Type type; private final String operand; private final String data; public MantisAuditLogEvent(Type type, String operand, String data) { this.type = type; this.operand = operand; this.data = data; } public Type getType() { return type; } public String getOperand() { return operand; } public String getData() { return data; } public enum Type { NAMED_JOB_CREATE, NAMED_JOB_UPDATE, NAMED_JOB_DELETE, NAMED_JOB_DISABLED, NAMED_JOB_ENABLED, JOB_SUBMIT, JOB_TERMINATE, JOB_DELETE, JOB_SCALE_UP, JOB_SCALE_DOWN, JOB_SCALE_UPDATE, WORKER_START, WORKER_TERMINATE, CLUSTER_SCALE_UP, CLUSTER_SCALE_DOWN, CLUSTER_ACTIVE_VMS } }
4,254
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/LeadershipManagerZkImpl.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master;

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.concurrent.atomic.AtomicBoolean;

import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.master.config.MasterConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tracks whether this master instance currently holds leadership (as decided
 * by an external ZooKeeper election) and exposes the corresponding state to
 * the rest of the process. Publishes an {@code isLeaderGauge} metric (1 while
 * leader, 0 otherwise) and builds the {@link MasterDescription} announced to
 * other nodes.
 */
public class LeadershipManagerZkImpl implements ILeadershipManager {

    private static final Logger logger = LoggerFactory.getLogger(LeadershipManagerZkImpl.class);

    // Set to 1 on first leadership acquisition, 0 when leadership is lost.
    private final Gauge isLeaderGauge;
    // Guards the one-time leader bootstrap: becomeLeader()'s lifecycle call must
    // run at most once per process.
    private final AtomicBoolean firstTimeLeaderMode = new AtomicBoolean(false);
    private final MasterConfiguration config;
    private final ServiceLifecycle serviceLifecycle;
    // volatile: written by the election callback thread, read by API threads.
    private volatile boolean isLeader = false;
    private volatile boolean isReady = false;

    /**
     * @param config           master configuration supplying host/port details
     * @param serviceLifecycle lifecycle hook invoked once on first leadership
     */
    public LeadershipManagerZkImpl(final MasterConfiguration config,
                                   final ServiceLifecycle serviceLifecycle) {
        this.config = config;
        this.serviceLifecycle = serviceLifecycle;
        // Gauge is registered under MasterMain's name (not this class) so it shows
        // up alongside the main master metrics.
        Metrics m = new Metrics.Builder()
            .name(MasterMain.class.getCanonicalName())
            .addGauge("isLeaderGauge")
            .build();
        m = MetricsRegistry.getInstance().registerAndGet(m);
        isLeaderGauge = m.getGauge("isLeaderGauge");
    }

    /**
     * Called by the election machinery when this node wins leadership. The
     * lifecycle bootstrap runs only on the first win; repeated calls just log
     * a warning but still (re)assert the leader flag.
     */
    public void becomeLeader() {
        logger.info("Becoming leader now");
        if (firstTimeLeaderMode.compareAndSet(false, true)) {
            serviceLifecycle.becomeLeader();
            isLeaderGauge.set(1L);
        } else {
            logger.warn("Unexpected to be told to enter leader mode more than once, ignoring.");
        }
        isLeader = true;
    }

    /** @return true while this node holds leadership */
    public boolean isLeader() {
        return isLeader;
    }

    /** @return true once the leader has been marked ready to serve */
    public boolean isReady() {
        return isReady;
    }

    /** Marks the (already elected) leader as ready to serve requests. */
    public void setLeaderReady() {
        logger.info("marking leader READY");
        isReady = true;
    }

    /**
     * Called when leadership is lost. If this node ever ran as leader the
     * process exits: in-memory leader state cannot currently be reset safely,
     * so a supervised restart is the cheapest way back to a clean follower.
     */
    public void stopBeingLeader() {
        logger.info("Asked to stop being leader now");
        isReady = false;
        isLeader = false;
        isLeaderGauge.set(0L);
        if (!firstTimeLeaderMode.get()) {
            logger.warn("Unexpected to be told to stop being leader when we haven't entered leader mode before, ignoring.");
            return;
        }
        // Various services may have built in-memory state that is currently not easy to revert to initialization state.
        // Until we create such a lifecycle feature for each service and all of their references, best thing to do is to
        // exit the process and depend on a watcher process to restart us right away. Especially since restart isn't
        // very expensive.
        logger.error("Exiting due to losing leadership after running as leader");
        System.exit(1);
    }

    /**
     * @return the description (host, IP, ports, timestamp) this master
     *         announces to peers and clients
     */
    public MasterDescription getDescription() {
        return new MasterDescription(
            getHost(),
            getHostIP(),
            config.getApiPort(),
            config.getSchedInfoPort(),
            config.getApiPortV2(),
            config.getApiStatusUri(),
            config.getConsolePort(),
            System.currentTimeMillis()
        );
    }

    // Configured hostname if present, otherwise the local hostname.
    private String getHost() {
        String host = config.getMasterHost();
        if (host != null) {
            return host;
        }
        try {
            return InetAddress.getLocalHost().getHostName();
        } catch (UnknownHostException e) {
            throw new RuntimeException("Failed to get the host information: " + e.getMessage(), e);
        }
    }

    // Configured IP if present, otherwise the local host address.
    private String getHostIP() {
        String ip = config.getMasterIP();
        if (ip != null) {
            return ip;
        }
        try {
            return InetAddress.getLocalHost().getHostAddress();
        } catch (UnknownHostException e) {
            throw new RuntimeException("Failed to get the host information: " + e.getMessage(), e);
        }
    }
}
4,255
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/DurationTypeFitnessCalculator.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master; import com.netflix.fenzo.TaskAssignmentResult; import com.netflix.fenzo.TaskRequest; import com.netflix.fenzo.TaskTrackerState; import com.netflix.fenzo.VMTaskFitnessCalculator; import com.netflix.fenzo.VirtualMachineCurrentState; import io.mantisrx.runtime.MantisJobDurationType; import io.mantisrx.server.master.scheduler.ScheduleRequest; public class DurationTypeFitnessCalculator implements VMTaskFitnessCalculator { @Override public String getName() { return "Mantis Job Duration Type Task Fitness Calculator"; } @Override public double calculateFitness(TaskRequest taskRequest, VirtualMachineCurrentState targetVM, TaskTrackerState taskTrackerState) { MantisJobDurationType durationType = ((ScheduleRequest) taskRequest).getDurationType(); int totalTasks = 0; int sameTypeTasks = 0; for (TaskRequest request : targetVM.getRunningTasks()) { totalTasks++; if (((ScheduleRequest) request).getDurationType() == durationType) sameTypeTasks++; } for (TaskAssignmentResult result : targetVM.getTasksCurrentlyAssigned()) { totalTasks++; if (((ScheduleRequest) result.getRequest()).getDurationType() == durationType) sameTypeTasks++; } if (totalTasks == 0) return 0.9; // an arbitrary preferential value to indicate that a fresh new host is not perfect // fit but a better fit than a host that has tasks of different type return (double) sameTypeTasks / (double) totalTasks; } }
4,256
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/SchedulerCounters.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master;

import java.util.concurrent.atomic.AtomicInteger;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;

/**
 * Singleton holder for per-iteration scheduler statistics. During an
 * iteration callers accumulate resource-allocation trials via
 * {@link #incrementResourceAllocationTrials(int)}; at the end of the
 * iteration {@link #endIteration} snapshots everything into an immutable-ish
 * {@link IterationCounter} that can be serialized to JSON.
 */
public class SchedulerCounters {

    private static final SchedulerCounters instance = new SchedulerCounters();
    // Monotonically increasing iteration id.
    private final AtomicInteger iterationNumberCounter = new AtomicInteger();
    // Trials accumulated during the current iteration; reset to 0 on endIteration().
    private final AtomicInteger numResourceAllocationTrials = new AtomicInteger(0);
    // Snapshot of the most recently completed iteration.
    // NOTE(review): stays null until the first endIteration() call, so
    // toJsonString() throws NPE if invoked before then.
    private volatile IterationCounter counter = null;

    private SchedulerCounters() {
    }

    /** @return the process-wide singleton */
    public static SchedulerCounters getInstance() {
        return instance;
    }

    /**
     * Adds {@code delta} resource-allocation trials to the running total for
     * the current iteration.
     */
    public void incrementResourceAllocationTrials(int delta) {
        numResourceAllocationTrials.addAndGet(delta);
    }

    /** @return snapshot of the last completed iteration, or null before the first one */
    public IterationCounter getCounter() {
        return counter;
    }

    /**
     * Closes out the current scheduling iteration: snapshots the given counts
     * plus the accumulated trial count (which is reset to zero) into a fresh
     * {@link IterationCounter}, and bumps the iteration number.
     */
    void endIteration(int numWorkersToLaunch, int numWorkersLaunched, int numSlavesToUse, int numSlavesRejected) {
        counter = new IterationCounter(iterationNumberCounter.getAndIncrement(), numWorkersToLaunch, numWorkersLaunched,
            numSlavesToUse, numSlavesRejected, numResourceAllocationTrials.getAndSet(0));
    }

    // JSON form of the latest snapshot; NPEs if no iteration has completed yet.
    String toJsonString() {
        return counter.toJsonString();
    }

    /**
     * Snapshot of a single scheduling iteration's counts. Jackson-annotated
     * for (de)serialization; field names use "Slaves" while some creator
     * parameters are named "Offers" — the JSON property names are what matter.
     */
    public class IterationCounter {

        // Per-instance mapper, excluded from serialization of this snapshot.
        @JsonIgnore
        private final ObjectMapper mapper = new ObjectMapper();
        private int iterationNumber;
        private int numWorkersToLaunch;
        private int numWorkersLaunched;
        private int numSlavesToUse;
        private int numSlavesRejected;
        private int numResourceAllocations;

        @JsonCreator
        @JsonIgnoreProperties(ignoreUnknown = true)
        IterationCounter(@JsonProperty("iterationNumber") int iterationNumber,
                         @JsonProperty("numWorkersToLaunch") int numWorkersToLaunch,
                         @JsonProperty("numWorkersLaunched") int numWorkersLaunched,
                         @JsonProperty("numSlavesToUse") int numOffersToUse,
                         @JsonProperty("numSlavesRejected") int numOffersRejected,
                         @JsonProperty("numResourceAllocationTrials") int numResourceAllocations) {
            this.iterationNumber = iterationNumber;
            this.numWorkersToLaunch = numWorkersToLaunch;
            this.numWorkersLaunched = numWorkersLaunched;
            this.numSlavesToUse = numOffersToUse;
            this.numSlavesRejected = numOffersRejected;
            this.numResourceAllocations = numResourceAllocations;
            mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        }

        // Mutates this snapshot in place and bumps the iteration number.
        // NOTE(review): appears unused from within this file; numResourceAllocations
        // is deliberately left untouched here.
        void setCounters(int numWorkersToLaunch, int numWorkersLaunched, int numOffersToUse, int numOffersRejected) {
            this.iterationNumber++;
            this.numWorkersToLaunch = numWorkersToLaunch;
            this.numWorkersLaunched = numWorkersLaunched;
            this.numSlavesToUse = numOffersToUse;
            this.numSlavesRejected = numOffersRejected;
        }

        public int getIterationNumber() {
            return iterationNumber;
        }

        public int getNumWorkersToLaunch() {
            return numWorkersToLaunch;
        }

        public int getNumWorkersLaunched() {
            return numWorkersLaunched;
        }

        public int getNumSlavesToUse() {
            return numSlavesToUse;
        }

        public int getNumSlavesRejected() {
            return numSlavesRejected;
        }

        public int getNumResourceAllocations() {
            return numResourceAllocations;
        }

        /**
         * Serializes this snapshot to JSON; on (unexpected) serialization
         * failure falls back to a plain comma-separated listing of the counts.
         */
        public String toJsonString() {
            try {
                return mapper.writeValueAsString(this);
            } catch (JsonProcessingException e) {
                // shouldn't happen
                return iterationNumber + ", " + numWorkersToLaunch + ", " + numWorkersLaunched + ", " +
                    numSlavesToUse + ", " + numSlavesRejected + ", " + numResourceAllocations;
            }
        }
    }
}
4,257
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/MantisJobMgr.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master; public class MantisJobMgr { } // // //import java.io.IOException; //import java.util.ArrayList; //import java.util.HashMap; //import java.util.HashSet; //import java.util.LinkedList; //import java.util.List; //import java.util.Map; //import java.util.Optional; //import java.util.Set; //import java.util.concurrent.ConcurrentSkipListSet; //import java.util.concurrent.ExecutorService; //import java.util.concurrent.Executors; //import java.util.concurrent.atomic.AtomicBoolean; //import java.util.concurrent.atomic.AtomicLong; //import java.util.concurrent.atomic.AtomicReference; // //import com.fasterxml.jackson.databind.DeserializationFeature; //import com.fasterxml.jackson.databind.ObjectMapper; //import com.fasterxml.jackson.datatype.jdk8.Jdk8Module; //import com.google.common.util.concurrent.ThreadFactoryBuilder; //import com.netflix.fenzo.ConstraintEvaluator; //import com.netflix.fenzo.VMTaskFitnessCalculator; //import com.netflix.spectator.api.BasicTag; //import io.mantisrx.common.WorkerPorts; //import io.mantisrx.common.metrics.Counter; //import io.mantisrx.common.metrics.Metrics; //import io.mantisrx.common.metrics.MetricsRegistry; //import io.mantisrx.common.metrics.spectator.GaugeCallback; //import io.mantisrx.common.metrics.spectator.MetricGroupId; ////import io.mantisrx.master.jobcluster.job.JobActor; ////import 
io.mantisrx.master.jobcluster.job.WorkerResubmitRateLimiter; //import io.mantisrx.runtime.JobConstraints; //import io.mantisrx.runtime.MachineDefinition; //import io.mantisrx.runtime.MantisJobDefinition; //import io.mantisrx.runtime.MantisJobDurationType; //import io.mantisrx.runtime.MantisJobState; //import io.mantisrx.runtime.MigrationStrategy; //import io.mantisrx.runtime.descriptor.SchedulingInfo; //import io.mantisrx.runtime.descriptor.StageScalingPolicy; //import io.mantisrx.runtime.descriptor.StageSchedulingInfo; //import io.mantisrx.server.core.JobCompletedReason; //import io.mantisrx.server.core.JobSchedulingInfo; //import io.mantisrx.server.core.Status; //import io.mantisrx.server.core.StatusPayloads; //import io.mantisrx.server.core.WorkerAssignments; //import io.mantisrx.server.core.WorkerHost; //import io.mantisrx.server.core.domain.JobMetadata; //import io.mantisrx.server.core.domain.WorkerId; //import io.mantisrx.server.master.agentdeploy.MigrationStrategyFactory; //import io.mantisrx.server.master.config.ConfigurationProvider; //import io.mantisrx.server.master.config.MasterConfiguration; //import io.mantisrx.server.master.domain.WorkerRequest; //import io.mantisrx.server.master.heartbeathandlers.HeartbeatPayloadHandler; //import io.mantisrx.server.master.scheduler.MantisScheduler; //import io.mantisrx.server.master.scheduler.ScheduleRequest; //import io.mantisrx.server.master.store.InvalidJobException; //import io.mantisrx.server.master.store.InvalidJobStateChangeException; //import io.mantisrx.server.master.store.JobAlreadyExistsException; //import io.mantisrx.server.master.store.MantisJobMetadata; //import io.mantisrx.server.master.store.MantisJobMetadataWritable; //import io.mantisrx.server.master.store.MantisJobStore; //import io.mantisrx.server.master.store.MantisStageMetadata; //import io.mantisrx.server.master.store.MantisStageMetadataWritable; //import io.mantisrx.server.master.store.MantisWorkerMetadata; //import 
io.mantisrx.server.master.store.MantisWorkerMetadataWritable; //import io.mantisrx.server.master.store.NamedJob; //import io.mantisrx.server.master.utils.MantisClock; //import io.mantisrx.server.master.utils.MantisSystemClock; //import io.reactivx.mantis.operators.OperatorOnErrorResumeNextViaFunction; //import org.HdrHistogram.SynchronizedHistogram; //import org.slf4j.Logger; //import org.slf4j.LoggerFactory; //import rx.Observable; //import rx.functions.Action0; //import rx.functions.Func1; //import rx.observers.SerializedObserver; //import rx.schedulers.Schedulers; //import rx.subjects.BehaviorSubject; //import rx.subjects.ReplaySubject; // // ///** // * Manages operational and informational aspects of a Mantis Job. // * It has at least the following responsibilities: // * <UL> // * <LI>Submit initial stages and workers needed for the job</LI> // * <LI>React to status reports on workers</LI> // * <LI>React to health reports on workers (in future)</LI> // * </UL> // * Also contained herein are job specific information such as status subject for the job. 
// */ //public class MantisJobMgr { // // public static class HeartbeatsStatus { // // private final int totalWorkers; // private final int heartbeatsFrom; // // public HeartbeatsStatus(int totalWorkers, int heartbeatsFrom) { // this.totalWorkers = totalWorkers; // // if job scaled down, total could be < heartbeatsFrom // this.heartbeatsFrom = Math.min(heartbeatsFrom, totalWorkers); // } // // public int getTotalWorkers() { // return totalWorkers; // } // // public int getHeartbeatsFrom() { // return heartbeatsFrom; // } // } // // private static final ExecutorService mantisJobMgrExecutorService = Executors.newFixedThreadPool(10, // new ThreadFactoryBuilder().setNameFormat("MantisJobMgr-pool-%d").build()); // // private static final Logger logger = LoggerFactory.getLogger(MantisJobMgr.class); // private static final long DISABLE_FOR_MILIS = 60000; // private final String jobId; // private final NamedJob namedJob; // private final MantisScheduler scheduler; // private final MantisJobDefinition jobDefinition; // private final ReplaySubject<Status> statusReplaySubject; // private final SerializedObserver<Status> statusSerializedObserver; // private MantisJobStatus jobStatus; // private final AtomicBoolean initialized; // private volatile MantisJobStore store = null; // private final JobActor.WorkerNumberGenerator workerNumberGenerator; // private final VirtualMachineMasterService vmService; // private final BehaviorSubject<JobSchedulingInfo> schedulingInfoBehaviorSubject; // private Map<Integer, WorkerAssignments> stageAssignments = new HashMap<>(); // private final ObjectMapper mapper = new ObjectMapper(); // private final Counter numWorkerResubmissions; // private final Counter numWorkerResubmitLimitReached; // private final Counter numWorkerTerminated; // private final Counter numScaleStage; // private final Counter workerLaunchToStartMillis; // private final Counter numHeartBeatsReceived; // private final WorkerResubmitRateLimiter resubmitRateLimiter = 
WorkerResubmitRateLimiter.getInstance(); // private final ConcurrentSkipListSet<Integer> workersOnDisabledVMs = new ConcurrentSkipListSet<>(); // private final AtomicLong lastNewSubscriberAt = new AtomicLong(System.currentTimeMillis()); // private long subscriptionTimeoutSecs = 0L; // private final AtomicReference<ArrayList<MantisWorkerMetadata>> heartbeatReceipts = new AtomicReference<>(new ArrayList<MantisWorkerMetadata>()); // private final MetricGroupId metricsName; // private final int workerWritesBatchSize; // private boolean hasJobMaster = false; // private final MigrationStrategy migrationStrategy; // private final MantisClock clock = MantisSystemClock.INSTANCE; // private volatile long lastWorkerMigrationTimestamp = Long.MIN_VALUE; // private final SynchronizedHistogram workerLaunchToStartDistMillis = new SynchronizedHistogram(3_600_000L, 3); // // public MantisJobMgr(final String jobId, // final MantisJobDefinition jobDefinition, // final NamedJob namedJob, // final MantisScheduler scheduler, // final VirtualMachineMasterService vmService) { // mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); // mapper.registerModule(new Jdk8Module()); // this.jobId = jobId; // if (namedJob == null) // throw new NullPointerException("Job cluster does not exist for job " + jobId); // this.namedJob = namedJob; // this.scheduler = scheduler; // this.vmService = vmService; // this.jobDefinition = jobDefinition; // final String jobIdValue = Optional.ofNullable(jobId).orElse("none"); // metricsName = new MetricGroupId(MantisJobMgr.class.getCanonicalName(), new BasicTag("jobId", jobIdValue)); // Metrics m = new Metrics.Builder() // .id(metricsName) // .addCounter("numWorkerResubmissions") // .addCounter("numWorkerResubmitLimitReached") // .addCounter("numWorkerTerminated") // .addCounter("numEphemeralJobTerminated") // .addCounter("numScaleStage") // .addCounter("workerLaunchToStartMillis") // .addCounter("numHeartBeatsReceived") // .addGauge(new 
GaugeCallback(metricsName, "workerLaunchToStartMillisP50", () -> (double) workerLaunchToStartDistMillis.getValueAtPercentile(50))) // .addGauge(new GaugeCallback(metricsName, "workerLaunchToStartMillisP95", () -> (double) workerLaunchToStartDistMillis.getValueAtPercentile(95))) // .addGauge(new GaugeCallback(metricsName, "workerLaunchToStartMillisP99", () -> (double) workerLaunchToStartDistMillis.getValueAtPercentile(99))) // .addGauge(new GaugeCallback(metricsName, "workerLaunchToStartMillisMax", () -> (double) workerLaunchToStartDistMillis.getValueAtPercentile(100))) // .build(); // m = MetricsRegistry.getInstance().registerAndGet(m); // numWorkerResubmissions = m.getCounter("numWorkerResubmissions"); // numWorkerResubmitLimitReached = m.getCounter("numWorkerResubmitLimitReached"); // numWorkerTerminated = m.getCounter("numWorkerTerminated"); // numScaleStage = m.getCounter("numScaleStage"); // workerLaunchToStartMillis = m.getCounter("workerLaunchToStartMillis"); // numHeartBeatsReceived = m.getCounter("numHeartBeatsReceived"); // statusReplaySubject = ReplaySubject.create(); // this.statusSerializedObserver = new SerializedObserver<>(statusReplaySubject); // jobStatus = new MantisJobStatus(jobId, statusReplaySubject, (jobDefinition == null ? 
"null job" : jobDefinition.getName())); // // schedulingInfoBehaviorSubject = BehaviorSubject.create(new JobSchedulingInfo(jobId, new HashMap<>())); // logger.debug("Added behavior subject of job " + jobId + " to schedulingObserver"); // statusReplaySubject // .lift(new OperatorOnErrorResumeNextViaFunction<>(throwable -> { // logger.warn("Couldn't send status to assignmentChangeObservers: " + throwable.getMessage()); // return Observable.empty(); // })) // .subscribe(status -> stageAssignmentsSetter(status)); // initialized = new AtomicBoolean(false); // workerNumberGenerator = new WorkerNumberGenerator(jobId); // workerWritesBatchSize = ConfigurationProvider.getConfig().getWorkerWriteBatchSize(); // migrationStrategy = MigrationStrategyFactory.getStrategy(jobId, namedJob.getMigrationConfig()); // } // // public BehaviorSubject<JobSchedulingInfo> getSchedulingInfoSubject() { // return schedulingInfoBehaviorSubject; // } // // public String getJobId() { // return jobId; // } // // boolean isReady() { // if (!initialized.get()) // logger.info("Job " + jobId + " not ready to be dispatched"); // return initialized.get(); // } // // public MantisJobMetadata getJobMetadata() { // return store.getActiveJob(jobId); // } // // public Optional<MantisJobMetadata> getCompletedJobMetadata() { // try { // return Optional.ofNullable(store.getCompletedJob(jobId)); // } catch (Exception e) { // return Optional.empty(); // } // } // // public MantisJobDefinition getJobDefinition() { // return jobDefinition; // } // // public ReplaySubject<Status> getStatusSubject() { // return statusReplaySubject; // } // // public void setInvalidStatus(String msg) { // jobStatus.setFatalError(msg); // } // // public MantisJobStatus getJobStatus() { // return jobStatus; // } // // public List<? 
extends MantisWorkerMetadata> getArchivedWorkers() { // try { // return store.getArchivedWorkers(jobId); // } catch (IOException e) { // logger.error("Can't get archived workers - " + e.getMessage(), e); // } // return new ArrayList<>(); // } // // public WorkerAssignments getSink() { // MantisJobMetadata mjmd = store.getActiveJob(jobId); // return stageAssignments.get(mjmd.getNumStages()); // } // // private void stageAssignmentsSetter(Status status) { // MantisJobMetadataWritable jobMetadata = (MantisJobMetadataWritable) getJobMetadata(); // if (MantisJobState.isTerminalState(jobMetadata.getState())) { // schedulingInfoBehaviorSubject.onCompleted(); // return; // } // if (status.getWorkerNumber() == -1) // return; // MantisWorkerMetadata mwmd = null; // MantisStageMetadata msmd = null; // try { // mwmd = getMantisWorkerMetadataWritable(status.getWorkerNumber()); // msmd = jobMetadata.getStageMetadata(mwmd.getStageNum()); // } catch (InvalidJobException e) { // try { // mwmd = getArchivedWorker(status.getWorkerNumber()); // if (mwmd == null) { // logger.warn("Unexpected to not find worker " + status.getWorkerNumber() + " for job " + jobId); // } else { // logger.warn("Skipping setting assignments for archived worker"); // } // } catch (IOException e1) { // logger.warn("Can't get archive worker number " + status.getWorkerNumber() + " for job " + jobId, e1); // } // return; // } // if (mwmd.getStageNum() < 1) // return; // for now don't send assignments for stage 0 // status.setStageNum(mwmd.getStageNum()); // status.setWorkerIndex(mwmd.getWorkerIndex()); // // WorkerAssignments assignments = stageAssignments.get(status.getStageNum()); // if (assignments == null) { // assignments = new WorkerAssignments(status.getStageNum(), msmd.getNumWorkers(), new HashMap<Integer, WorkerHost>()); // stageAssignments.put(status.getStageNum(), assignments); // } // try (AutoCloseable l = jobMetadata.obtainLock()) { // assignments.setNumWorkers(msmd.getNumWorkers()); // 
assignments.setActiveWorkers(((MantisStageMetadataWritable) msmd).getNumActiveWorkers()); // } catch (Exception e) { // logger.warn(jobId + ": Unexpected error locking stage " + msmd.getStageNum() + ": " + e.getMessage()); // } // Map<Integer, WorkerHost> hosts = assignments.getHosts(); // hosts.put(mwmd.getWorkerNumber(), new WorkerHost(mwmd.getSlave(), mwmd.getWorkerIndex(), mwmd.getPorts(), // status.getState(), mwmd.getWorkerNumber(), mwmd.getMetricsPort(), mwmd.getCustomPort())); // // //logger.info("Sending scheduling change to behavior subject due to " + status); // if (schedulingInfoBehaviorSubject != null) { // schedulingInfoBehaviorSubject.onNext(new JobSchedulingInfo(jobId, new HashMap<>(stageAssignments))); // } // // remove from WorkerHost any workers that are in terminal state, notification went out for them once. // Set<Integer> keysToRem = new HashSet<>(); // for (Map.Entry<Integer, WorkerHost> entry : assignments.getHosts().entrySet()) { // if (MantisJobState.isTerminalState(entry.getValue().getState())) // keysToRem.add(entry.getKey()); // } // for (Integer k : keysToRem) // assignments.getHosts().remove(k); // } // // private List<? extends Object> getAllWorkers(Func1<MantisWorkerMetadata, Boolean> filter, // Func1<MantisWorkerMetadata, ? 
extends Object> mapFunc) { // MantisJobMetadata mjmd = store.getActiveJob(jobId); // List<Object> result = new ArrayList<>(); // for (MantisStageMetadata msmd : mjmd.getStageMetadata()) // for (MantisWorkerMetadata mwmd : msmd.getAllWorkers()) // if (filter.call(mwmd)) // result.add(mapFunc.call(mwmd)); // return result; // } // // @SuppressWarnings("unchecked") // public List<MantisWorkerMetadata> getAllRunningWorkers() { // List<MantisWorkerMetadata> result = (List<MantisWorkerMetadata>) getAllWorkers( // new Func1<MantisWorkerMetadata, Boolean>() { // @Override // public Boolean call(MantisWorkerMetadata mwmd) { // return MantisJobState.isRunningState(mwmd.getState()); // } // }, // new Func1<MantisWorkerMetadata, Object>() { // @Override // public Object call(MantisWorkerMetadata workerMetadata) { // return workerMetadata; // } // }); // return result; // } // // private boolean allWorkersStarted() { // for (MantisStageMetadata msmd : getJobMetadata().getStageMetadata()) { // for (MantisWorkerMetadata mwmd : msmd.getWorkerByIndexMetadataSet()) // if (mwmd.getState() != MantisJobState.Started) // return false; // } // return true; // } // // private boolean heartbeatTooOld(MantisWorkerMetadata mwmd) { // if (((MantisWorkerMetadataWritable) mwmd).getLastHeartbeatAt() < // (System.currentTimeMillis() - (3 * ConfigurationProvider.getConfig().getWorkerTimeoutSecs() * 1000))) // return true; // return false; // } // // private void scaleStages(MantisJobMetadata mjmd) { // if (!initialized.get()) // return; // not ready yet // final MantisJobMetadata jobMetadata = getJobMetadata(); // if (MantisJobState.isTerminalState(jobMetadata.getState())) // return; // // ensure that each stage has the right number of workers // // For now, the number of workers per stage is static (no auto scaling), just make sure there is a worker // // for each worker index. 
// boolean killTheJob = false; // String killMsg = ""; // for (MantisStageMetadata msmd : mjmd.getStageMetadata()) { // if (!killTheJob) { // try (AutoCloseable l = jobMetadata.obtainLock()) { // for (int wIndex = 0; wIndex < msmd.getNumWorkers(); wIndex++) { // boolean hasWorker = true; // MantisWorkerMetadata terminatedWorker = null; // try { // terminatedWorker = mjmd.getWorkerByIndex(msmd.getStageNum(), wIndex); // if (MantisJobState.isErrorState(terminatedWorker.getState())) // hasWorker = false; // if (terminatedWorker.getState() == MantisJobState.Completed) { // // worker complete, tear down job // killMsg = "Terminating job " + jobId + " due to worker # " + terminatedWorker.getWorkerNumber() + // " index " + terminatedWorker.getWorkerIndex() + " of stage " + terminatedWorker.getStageNum() + " completed"; // killTheJob = true; // break; // } // } catch (InvalidJobException e) { // hasWorker = false; // } // if (!hasWorker) { // // There isn't a worker for this index // int numPorts = msmd.getMachineDefinition().getNumPorts(); // try { // int workerNumber = workerNumberGenerator.getNextWorkerNumber(); // logger.info("creating non-existing worker for index " + wIndex + " workerNumber=" + // workerNumber + " for job " + jobId); // final WorkerRequest request; // if (terminatedWorker == null) { // logger.info("storing new worker " + workerNumber + " for index " + wIndex + " of job " + jobId); // request = createWorkerRequest(msmd, wIndex, mjmd.getUser(), Optional.empty()); // store.storeNewWorker(request); // } else { // logger.info("replacing worker " + terminatedWorker.getWorkerNumber() + " with new worker " + // workerNumber + " for index " + wIndex + " of job " + jobId); // request = createWorkerRequest(msmd, wIndex, mjmd.getUser(), terminatedWorker.getCluster()); // final MantisWorkerMetadata mwmd = store.replaceTerminatedWorker(request, terminatedWorker); // logger.info("Done replacing worker " + terminatedWorker.getWorkerNumber() + " with new worker " + 
// mwmd.getWorkerNumber() + " for index " + wIndex + " of job " + jobId); // } // queueTask(request); // } catch (Exception e1) { // logger.error("Can't store new worker for stage " + msmd.getStageNum() + ", workerIndex=" + // wIndex + " - " + e1.getMessage(), e1); // } // } // } // } catch (Exception e) { // logger.error("caught unexpected error ", e); // } // shouldn't happen // } // } // if (killTheJob) { // killJob("MantisMaster", killMsg); // } // } // // private WorkerRequest createWorkerRequest(final MantisStageMetadata msmd, // final int index, // final String user, // final Optional<String> preferredCluster) { // int workerNumber = workerNumberGenerator.getNextWorkerNumber(); // MantisJobMetadata mjmd = getJobMetadata(); // return new WorkerRequest(msmd.getMachineDefinition(), jobId, index, workerNumber, // mjmd.getJarUrl(), msmd.getStageNum(), msmd.getNumStages(), msmd.getNumWorkers(), // mjmd.getName(), msmd.getMachineDefinition().getNumPorts(), mjmd.getParameters(), mjmd.getSla(), // msmd.getHardConstraints(), msmd.getSoftConstraints(), jobDefinition.getSchedulingInfo(), // subscriptionTimeoutSecs, getMinRuntimeSecs(), mjmd.getSubmittedAt(), user, preferredCluster); // } // // private WorkerRequest createExistingWorkerRequest(final MantisStageMetadata msmd, // final int index, // final int workerNumber, // final String user, // final Optional<String> preferredCluster) { // MantisJobMetadata mjmd = getJobMetadata(); // return new WorkerRequest(msmd.getMachineDefinition(), jobId, index, workerNumber, // mjmd.getJarUrl(), msmd.getStageNum(), msmd.getNumStages(), msmd.getNumWorkers(), // mjmd.getName(), msmd.getMachineDefinition().getNumPorts(), mjmd.getParameters(), mjmd.getSla(), // msmd.getHardConstraints(), msmd.getSoftConstraints(), jobDefinition.getSchedulingInfo(), // subscriptionTimeoutSecs, getMinRuntimeSecs(), mjmd.getSubmittedAt(), user, preferredCluster); // } // // public int scaleUpStage(int stage, int increment, String reason) throws 
InvalidJobException { // if (!isActive() || increment < 1) // return 0; // final MantisJobMetadata jobMetadata = getJobMetadata(); // final MantisStageMetadataWritable msmd = (MantisStageMetadataWritable) jobMetadata.getStageMetadata(stage); // if (msmd == null) // throw new InvalidJobException(jobId, stage, -1); // if (!msmd.getScalingPolicy().isEnabled()) { // logger.warn("Job " + jobId + " stage " + stage + " is not scalable, can't increment #workers by " + increment); // return 0; // } // int actualIncrement = 0; // try (AutoCloseable l = jobMetadata.obtainLock()) { // final int oldNumWorkers = msmd.getNumWorkers(); // final int max = msmd.getScalingPolicy().getMax(); // actualIncrement = Math.min(oldNumWorkers + increment, max) - oldNumWorkers; // if (actualIncrement > 0) { // msmd.unsafeSetNumWorkers(oldNumWorkers + increment); // try { // store.updateStage(msmd); // } catch (IOException e) { // logger.error("Error setting stage count for job " + jobId); // msmd.unsafeSetNumWorkers(oldNumWorkers); // return 0; // } // for (int i = 0; i < actualIncrement; i++) { // int newWorkerIndex = oldNumWorkers + i; // final WorkerRequest workerRequest = createWorkerRequest(msmd, newWorkerIndex, // jobMetadata.getUser(), Optional.empty()); // MantisWorkerMetadata workerByIndex = null; // try { // workerByIndex = jobMetadata.getWorkerByIndex(msmd.getStageNum(), newWorkerIndex); // } catch (InvalidJobException e) {} // worker for that index didn't exist before // if (workerByIndex == null) { // store.storeNewWorker(workerRequest); // } else { // store.replaceTerminatedWorker(workerRequest, workerByIndex); // } // queueTask(workerRequest); // // if(workerByIndex==null) // // workerByIndex = mjmd.getWorkerByIndex(msmd.getStageNum(), i); // // logger.info("Worker for index " + i + " is " + workerByIndex.getWorkerNumber()); // } // } // } catch (IOException | InvalidJobException e) { // logger.error("Couldn't create new worker for scaling up: " + e.getMessage(), e); // } catch 
(Exception e) { // logger.error("Unexpected error: " + e.getMessage(), e); // } // shouldn't happen // if (actualIncrement > 0) { // MantisAuditLogWriter.getInstance() // .getObserver().onNext(new MantisAuditLogEvent( // MantisAuditLogEvent.Type.JOB_SCALE_UP, jobId, "Scaled up (auto) stage " + stage + " by " + actualIncrement + " workers")); // String mesg = "Scaled up (auto) stage " + stage + " of job " + jobId + " by " + actualIncrement + " workers to " + // msmd.getNumWorkers() + ", reason: " + reason; // sendStatus(new Status(jobId, stage, -1, -1, Status.TYPE.INFO, mesg, MantisJobState.Started)); // logger.info(mesg); // } // return actualIncrement; // } // // public int scaleDownStage(int stage, int decrement, String reason) throws InvalidJobException { // if (!isActive() || decrement < 1) // return 0; // final MantisJobMetadata jobMetadata = getJobMetadata(); // final MantisStageMetadataWritable msmd = (MantisStageMetadataWritable) jobMetadata.getStageMetadata(stage); // if (msmd == null) // throw new InvalidJobException(jobId, stage, -1); // if (!msmd.getScalingPolicy().isEnabled()) { // logger.warn("Job " + jobId + " stage " + stage + " is not scalable, can't increment #workers by " + decrement); // return 0; // } // int actualDecrement = 0; // StringBuilder sb = new StringBuilder(); // try (AutoCloseable l = jobMetadata.obtainLock()) { // int oldNumWorkers = msmd.getNumWorkers(); // int min = msmd.getScalingPolicy().getMin(); // actualDecrement = oldNumWorkers - Math.max(oldNumWorkers - decrement, min); // if (actualDecrement > 0) { // msmd.unsafeSetNumWorkers(oldNumWorkers - decrement); // try { // store.updateStage(msmd); // } catch (IOException e) { // logger.error("Error setting stage count for job " + jobId); // msmd.unsafeSetNumWorkers(oldNumWorkers); // return 0; // } // for (int w = 0; w < actualDecrement; w++) { // final MantisWorkerMetadata r = msmd.getWorkerByIndex(oldNumWorkers - w - 1); // killWorker(r); // 
store.archiveWorker((MantisWorkerMetadataWritable) r); // if (!msmd.unsafeRemoveWorker(r.getWorkerIndex(), r.getWorkerNumber())) { // logger.warn(String.format("Job %s stage %d: Unexpected worker state, couldn't remove worker idx=%d, number=%d", // jobId, msmd.getStageNum(), r.getWorkerIndex(), r.getWorkerNumber())); // } // logger.info(String.format("Job %s archived worker index %d, number %d", jobId, // r.getWorkerIndex(), r.getWorkerNumber())); // sb.append(r.getWorkerIndex()).append(","); // } // } // } catch (Exception e) { // logger.warn("Unexpected error scaling down stage " + stage + " of job " + jobId + " by " + // decrement + " workers: " + e.getMessage()); // } // if (actualDecrement > 0) { // MantisAuditLogWriter.getInstance() // .getObserver().onNext(new MantisAuditLogEvent( // MantisAuditLogEvent.Type.JOB_SCALE_DOWN, jobId, "Scaled down (auto) stage " + stage + " by " + actualDecrement + " workers")); // String mesg = "Scaled down (auto) stage " + stage + " of job " + jobId + " by " + actualDecrement + // " workers (indexes " + sb.toString() + ") to " + msmd.getNumWorkers() + ", reason: " + reason; // sendStatus(new Status(jobId, stage, -1, -1, Status.TYPE.INFO, mesg, MantisJobState.Started)); // } // return actualDecrement; // } // // public void setScalingPolicy(int stageNum, StageScalingPolicy scalingPolicy) throws InvalidJobException { // if (!isActive()) // return; // final MantisJobMetadata jobMetadata = getJobMetadata(); // final MantisStageMetadataWritable msmd = (MantisStageMetadataWritable) jobMetadata.getStageMetadata(stageNum); // if (msmd == null) // throw new InvalidJobException(jobId, stageNum, -1); // try (AutoCloseable l = jobMetadata.obtainLock()) { // msmd.setScalingPolicy(scalingPolicy); // boolean stored = false; // if (scalingPolicy != null && scalingPolicy.isEnabled()) { // msmd.setScalable(true); // logger.info(jobId + ": setting #workers from " + msmd.getNumWorkers() + " to " + scalingPolicy.getMin() + // "/" + 
scalingPolicy.getMax()); // if (msmd.getNumWorkers() < scalingPolicy.getMin()) { // setStageWorkersCount(stageNum, scalingPolicy.getMin(), "updated scaling policy"); // stored = true; // } // if (msmd.getNumWorkers() > scalingPolicy.getMax()) { // setStageWorkersCount(stageNum, scalingPolicy.getMax(), "updated scaling policy"); // stored = true; // } // } // if (!stored) { // try { // store.updateStage(msmd); // } catch (IOException e) { // logger.error("Error updating stage: " + e.getMessage(), e); // throw new InvalidJobException(jobId, stageNum, -1, e); // } // } // MantisAuditLogWriter.getInstance() // .getObserver().onNext(new MantisAuditLogEvent( // MantisAuditLogEvent.Type.JOB_SCALE_UPDATE, jobId, "Scaling policy updated for stage " + stageNum + " (" + // (scalingPolicy == null ? "null" : mapper.writeValueAsString(scalingPolicy)) + ")")); // } catch (InvalidJobException ije) { // throw ije; // } catch (Exception e) { // catching for AutoCloseable // logger.error("Unexpected error: " + e.getMessage(), e); // shouldn't happen // } // } // // public void setStageWorkersCount(int stageNum, int numWorkers, String reason) throws InvalidJobException { // if (!isActive() || numWorkers < 1) // return; // final MantisJobMetadata mjmd = getJobMetadata(); // final MantisStageMetadataWritable msmd = (MantisStageMetadataWritable) mjmd.getStageMetadata(stageNum); // if (msmd == null) // throw new InvalidJobException(jobId, stageNum, -1); // if (!msmd.getScalable()) { // sendStatus(new Status(jobId, stageNum, -1, -1, Status.TYPE.WARN, // "Can't change #workers to " + numWorkers + ", stage " + stageNum + " is not scalable", MantisJobState.Started)); // logger.warn("Job " + jobId + " stage " + stageNum + " is not scalable, can't change #workers to " + numWorkers); // return; // } // if (msmd.getScalingPolicy() != null && numWorkers < msmd.getScalingPolicy().getMin()) { // final String msg = "Can't scale down #workers of stage " + stageNum + " below MIN=" + // 
msmd.getScalingPolicy().getMin() + " to requested " + numWorkers; // sendStatus(new Status(jobId, stageNum, -1, -1, Status.TYPE.WARN, msg, MantisJobState.Started)); // logger.warn("Job " + jobId + ": " + msg); // return; // } // if (msmd.getScalingPolicy() != null && numWorkers > msmd.getScalingPolicy().getMax()) { // final String msg = "Can't scale up #workers of stage " + stageNum + " above MAX=" + // msmd.getScalingPolicy().getMax() + " to requested " + numWorkers; // sendStatus(new Status(jobId, stageNum, -1, -1, Status.TYPE.WARN, msg, MantisJobState.Started)); // logger.warn("Job " + jobId + ": " + msg); // return; // } // sendStatus(new Status(jobId, stageNum, -1, -1, Status.TYPE.INFO, // "Setting #workers to " + numWorkers + " for stage " + stageNum + ", reason=" + reason, // MantisJobState.Started)); // numScaleStage.increment(); // try (AutoCloseable l = mjmd.obtainLock()) { // int prevNum = msmd.getNumWorkers(); // msmd.unsafeSetNumWorkers(numWorkers); // try { // store.updateStage(msmd); // } catch (IOException e) { // logger.error("Error setting stage count for job " + jobId); // throw new InvalidJobException(jobId, msmd.getStageNum(), -1, e); // } // if (prevNum < numWorkers) { // for (int i = prevNum; i < numWorkers; i++) { // //logger.info("Creating new worker for index " + i); // MantisWorkerMetadata workerByIndex = null; // try { // workerByIndex = mjmd.getWorkerByIndex(msmd.getStageNum(), i); // } catch (InvalidJobException e) {} // worker for that index didn't exist before // final WorkerRequest workerRequest; // if (workerByIndex == null) { // workerRequest = createWorkerRequest(msmd, i, mjmd.getUser(), Optional.empty()); // store.storeNewWorker(workerRequest); // } else { // workerRequest = createWorkerRequest(msmd, i, mjmd.getUser(), workerByIndex.getCluster()); // store.replaceTerminatedWorker(workerRequest, workerByIndex); // } // queueTask(workerRequest); // // if(workerByIndex==null) // // workerByIndex = 
mjmd.getWorkerByIndex(msmd.getStageNum(), i); // // logger.info("Worker for index " + i + " is " + workerByIndex.getWorkerNumber()); // } // MantisAuditLogWriter.getInstance() // .getObserver().onNext(new MantisAuditLogEvent( // MantisAuditLogEvent.Type.JOB_SCALE_UP, jobId, "Scaled up (manual) stage " + stageNum + " by " + // (numWorkers - prevNum) + " workers" + // (reason != null && !reason.isEmpty() ? ", reason=" + reason : ""))); // } else { // for (int w = numWorkers; w < prevNum; w++) { // MantisWorkerMetadata worker = mjmd.getWorkerByIndex(msmd.getStageNum(), w); // logger.info("Removing worker index " + worker.getWorkerIndex() + " due to scale down of stage " + stageNum + " of job " + jobId); // sendStatus(new Status(jobId, stageNum, worker.getWorkerIndex(), worker.getWorkerNumber(), Status.TYPE.INFO, // "Removing worker index " + worker.getWorkerIndex() + " due to scale down of stage " + stageNum, MantisJobState.Noop)); // killWorker(worker); // store.archiveWorker((MantisWorkerMetadataWritable) worker); // if (!msmd.unsafeRemoveWorker(worker.getWorkerIndex(), worker.getWorkerNumber())) // logger.error(String.format("Job %s stage %d: Unexpected worker state, couldn't remove worker idx=%d, number=%d", // jobId, msmd.getStageNum(), worker.getWorkerIndex(), worker.getWorkerNumber())); // logger.info(String.format("Job %s archived worker index %d, number %d", jobId, // worker.getWorkerIndex(), worker.getWorkerNumber())); // } // MantisAuditLogWriter.getInstance() // .getObserver().onNext(new MantisAuditLogEvent( // MantisAuditLogEvent.Type.JOB_SCALE_DOWN, jobId, "Scaled down (manual) stage " + stageNum + " by " + // (prevNum - numWorkers) + " workers" + // (reason != null && !reason.isEmpty() ? 
", reason=" + reason : ""))); // } // } catch (IOException | InvalidJobException e) { // logger.error("Error in manual scale: " + e.getMessage(), e); // } catch (Exception e) { // logger.error("Unexpected error: " + e.getMessage(), e); // } // shouldn't happen // } // // public HeartbeatsStatus getAndResetHeartbeatsStatus() { // final MantisJobMetadata jobMetadata = getJobMetadata(); // try (AutoCloseable l = jobMetadata.obtainLock()) { // if (!MantisJobState.isRunningState(jobMetadata.getState())) // return null; // final ArrayList<MantisWorkerMetadata> receipts = heartbeatReceipts.getAndSet(new ArrayList<MantisWorkerMetadata>()); // Set<String> uniqueWorkers = new HashSet<>(); // if (!receipts.isEmpty()) { // for (MantisWorkerMetadata mwmd : receipts) // uniqueWorkers.add(mwmd.getStageNum() + ":" + mwmd.getWorkerIndex()); // } // return new HeartbeatsStatus(getAllRunningWorkers().size(), uniqueWorkers.size()); // } catch (Exception e) { // logger.warn("Unexpected: " + e.getMessage()); // return null; // } // } // // public void checkJobWorkersHealth(final boolean checkMissedHeartbeats) { // if (initialized.get() && !MantisJobState.isTerminalState(getJobMetadata().getState())) { // if (checkMissedHeartbeats) { // checkOnHeartbeatStatus(); // } // moveWorkersOnDisabledVMs(); // scaleStages(getJobMetadata()); // } // } // // private void checkOnHeartbeatStatus() { // for (MantisWorkerMetadata mwmd : getAllRunningWorkers()) { // if (heartbeatTooOld(mwmd)) { // vmService.killTask(mwmd.getWorkerId()); // handleStatus(new Status(jobId, mwmd.getStageNum(), mwmd.getWorkerIndex(), mwmd.getWorkerNumber(), // Status.TYPE.ERROR, getWorkerStringPrefix(mwmd.getStageNum(), mwmd.getWorkerIndex(), mwmd.getWorkerNumber()) + // " Heartbeat too old", MantisJobState.Failed)); // } // } // } // // private int getTotalNumWorkers() { // return getAllWorkers(mwm -> true, mwm -> mwm).size(); // } // // private void moveWorkersOnDisabledVMs() { // if (!workersOnDisabledVMs.isEmpty()) { // 
final List<Integer> workersToMigrate = migrationStrategy.execute( // workersOnDisabledVMs, getAllRunningWorkers().size(), // getTotalNumWorkers(), lastWorkerMigrationTimestamp); // workersToMigrate.forEach(workerNumber -> { // final MantisWorkerMetadata workerMetadata = getWorkerMetadata(workerNumber, false); // if (workerMetadata == null) { // return; // worker not active // } // try (AutoCloseable ac = getJobMetadata().obtainLock()) { // if (MantisJobState.isRunningState(workerMetadata.getState())) { // logger.info("Moving worker number " + workerMetadata.getWorkerNumber() + " index " + workerMetadata.getWorkerIndex() + // " of job " + jobId + " away from disabled VM"); // vmService.killTask(workerMetadata.getWorkerId()); // handleStatus(new Status(jobId, workerMetadata.getStageNum(), workerMetadata.getWorkerIndex(), workerMetadata.getWorkerNumber(), // Status.TYPE.ERROR, getWorkerStringPrefix(workerMetadata.getStageNum(), workerMetadata.getWorkerIndex(), workerMetadata.getWorkerNumber()) + // " Moving out of disabled VM " + // workerMetadata.getSlave(), MantisJobState.Failed)); // lastWorkerMigrationTimestamp = clock.now(); // } // } catch (Exception e) { // // shouldn't happen // logger.warn("Unexpected exception locking metadata for worker num " + workerMetadata.getWorkerNumber() + " of job " + jobId); // } // }); // } // } // // public void workerUnScheduleable(int stage, final WorkerId workerId) { // logger.info("Rate limiting unschedulable worker {} stage {}", workerId, stage); // sendStatus(new Status(jobId, stage, workerId.getWorkerIndex(), workerId.getWorkerNum(), Status.TYPE.INFO, // getWorkerStringPrefix(stage, workerId.getWorkerIndex(), workerId.getWorkerNum()) + // " rate limiting: no resources to fit worker", MantisJobState.Accepted)); // final long resubmitAt = resubmitRateLimiter.getWorkerResubmitTime(jobId, stage, workerId.getWorkerIndex()); // scheduler.updateWorkerSchedulingReadyTime(workerId, resubmitAt); // } // // protected ScheduleRequest 
createSchedulingRequest(final WorkerRequest workerRequest, final Optional<Long> readyAt) { // final WorkerId workerId = new WorkerId(workerRequest.getJobId(), workerRequest.getWorkerIndex(), workerRequest.getWorkerNumber()); // // // setup constraints // final List<ConstraintEvaluator> hardConstraints = new ArrayList<>(); // final List<VMTaskFitnessCalculator> softConstraints = new ArrayList<>(); // MantisStageMetadata stageMetadata = getJobMetadata().getStageMetadata(workerRequest.getWorkerStage()); // List<JobConstraints> stageHC = stageMetadata.getHardConstraints(); // List<JobConstraints> stageSC = stageMetadata.getSoftConstraints(); // // final Set<String> coTasks = new HashSet<>(); // if ((stageHC != null && !stageHC.isEmpty()) || // (stageSC != null && !stageSC.isEmpty())) { // for (MantisWorkerMetadata mwmd : stageMetadata.getWorkerByIndexMetadataSet()) { // if (mwmd.getWorkerNumber() != workerId.getWorkerNum()) // coTasks.add(workerId.getId()); // } // } // if (stageHC != null && !stageHC.isEmpty()) { // for (JobConstraints c : stageHC) { // hardConstraints.add(ConstraintsEvaluators.hardConstraint(c, coTasks)); // } // } // if (stageSC != null && !stageSC.isEmpty()) { // for (JobConstraints c : stageSC) { // softConstraints.add(ConstraintsEvaluators.softConstraint(c, coTasks)); // } // } // // final ScheduleRequest scheduleRequest = new ScheduleRequest(workerId, // workerRequest.getWorkerStage(), // workerRequest.getNumPortsPerInstance(), // new JobMetadata(jobId, // jobDefinition.getJobJarFileLocation(), // workerRequest.getTotalStages(), // workerRequest.getUser(), // workerRequest.getSchedulingInfo(), // workerRequest.getParameters(), // workerRequest.getSubscriptionTimeoutSecs(), // workerRequest.getMinRuntimeSecs()), // getJobMetadata().getSla().getDurationType(), // workerRequest.getDefinition(), // hardConstraints, // softConstraints, // readyAt.orElse(0L), // workerRequest.getPreferredCluster()); // return scheduleRequest; // } // // protected void 
queueTask(final WorkerRequest workerRequest, final Optional<Long> readyAt) { // final ScheduleRequest schedulingRequest = createSchedulingRequest(workerRequest, readyAt); // scheduler.scheduleWorker(schedulingRequest); // } // // protected void queueTask(final WorkerRequest workerRequest) { // queueTask(workerRequest, Optional.empty()); // } // // private void dequeueIfTerminalAndSendStatus(Status status) { // try { // if (MantisJobState.isTerminalState(status.getState())) { // final int workerNumber = status.getWorkerNumber(); // final MantisJobMetadata jobMetadata = getJobMetadata(); // if (jobMetadata != null) { // try { // final MantisWorkerMetadata w = jobMetadata.getWorkerByNumber(workerNumber); // if (w != null) // scheduler.unscheduleWorker( // w.getWorkerId(), // Optional.ofNullable(w.getSlave())); // } catch (InvalidJobException e) { // logger.info(jobId + ": couldn't remove task number " + workerNumber + // " from scheduler queue upon state=" + status.getState() + " - " + e.getMessage()); // } // } // } // statusSerializedObserver.onNext(status); // } catch (Exception e) { // logger.warn("Problem sending status out: " + e.getMessage(), e); // } // } // // private void storeWorkerStateAndSendStatus(int workerNumber, MantisJobState state, JobCompletedReason reason, Status status) // throws IOException, InvalidJobException, InvalidJobStateChangeException { // dequeueIfTerminalAndSendStatus(status); // final MantisJobMetadata jobMetadata = getJobMetadata(); // if (jobMetadata == null || jobMetadata.getSla() == null || store == null) { // logger.error("would fail to store worker state {} jobID {} workerNum {} (jobMetadataNull? {} slaNull? {} storeNull? 
{})", // state, jobId, workerNumber, (jobMetadata == null), (jobMetadata == null || jobMetadata.getSla() == null), (store == null)); // } // store.storeWorkerState(jobId, workerNumber, state, reason, // jobMetadata.getSla().getDurationType() == MantisJobDurationType.Perpetual); // } // // private void sendStatus(Status status) { // try { // statusSerializedObserver.onNext(status); // } catch (Exception e) { // logger.warn("Problem sending status out: " + e.getMessage(), e); // } // } // // public boolean isActive() { // return !MantisJobState.isTerminalState(getJobMetadata().getState()); // } // // // caller must lock job object // private void killWorker(MantisWorkerMetadata mwmd) { // try { // storeWorkerStateAndSendStatus(mwmd.getWorkerNumber(), MantisJobState.Completed, JobCompletedReason.Killed, // new Status(jobId, mwmd.getStageNum(), mwmd.getWorkerIndex(), // mwmd.getWorkerNumber(), Status.TYPE.INFO, // getWorkerStringPrefix(mwmd.getStageNum(), mwmd.getWorkerIndex(), mwmd.getWorkerNumber()) + // " Killed", MantisJobState.Completed)); // vmService.killTask(mwmd.getWorkerId()); // numWorkerTerminated.increment(); // MantisAuditLogWriter.getInstance() // .getObserver().onNext(new MantisAuditLogEvent(MantisAuditLogEvent.Type.WORKER_TERMINATE, // jobId + "-workerNum-" + mwmd.getWorkerNumber(), "stage=" + mwmd.getStageNum() + ", index=" + mwmd.getWorkerIndex())); // } catch (IOException | InvalidJobException | InvalidJobStateChangeException e) { // logger.error("Can't kill worker " + mwmd.getWorkerNumber() + " of job " + jobId + ": " + e.getMessage()); // } // } // // private void killJob(String user) { // killJob(user, null); // } // // public void killJob(String user, String reason) { // MantisJobMetadata jobMetadata = getJobMetadata(); // logger.info("attempt to acquire lock to kill job {}", jobId); // try (AutoCloseable l = jobMetadata.obtainLock()) { // if (MantisJobState.isTerminalState(jobMetadata.getState())) // return; // for (MantisWorkerMetadata mwmd : 
getAllRunningWorkers()) { // killWorker(mwmd); // } // storeAndMarkJobTerminated(MantisJobState.Completed); // if (reason != null && !reason.isEmpty()) { // dequeueIfTerminalAndSendStatus(new Status(jobId, -1, -1, -1, Status.TYPE.INFO, "Killing job, reason: " + reason, MantisJobState.Completed)); // logger.info("Killing job {} user {} reason {}", jobId, user, reason); // } // MantisAuditLogWriter.getInstance() // .getObserver().onNext(new MantisAuditLogEvent(MantisAuditLogEvent.Type.JOB_TERMINATE, jobId, "user=" + user)); // } catch (Exception e) { // logger.error("Unexpected error while killing job: " + e.getMessage(), e); // } // } // // void enforceSla() { // if (MantisJobState.isTerminalState(getJobMetadata().getState())) // return; // // this is the only sla we have for now // if (getJobMetadata().getSla().getRuntimeLimitSecs() > 0 && hasReachedRuntimeLimit()) { // killJob("MantisMaster", "reached runtime limit of " + getJobMetadata().getSla().getRuntimeLimitSecs() + " secs"); // } // } // // private boolean hasReachedRuntimeLimit() { // return (System.currentTimeMillis() - getJobMetadata().getSubmittedAt()) > (getJobMetadata().getSla().getRuntimeLimitSecs() * 1000); // } // // private void registerHeartbeat(final Status status) { // MantisJobState jobState = getJobMetadata().getState(); // numHeartBeatsReceived.increment(); // final Optional<WorkerId> workerIdO = status.getWorkerId(); // if (!workerIdO.isPresent()) { // logger.warn("received heartbeat from worker with invalid ID {} {}-{}", status.getJobId(), status.getWorkerIndex(), status.getWorkerNumber()); // return; // } // final WorkerId workerId = workerIdO.get(); // if (MantisJobState.isTerminalState(jobState)) { // logger.warn("Got heartbeat from worker {} in state={}, killing it", workerId, jobState); // try { // int stageNum = getJobMetadata().getWorkerByNumber(workerId.getWorkerNum()).getStageNum(); // storeWorkerStateAndSendStatus(workerId.getWorkerNum(), MantisJobState.Failed, 
JobCompletedReason.Killed, // new Status(jobId, stageNum, workerId.getWorkerIndex(), workerId.getWorkerNum(), Status.TYPE.INFO, // getWorkerStringPrefix(stageNum, workerId.getWorkerIndex(), workerId.getWorkerNum()) + " killed, shouldn't be running", // MantisJobState.Failed)); // } catch (Exception e) { // logger.warn("Unexpected error storing state=Failed for worker {}, ignoring", workerId); // } // vmService.killTask(workerId); // return; // } // try { // logger.debug("Got heartbeat from {} with {} payloads", workerId, status.getPayloads().size()); // MantisWorkerMetadataWritable mwmd = getMantisWorkerMetadataWritable(workerId.getWorkerNum()); // if (mwmd != null) { // try (AutoCloseable l = getJobMetadata().obtainLock()) { // if (!MantisJobState.isOnSlaveState(mwmd.getState())) { // // this worker shouldn't be running, kill it // vmService.killTask(mwmd.getWorkerId()); // } else { // heartbeatReceipts.get().add(mwmd); // mwmd.setLastHeartbeatAt(System.currentTimeMillis()); // if (mwmd.getState() != MantisJobState.Started) { // storeWorkerStateAndSendStatus(workerId.getWorkerNum(), MantisJobState.Started, JobCompletedReason.Normal, status); // } // if ((jobState != MantisJobState.Launched) && allWorkersStarted()) { // markJobLaunched(); // } // handlePayloads(mwmd, status.getPayloads()); // } // } catch (Exception e) { // logger.warn("Can't obtain lock on worker " + mwmd.getWorkerNumber() + " of job " + mwmd.getJobId() // + " - " + e.getMessage()); // } // } else { // // heartbeat from a worker that isn't supposed to be there, kill it // vmService.killTask(workerId); // } // } catch (InvalidJobException e) { // logger.warn("Got worker heartbeat for invalid worker {}, killing", workerId, e); // vmService.killTask(workerId); // } // } // // public void handleSubscriberTimeout() { // final MantisJobMetadata jobMetadata = getJobMetadata(); // try (AutoCloseable l = jobMetadata.obtainLock()) { // if (!MantisJobState.isTerminalState(jobMetadata.getState())) { // if 
((System.currentTimeMillis() - lastNewSubscriberAt.get()) > (evalSubscriberTimeoutSecs() * 1000)) // killJob("MantisMaster", "ephemeral job with no subscribers for " + evalSubscriberTimeoutSecs() + " secs"); // } // } catch (Exception e) { // logger.error("Unexpected to not get lock on job metadata: " + e.getMessage(), e); // } // } // // private long getMinRuntimeSecs() { // final MantisJobMetadata jobMetadata = getJobMetadata(); // return (jobMetadata.getSubmittedAt() / 1000L + jobMetadata.getSla().getMinRuntimeSecs()) - System.currentTimeMillis() / 1000L; // } // // public long evalSubscriberTimeoutSecs() { // long minRuntimeSecs = getMinRuntimeSecs(); // return Math.max( // minRuntimeSecs, // subscriptionTimeoutSecs // ); // } // // public boolean markNewSubscriber() throws InvalidJobException { // final MantisJobMetadata jobMetadata = getJobMetadata(); // try (AutoCloseable l = jobMetadata.obtainLock()) { // if (MantisJobState.isTerminalState(jobMetadata.getState())) // return false; // lastNewSubscriberAt.set(System.currentTimeMillis()); // return true; // } catch (Exception e) { // logger.error("Unexpected to not get lock on job metadata: " + e.getMessage(), e); // return false; // } // } // // private void handlePayloads(MantisWorkerMetadata mwmd, List<Status.Payload> payloads) { // if (payloads == null) // return; // for (Status.Payload payload : payloads) { // if (!hasJobMaster || // StatusPayloads.Type.valueOf(payload.getType()) == StatusPayloads.Type.IncomingDataDrop // ) { // // handle only if there's no jobMaster, or if it exists, handle only data drop for outlier workers // HeartbeatPayloadHandler.getInstance().handle( // new HeartbeatPayloadHandler.Data(jobId, this, mwmd.getStageNum(), mwmd.getWorkerIndex(), // mwmd.getWorkerNumber(), payload)); // } // } // } // // /* // Return empty Optional if no resubmit, else return the delayed resubmit time for the new worker // */ // private Optional<Long> shouldResubmit(int stageNumber, int workerIndex, // 
MantisJobState oldState, MantisJobState newState, JobCompletedReason reason) { // if (MantisJobState.isTerminalState(oldState)) // return Optional.empty(); // switch (newState) { // case Failed: // return Optional.ofNullable(resubmitRateLimiter.getWorkerResubmitTime(jobId, stageNumber, workerIndex)); // case Completed: // return Optional.empty(); // for now don't resubmit on this case // // if(reason == JobCompletedReason.Error) { // // resubmitHandler.delayResubmit(stageNumber, workerIndex); // // return true; // // } // } // return Optional.empty(); // } // // @SuppressWarnings("unchecked") // private List<MantisWorkerMetadata> getAllActiveWorkers() { // return (List<MantisWorkerMetadata>) getAllWorkers( // new Func1<MantisWorkerMetadata, Boolean>() { // @Override // public Boolean call(MantisWorkerMetadata w) { // return !MantisJobState.isTerminalState(w.getState()); // } // }, // new Func1<MantisWorkerMetadata, MantisWorkerMetadata>() { // @Override // public MantisWorkerMetadata call(MantisWorkerMetadata mantisWorkerMetadata) { // return mantisWorkerMetadata; // } // }); // } // // public void handleStatus(final Status status) { // Action0 postAction = null; // boolean doPostActionIfRequired = false; // if (status.getType() == Status.TYPE.HEARTBEAT) { // registerHeartbeat(status); // return; // } // try { // MantisWorkerMetadataWritable mwmd = getMantisWorkerMetadataWritable(status.getWorkerNumber()); // final Optional<Long> shdResubmit = shouldResubmit(mwmd.getStageNum(), mwmd.getWorkerIndex(), mwmd.getState(), status.getState(), status.getReason()); // if (MantisJobState.isOnSlaveState(status.getState()) || MantisJobState.isTerminalState(status.getState())) // status.setHostname(mwmd.getSlave()); // if (MantisJobState.isTerminalState(status.getState())) { // if (!shdResubmit.isPresent() && isValidWorkerIndex(status)) { // boolean killJob = getJobMetadata().getSla().getDurationType() == MantisJobDurationType.Perpetual; // if (!killJob) { // 
List<MantisWorkerMetadata> allActiveWorkers = getAllActiveWorkers(); // if (allActiveWorkers != null && allActiveWorkers.size() == 1) { // if (allActiveWorkers.get(0).getWorkerNumber() == status.getWorkerNumber()) { // // got complete on the last running worker, terminate job // killJob = true; // } // } // } // final boolean klJob = killJob; // postAction = new Action0() { // @Override // public void call() { // status.getWorkerId().ifPresent(wId -> { // vmService.killTask(wId); // explicitly kill the worker reporting complete // }); // if (klJob) { // logger.warn("Worker " + status.getWorkerNumber() + " of job " + jobId + // " reached terminal state of " + status.getState() + ", terminating entire job"); // killJob("MantisMaster"); // } // } // }; // } // } // try (AutoCloseable l = getJobMetadata().obtainLock()) { // if (shdResubmit.isPresent()) { // resubmitWorker(status, shdResubmit, mwmd.getCluster()); // MantisAuditLogWriter.getInstance() // .getObserver().onNext(new MantisAuditLogEvent(MantisAuditLogEvent.Type.WORKER_TERMINATE, // jobId + "-workerNum-" + status.getWorkerNumber(), // "stage=" + status.getStageNum() + ", index=" + status.getWorkerIndex())); // } else { // storeWorkerStateAndSendStatus(status.getWorkerNumber(), status.getState(), status.getReason(), status); // if (status.getState() == MantisJobState.Started) { // if (getJobMetadata().getState() == MantisJobState.Accepted) { // if (allWorkersStarted()) { // postAction = new Action0() { // @Override // public void call() { // markJobLaunched(); // } // }; // doPostActionIfRequired = true; // } // } // MantisAuditLogWriter.getInstance() // .getObserver().onNext(new MantisAuditLogEvent(MantisAuditLogEvent.Type.WORKER_START, // jobId + "-workerNum-" + status.getWorkerNumber(), // "stage=" + status.getStageNum() + ", index=" + status.getWorkerIndex())); // final long startLatency = mwmd.getStartedAt() - mwmd.getLaunchedAt(); // workerLaunchToStartMillis.increment(startLatency); // 
workerLaunchToStartDistMillis.recordValue(startLatency); // } else if (MantisJobState.isTerminalState(status.getState())) // MantisAuditLogWriter.getInstance() // .getObserver().onNext(new MantisAuditLogEvent(MantisAuditLogEvent.Type.WORKER_TERMINATE, // jobId + "-workerNum-" + status.getWorkerNumber(), // "stage=" + status.getStageNum() + ", index=" + status.getWorkerIndex() + ", reason=" + status.getReason())); // } // doPostActionIfRequired = true; // } catch (InvalidJobStateChangeException jse) { // if (MantisJobState.isOnSlaveState(status.getState())) { // // slave is trying to start job when it shouldn't, kill it // vmService.killTask(mwmd.getWorkerId()); // logger.warn("Invalid to move worker " + status.getWorkerNumber() + // " to " + status.getState() + " state, sent kill request - " + jse.getMessage()); // } else // logger.warn("Ignoring worker state change error - " + jse.getMessage()); // } catch (InvalidJobException ije) { // logger.warn("Ignoring worker status error - " + ije.getMessage()); // } // } catch (InvalidJobException ije) { // if (MantisJobState.isOnSlaveState(status.getState())) { // status.getWorkerId().ifPresent(wId -> { // logger.warn("Unknown worker status '{}' received, sent request to kill {}-{}-{}", status.getState(), status.getJobId(), // status.getWorkerIndex(), status.getWorkerNumber()); // vmService.killTask(wId); // }); // } else { // logger.warn("Ignoring worker status '{}' on unknown worker {}-{}-{}", status.getState(), // status.getJobId(), status.getWorkerIndex(), status.getWorkerNumber()); // } // } catch (Exception e) { // logger.error("Unexpected status (" + status + ") - " + e.getMessage(), e); // // sendStatus(new Status(jobId, status.getStageNum(), status.getWorkerIndex(), status.getWorkerNumber(), Status.TYPE.ERROR, // // status.getMessage()+": Error setting state - " + e.getMessage(), status.getState())); // } finally { // if (doPostActionIfRequired && postAction != null) // postAction.call(); // } // } // // private 
void markJobLaunched() { // final MantisJobMetadata jobMetadata = getJobMetadata(); // boolean notifyNmdJb = false; // try (AutoCloseable l = jobMetadata.obtainLock()) { // if (jobMetadata.getState() == MantisJobState.Accepted) { // store.storeJobState(jobId, MantisJobState.Launched); // notifyNmdJb = true; // } // } catch (InvalidJobException | IOException | InvalidJobStateChangeException e) { // logger.warn("Couldn't mark state=Started on job " + jobId + ": " + e.getMessage()); // } catch (Exception e) { // logger.warn("Unexpected exception: " + e.getMessage()); // shouldn't happen // } // if (notifyNmdJb) // // call NamedJob mutating method asynchronous with current JobMgr mutating method // Schedulers.from(mantisJobMgrExecutorService).createWorker().schedule(() -> { // try { // namedJob.addJobMgr(this); // } catch (Exception e) { // logger.error("caught exception adding Job Manager for {}", jobId); // } // }); // } // // private boolean isValidWorkerIndex(Status status) { // return !(status.getWorkerIndex() < 0 || status.getStageNum() < 0) && // status.getWorkerIndex() < getJobMetadata().getStageMetadata(status.getStageNum()).getNumWorkers(); // } // // private String getWorkerStringPrefix(int stageNum, int index, int number) { // return "stage " + stageNum + " worker index=" + index + " number=" + number; // } // // public void setWorkerLaunched(final int workerNumber, final String hostname, final String slaveID, // final Optional<String> clusterName, final WorkerPorts ports) // throws InvalidJobStateChangeException, InvalidJobException { // MantisJobMetadata mjmd = getJobMetadata(); // MantisWorkerMetadataWritable mwmd = getMantisWorkerMetadataWritable(workerNumber); // try (AutoCloseable l = mjmd.obtainLock()) { // mwmd.setSlave(hostname); // mwmd.setSlaveID(slaveID); // mwmd.setCluster(clusterName); // mwmd.setMetricsPort(ports.getMetricsPort()); // mwmd.setDebugPort(ports.getDebugPort()); // mwmd.setConsolePort(ports.getConsolePort()); // 
mwmd.setCustomPort(ports.getCustomPort()); // mwmd.addPorts(ports.getPorts()); // mwmd.setLastHeartbeatAt(System.currentTimeMillis()); // start counting heartbeats from now // Status status = new Status(jobId, // mwmd.getStageNum(), mwmd.getWorkerIndex(), mwmd.getWorkerNumber(), Status.TYPE.INFO, // getWorkerStringPrefix(mwmd.getStageNum(), mwmd.getWorkerIndex(), mwmd.getWorkerNumber()) + // " scheduled on " + hostname + " with ports=" + mapper.writeValueAsString(ports) // , MantisJobState.Launched); // status.setHostname(hostname); // storeWorkerStateAndSendStatus(workerNumber, MantisJobState.Launched, JobCompletedReason.Normal, status); // logger.info("Launched worker " + mwmd.getWorkerNumber() + " index " + mwmd.getWorkerIndex() + // " of job " + jobId + " on host " + hostname + " using ports " + mapper.writeValueAsString(ports)); // } catch (InvalidJobException | InvalidJobStateChangeException e) { // throw e; // } catch (Exception e) { // throw new InvalidJobStateChangeException(jobId, mwmd.getState(), MantisJobState.Launched, e); // } // } // // public void handleVMStatusForWorker(Status status) { // if (MantisJobState.isTerminalState(status.getState())) // handleStatus(status); // else if (MantisJobState.isRunningState(status.getState())) { // boolean killTask = false; // MantisWorkerMetadataWritable w = null; // try { // w = getMantisWorkerMetadataWritable(status.getWorkerNumber()); // if (MantisJobState.isTerminalState(w.getState())) { // killTask = true; // } // } catch (InvalidJobException e) { // killTask = true; // } // if (killTask) { // status.getWorkerId().ifPresent(wId -> { // vmService.killTask(wId); // logger.info("Killing worker {} supposed to be in terminal state, got active state notification from VMService", wId); // }); // // } // } // } // // public void resubmitWorker(int workerNumber) throws InvalidJobException, InvalidJobStateChangeException { // resubmitWorker(workerNumber, null); // } // // public void resubmitWorker(int workerNumber, 
String reason) throws InvalidJobException, InvalidJobStateChangeException { // MantisJobMetadata mjmd = store.getActiveJob(jobId); // MantisWorkerMetadata mwmd = mjmd.getWorkerByNumber(workerNumber); // try (AutoCloseable l = mjmd.obtainLock()) { // if (!MantisJobState.isRunningState(mwmd.getState())) // throw new InvalidJobStateChangeException(jobId, mwmd.getState()); // final Status status = new Status(jobId, mwmd.getStageNum(), mwmd.getWorkerIndex(), mwmd.getWorkerNumber(), // Status.TYPE.WARN, getWorkerStringPrefix(mwmd.getStageNum(), mwmd.getWorkerIndex(), mwmd.getWorkerNumber()) + // " User requested resubmit", MantisJobState.Failed); // status.setReason(JobCompletedReason.Relaunched); // resubmitWorker(status, Optional.empty(), mwmd.getCluster()); // MantisAuditLogWriter.getInstance() // .getObserver().onNext(new MantisAuditLogEvent(MantisAuditLogEvent.Type.WORKER_TERMINATE, // jobId + "-workerNum-" + status.getWorkerNumber(), // "stage=" + status.getStageNum() + ", index=" + status.getWorkerIndex() + // (reason == null ? 
", resubmit requested" : ", reason=" + reason))); // } catch (InvalidJobException | InvalidJobStateChangeException e) { // throw e; // } catch (Exception e) { // logger.error("Unexpected error resubmitting worker " + workerNumber + " of job " + jobId + // " for state change; " + e.getMessage()); // throw new InvalidJobException(jobId, mwmd.getStageNum(), mwmd.getWorkerNumber(), e); // } // } // // // Caller must lock worker associated with status // private void resubmitWorker(final Status status, // final Optional<Long> when, // final Optional<String> cluster) throws InvalidJobException, InvalidJobStateChangeException { // MantisJobMetadata mjmd = store.getActiveJob(jobId); // MantisWorkerMetadata mwmd = mjmd.getWorkerByNumber(status.getWorkerNumber()); // MantisStageMetadata msmd = mjmd.getStageMetadata(mwmd.getStageNum()); // if (mwmd.getTotalResubmitCount() < ConfigurationProvider.getConfig().getMaximumResubmissionsPerWorker()) { // WorkerRequest request = new WorkerRequest(msmd.getMachineDefinition(), // jobId, mwmd.getWorkerIndex(), workerNumberGenerator.getNextWorkerNumber(), mjmd.getJarUrl(), // mwmd.getStageNum(), mjmd.getNumStages(), msmd.getNumWorkers(), // mjmd.getName(), msmd.getMachineDefinition().getNumPorts(), // mjmd.getParameters(), mjmd.getSla(), msmd.getHardConstraints(), msmd.getSoftConstraints(), // jobDefinition.getSchedulingInfo(), subscriptionTimeoutSecs, getMinRuntimeSecs(), // mjmd.getSubmittedAt(), mjmd.getUser(), cluster); // try { // final Status s = new Status(jobId, mwmd.getStageNum(), status.getWorkerIndex(), status.getWorkerNumber(), Status.TYPE.INFO, // "resubmitting lost worker - " + status.getMessage(), status.getState()); // s.setHostname(status.getHostname()); // dequeueIfTerminalAndSendStatus(s); // ((MantisWorkerMetadataWritable) mwmd).setState(status.getState(), System.currentTimeMillis(), status.getReason()); // vmService.killTask(mwmd.getWorkerId()); // in case it is still there // final MantisWorkerMetadata mwmdr = 
store.replaceTerminatedWorker(request, mwmd); // queueTask(request, when); // logger.info("Resubmitted worker " + status.getWorkerNumber() + " with " + mwmdr.getWorkerNumber() + // " for index " + mwmd.getWorkerIndex() + " of job " + jobId); // numWorkerResubmissions.increment(); // } catch (IOException | InvalidJobException e) { // logger.error("Couldn't submit replacement worker " + mwmd.getWorkerNumber() + " for job " + // jobId + "-worker-" + status.getWorkerIndex() + ": " + e.getMessage(), e); // } // } else { // numWorkerResubmitLimitReached.increment(); // try { // storeWorkerStateAndSendStatus(status.getWorkerNumber(), MantisJobState.Failed, JobCompletedReason.Error, // new Status(jobId, mwmd.getStageNum(), status.getWorkerIndex(), status.getWorkerNumber(), Status.TYPE.ERROR, // getWorkerStringPrefix(status.getStageNum(), status.getWorkerIndex(), status.getWorkerNumber()) + // " Max resubmissions reached: lost worker ", MantisJobState.Failed)); // logger.info("Setting job state to failed as well"); // storeAndMarkJobTerminated(MantisJobState.Failed); // // } catch (IOException e) { // logger.error("Can't store state for " + status); // } // } // } // // private void storeAndMarkJobTerminated(final MantisJobState jobState) // throws IOException, InvalidJobException, InvalidJobStateChangeException { // final long submittedAt = getJobMetadata().getSubmittedAt(); // final String user = getJobMetadata().getUser(); // store.storeJobState(jobId, jobState); // statusSerializedObserver.onCompleted(); // resubmitRateLimiter.endJob(jobId); // HeartbeatPayloadHandler.getInstance().completeJob(this); // schedulingInfoBehaviorSubject.onCompleted(); // MetricsRegistry.getInstance().remove(metricsName); // // perform the following call asynchronously since it is expected that the current method we are in is // // holding a job lock due to job state mutation. Where as, the following method is a NamedJob mutation which may // // grab a NamedJob lock. 
// Schedulers.from(mantisJobMgrExecutorService).createWorker().schedule(() -> { // try { // namedJob.jobComplete(MantisJobMgr.this, jobState, submittedAt, user); // } catch (Exception e) { // logger.error("Error marking jobComplete in NamedJob for job " + jobId); // } // }); // } // // private MantisWorkerMetadataWritable getMantisWorkerMetadataWritable(int workerNumber) throws InvalidJobException { // MantisJobMetadata mjmd = store.getActiveJob(jobId); // return (MantisWorkerMetadataWritable) mjmd.getWorkerByNumber(workerNumber); // } // // public MantisWorkerMetadata getWorkerMetadata(int workerNumber, boolean evenIfArchived) { // try { // return getMantisWorkerMetadataWritable(workerNumber); // } catch (InvalidJobException e) { // if (evenIfArchived) { // try { // return store.getArchivedWorker(jobId, workerNumber); // } catch (IOException e1) { // logger.warn("Error getting archived worker " + workerNumber + " for job " + jobId, e1); // return null; // } // } else // return null; // } // } // // private MantisWorkerMetadata getArchivedWorker(int workerNumber) throws IOException { // for (MantisWorkerMetadata mwmd : store.getArchivedWorkers(jobId)) { // if (mwmd.getWorkerNumber() == workerNumber) // return mwmd; // } // return null; // } // // void handlePersistentWorkerState(int workerIndex, int workerNumber, MantisJobState state) { // if (state == MantisJobState.Noop) // return; // try { // MantisWorkerMetadata mwmd = getMantisWorkerMetadataWritable(workerNumber); // try (AutoCloseable wLock = getJobMetadata().obtainLock()) { // if (mwmd.getState() != state) // logger.info("Skipping monitor check on state for job " + jobId + // " worker " + workerIndex + "-" + workerNumber + " since state (" + state + // ") already changed to " + mwmd.getState()); // else { // logger.info("Will check on job " + jobId + " worker " + workerIndex + "-" + workerNumber + // " for state " + state); // if (state == MantisJobState.Launched) { // // resubmit worker // 
resubmitWorker(new Status(jobId, mwmd.getStageNum(), mwmd.getWorkerIndex(), mwmd.getWorkerNumber(), // Status.TYPE.ERROR, getWorkerStringPrefix(mwmd.getStageNum(), workerIndex, workerNumber) + // " Resubmitting worker stuck in " + state + " state", MantisJobState.Failed), // Optional.empty(), // mwmd.getCluster()); // } // // Not handling worker stuck in other states for now. E.g., if stuck in Accepted state for too long // } // } catch (Exception e) { // logger.warn("Unable to obtain lock on job " + jobId + " worker " + mwmd.getWorkerNumber() + // " - " + e.getMessage()); // } // } catch (InvalidJobException e) { // try { // boolean foundArchived = getArchivedWorker(workerNumber) != null; // if (!foundArchived) // logger.error("Error handling persistent worker state of " + state + " for job " + jobId + " worker " // + workerNumber + " - " + e.getMessage()); // } catch (IOException e1) { // logger.warn("Error getting archived workers for job " + jobId); // } // } // } // // public void workerOnDisabledVM(int workerNumber) { // logger.info(getJobId() + ": adding worker " + workerNumber + " on disabled vm for job " + jobId); // workersOnDisabledVMs.add(workerNumber); // } // // void initialize(MantisJobStore store) { // this.store = store; // MantisJobMetadata mjmd = store.getActiveJob(jobId); // if (namedJob.getIsReadyForJobMaster() && isAutoscaled(mjmd)) // hasJobMaster = true; // if (mjmd != null) { // workerNumberGenerator.init(store, false); // if (!MantisJobState.isTerminalState(mjmd.getState())) { // for (MantisStageMetadata msmd : mjmd.getStageMetadata()) { // Map<Integer, WorkerHost> workerHosts = new HashMap<>(); // for (MantisWorkerMetadata mwmd : msmd.getWorkerByIndexMetadataSet()) // if (MantisJobState.isRunningState(mwmd.getState())) { // ((MantisWorkerMetadataWritable) mwmd).setLastHeartbeatAt(System.currentTimeMillis()); // initialize Heartbeat start // workerHosts.put(mwmd.getWorkerNumber(), new WorkerHost(mwmd.getSlave(), mwmd.getWorkerIndex(), 
mwmd.getPorts(), // mwmd.getState(), mwmd.getWorkerNumber(), mwmd.getMetricsPort(), mwmd.getCustomPort())); // // final WorkerRequest workerRequest = createExistingWorkerRequest(msmd, mwmd.getWorkerIndex(), mwmd.getWorkerNumber(), mjmd.getUser(), mwmd.getCluster()); // logger.debug("initializing Running task {}-worker-{}-{}", workerRequest.getJobId(), workerRequest.getWorkerIndex(), workerRequest.getWorkerNumber()); // scheduler.initializeRunningWorker( // createSchedulingRequest(workerRequest, Optional.empty()), // mwmd.getSlave()); // } else if (mwmd.getState() == MantisJobState.Accepted) { // final WorkerRequest workerRequest = createExistingWorkerRequest(msmd, mwmd.getWorkerIndex(), mwmd.getWorkerNumber(), mjmd.getUser(), mwmd.getCluster()); // logger.debug("queuing Accepted task {}-worker-{}-{}", workerRequest.getJobId(), workerRequest.getWorkerIndex(), workerRequest.getWorkerNumber()); // queueTask(workerRequest); // } // if (msmd.getStageNum() > 0) // for now don't send stage 0 assignments // stageAssignments.put(msmd.getStageNum(), new WorkerAssignments(msmd.getStageNum(), msmd.getNumWorkers(), workerHosts)); // } // if (schedulingInfoBehaviorSubject != null) // schedulingInfoBehaviorSubject.onNext(new JobSchedulingInfo(jobId, stageAssignments)); // } else { // statusSerializedObserver.onCompleted(); // if (schedulingInfoBehaviorSubject != null) // schedulingInfoBehaviorSubject.onCompleted(); // MetricsRegistry.getInstance().remove(metricsName); // } // subscriptionTimeoutSecs = initSubscriptionTimeoutSecs(mjmd); // } else // logger.error("No job metadata from store for job " + jobId); // if (!initialized.compareAndSet(false, true)) // throw new IllegalStateException("Job " + jobId + " already initialized"); // } // // private long initSubscriptionTimeoutSecs(MantisJobMetadata mjmd) { // if (mjmd.getSla().getDurationType() == MantisJobDurationType.Perpetual) // return 0L; // return mjmd.getSubscriptionTimeoutSecs() == 0 ? 
// ConfigurationProvider.getConfig().getEphemeralJobUnsubscribedTimeoutSecs() : // mjmd.getSubscriptionTimeoutSecs(); // } // // void initialize(WorkerJobDetails jobDetails, final MantisJobStore store) // throws InvalidJobDetailsException { // this.store = store; // MantisJobMetadata mjmd; // try { // final SchedulingInfo schedulingInfo = jobDetails.getRequest().getJobDefinition().getSchedulingInfo(); // if (namedJob.getIsReadyForJobMaster() && isAutoscaled(schedulingInfo)) { // setupJobMasterStage(schedulingInfo); // } // mjmd = store.storeNewJob(jobDetails); // logger.info("Stored job " + jobId); // MantisAuditLogWriter.getInstance() // .getObserver().onNext(new MantisAuditLogEvent(MantisAuditLogEvent.Type.JOB_SUBMIT, jobDetails.getJobId(), "user=" + jobDetails.getUser())); // workerNumberGenerator.init(store, true); // subscriptionTimeoutSecs = initSubscriptionTimeoutSecs(mjmd); // } catch (IOException | JobAlreadyExistsException e) { // logger.error("Error storing new job " + jobDetails.getJobId() + " - " + e.getMessage(), e); // throw new InvalidJobDetailsException(e.getMessage(), e); // } // List<WorkerRequest> workers = getInitialWorkers(jobDetails, mjmd.getSubmittedAt()); // int beg = 0; // while (true) { // if (beg >= workers.size()) // break; // int en = beg + Math.min(workerWritesBatchSize, workers.size() - beg); // final List<WorkerRequest> workerRequests = workers.subList(beg, en); // try { // store.storeNewWorkers(workerRequests); // workerRequests.forEach(this::queueTask); // } catch (InvalidJobException e) { // // TODO confirm if this is possible, likely not // throw new InvalidJobDetailsException("Unexpected error: " + e.getMessage(), e); // } catch (IOException e) { // logger.error("Error storing workers of job " + jobId + " - " + e.getMessage(), e); // } // beg = en; // } // if (!initialized.compareAndSet(false, true)) // throw new IllegalStateException("Job " + jobId + " already initialized"); // } // // private void 
setupJobMasterStage(SchedulingInfo schedulingInfo) { // if (schedulingInfo.forStage(0) == null) { // // create stage 0 schedulingInfo only if not already provided // final StageSchedulingInfo stageSchedulingInfo = new StageSchedulingInfo(1, getJobMasterMachineDef(), // null, null, // for now, there are no hard or soft constraints // null, false); // jobMaster stage itself is not scaled // schedulingInfo.addJobMasterStage( // stageSchedulingInfo // ); // } // hasJobMaster = true; // } // // public boolean hasJobMaster() { // return hasJobMaster; // } // // private boolean isAutoscaled(SchedulingInfo schedulingInfo) { // for (Map.Entry<Integer, StageSchedulingInfo> entry : schedulingInfo.getStages().entrySet()) { // final StageScalingPolicy scalingPolicy = entry.getValue().getScalingPolicy(); // if (scalingPolicy != null && scalingPolicy.isEnabled()) { // return true; // } // } // return false; // } // // private boolean isAutoscaled(MantisJobMetadata job) { // for (MantisStageMetadata s : job.getStageMetadata()) // if (s.getScalable() && s.getScalingPolicy() != null && s.getScalingPolicy().isEnabled()) // return true; // return false; // } // // private MachineDefinition getJobMasterMachineDef() { // MasterConfiguration config = ConfigurationProvider.getConfig(); // return new MachineDefinition( // config.getJobMasterCores(), config.getJobMasterMemoryMB(), config.getJobMasterNetworkMbps(), // config.getJobMasterDiskMB(), 1 // ); // } // // @SuppressWarnings( {"rawtypes", "unchecked"}) // private List<WorkerRequest> getInitialWorkers(WorkerJobDetails jobDetails, long submittedAt) throws InvalidJobDetailsException { // List<WorkerRequest> workerRequests = new LinkedList<>(); // SchedulingInfo schedulingInfo = jobDetails.getRequest().getJobDefinition().getSchedulingInfo(); // int totalStages = schedulingInfo.getStages().size(); // // NOTE: stages are numbered from 1, except when a job master exists, it will be 0. 
// for (int s = 0; s <= totalStages; s++) // setupStageWorkers(jobDetails, workerRequests, schedulingInfo, totalStages, s, submittedAt); // return workerRequests; // } // // private void setupStageWorkers(WorkerJobDetails jobDetails, List<WorkerRequest> workerRequests, // SchedulingInfo schedulingInfo, int totalStages, int stageNum, long submittedAt) { // StageSchedulingInfo stage = schedulingInfo.getStages().get(stageNum); // if (stage == null) // return; // can happen when stageNum=0 and there is no jobMaster defined // int numPorts = stage.getMachineDefinition().getNumPorts(); // int numInstancesAtStage = stage.getNumberOfInstances(); // // add worker request for each instance required in stage // int stageIndex = 0; // for (int i = 0; i < numInstancesAtStage; i++) { // int workerNumber = workerNumberGenerator.getNextWorkerNumber(); // // during initialization worker number and index are identical // WorkerRequest workerRequest = new WorkerRequest(stage.getMachineDefinition(), // jobDetails.getJobId(), stageIndex++, workerNumber, jobDetails.getJobJarUrl(), // stageNum, totalStages, numInstancesAtStage, // jobDetails.getJobName(), numPorts, // jobDetails.getRequest().getJobDefinition().getParameters(), // jobDetails.getRequest().getJobDefinition().getJobSla(), stage.getHardConstraints(), stage.getSoftConstraints(), // schedulingInfo, subscriptionTimeoutSecs, getMinRuntimeSecs(), submittedAt, jobDetails.getUser(), Optional.empty()); // workerRequests.add(workerRequest); // } // } //}
4,258
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/LeadershipManagerLocalImpl.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master; import io.mantisrx.server.core.master.MasterDescription; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class LeadershipManagerLocalImpl implements ILeadershipManager { private static final Logger logger = LoggerFactory.getLogger(LeadershipManagerLocalImpl.class); private final MasterDescription masterDescription; private volatile boolean isLeader = true; private volatile boolean isReady = false; public LeadershipManagerLocalImpl(MasterDescription masterDescription) { this.masterDescription = masterDescription; } @Override public void becomeLeader() { logger.info("Becoming leader now"); isLeader = true; } @Override public boolean isLeader() { logger.debug("is leader? {}", isLeader); return isLeader; } @Override public boolean isReady() { return isReady; } @Override public void setLeaderReady() { logger.info("marking leader READY"); isReady = true; } @Override public void stopBeingLeader() { logger.info("Asked to stop being leader now"); isReady = false; isLeader = false; } @Override public MasterDescription getDescription() { return masterDescription; } }
4,259
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/MantisJobStatus.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master;

import io.mantisrx.server.core.Status;
import rx.Observable;

/**
 * Holder for a submitted job's status: the job id, job name, creation timestamp, and the
 * observable stream of {@link Status} updates. Everything except the fatal-error message is
 * immutable; the fatal error is set after construction when submission fails.
 */
public class MantisJobStatus {

    private final String jobId;
    private final String name;
    // wall-clock time (ms) at which this status holder was created
    private final long timestamp;
    private final Observable<Status> status;
    // null until a fatal error is recorded via setFatalError(); volatile so a reader
    // thread observes a write made by another thread (hasFatalError/getFatalError pair)
    private volatile String fatalError = null;

    MantisJobStatus(String jobId, Observable<Status> status, String name) {
        this.jobId = jobId;
        this.status = status;
        this.name = name;
        timestamp = System.currentTimeMillis();
    }

    public String getJobId() {
        return jobId;
    }

    /** @return the stream of status updates for this job */
    public Observable<Status> getStatus() {
        return status;
    }

    public String getName() {
        return name;
    }

    /** @return creation time of this status holder, in epoch millis */
    public long getTimestamp() {
        return timestamp;
    }

    public boolean hasFatalError() {
        return fatalError != null;
    }

    /** @return the fatal error message, or null if none was set */
    public String getFatalError() {
        return fatalError;
    }

    public void setFatalError(String fatalError) {
        this.fatalError = fatalError;
    }
}
4,260
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/AgentClustersAutoScaler.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master;

import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;

import com.netflix.fenzo.AutoScaleAction;
import com.netflix.fenzo.AutoScaleRule;
import rx.Observer;
import rx.functions.Func0;

/**
 * Process-wide singleton that supplies the agent-cluster autoscaler's rules and the
 * observer through which autoscale actions are emitted.
 *
 * <p>{@link #initialize} must complete exactly once before {@link #get()} is usable.
 */
public class AgentClustersAutoScaler {

    private static final AtomicBoolean initialized = new AtomicBoolean(false);
    // volatile for safe publication: get() is unsynchronized, so without volatile a
    // thread could see initialized==true while the autoScaler write is not yet visible.
    private static volatile AgentClustersAutoScaler autoScaler;

    private final Func0<Set<AutoScaleRule>> rulesGetter;
    private final Observer<AutoScaleAction> autoScaleActionObserver;

    private AgentClustersAutoScaler(Func0<Set<AutoScaleRule>> rulesGetter,
                                    Observer<AutoScaleAction> autoScaleActionObserver) {
        this.rulesGetter = rulesGetter;
        this.autoScaleActionObserver = autoScaleActionObserver;
    }

    /**
     * One-time initialization of the singleton.
     *
     * @throws IllegalStateException if called more than once
     */
    public synchronized static void initialize(Func0<Set<AutoScaleRule>> rulesGetter,
                                               Observer<AutoScaleAction> autoScaleActionObserver) {
        if (!initialized.compareAndSet(false, true))
            throw new IllegalStateException(AgentClustersAutoScaler.class.getName() + " already initialized");
        autoScaler = new AgentClustersAutoScaler(rulesGetter, autoScaleActionObserver);
    }

    /**
     * @return the initialized singleton
     *
     * @throws IllegalStateException if {@link #initialize} has not completed yet
     */
    public static AgentClustersAutoScaler get() throws IllegalStateException {
        // Fix: gate on the published instance itself (volatile read) rather than on the
        // initialized flag, so a racing reader can never get back a null singleton.
        final AgentClustersAutoScaler instance = autoScaler;
        if (instance == null)
            throw new IllegalStateException(AgentClustersAutoScaler.class.getName() + " not initialized");
        return instance;
    }

    /** @return the current autoscale rules, or null when no rules getter was supplied */
    public Set<AutoScaleRule> getRules() {
        return rulesGetter == null ? null : rulesGetter.call();
    }

    /** @return the observer to feed autoscale actions into; may be null */
    public Observer<AutoScaleAction> getAutoScaleActionObserver() {
        return autoScaleActionObserver;
    }
}
4,261
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/AgentFitnessCalculator.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master; import com.netflix.fenzo.TaskRequest; import com.netflix.fenzo.TaskTrackerState; import com.netflix.fenzo.VMTaskFitnessCalculator; import com.netflix.fenzo.VirtualMachineCurrentState; import com.netflix.fenzo.functions.Func1; import com.netflix.fenzo.plugins.BinPackingFitnessCalculators; import io.mantisrx.server.master.config.ConfigurationProvider; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class AgentFitnessCalculator implements VMTaskFitnessCalculator { private static final Logger logger = LoggerFactory.getLogger(AgentFitnessCalculator.class); final VMTaskFitnessCalculator binPacker = BinPackingFitnessCalculators.cpuMemNetworkBinPacker; final VMTaskFitnessCalculator durationTypeFitnessCalculator = new DurationTypeFitnessCalculator(); final VMTaskFitnessCalculator clusterFitnessCalculator = new ClusterFitnessCalculator(); private final double binPackingWeight; private final double clusterWeight; private final double durationTypeWeight; private final double goodEnoughThreshold; private final Func1<Double, Boolean> fitnessGoodEnoughFunc; public AgentFitnessCalculator() { binPackingWeight = ConfigurationProvider.getConfig().getBinPackingFitnessWeight(); clusterWeight = ConfigurationProvider.getConfig().getPreferredClusterFitnessWeight(); durationTypeWeight = ConfigurationProvider.getConfig().getDurationTypeFitnessWeight(); goodEnoughThreshold 
= ConfigurationProvider.getConfig().getFitnessGoodEnoughThreshold(); logger.info("clusterWeight {} durationTypeWeight {} binPackingWeight {} goodEnoughThreshold {}", clusterWeight, durationTypeWeight, binPackingWeight, goodEnoughThreshold); this.fitnessGoodEnoughFunc = f -> f > goodEnoughThreshold; } @Override public String getName() { return "Mantis Agent Task Fitness Calculator"; } @Override public double calculateFitness(TaskRequest taskRequest, VirtualMachineCurrentState targetVM, TaskTrackerState taskTrackerState) { double binPackingValue = binPacker.calculateFitness(taskRequest, targetVM, taskTrackerState); double durationTypeFitness = durationTypeFitnessCalculator.calculateFitness(taskRequest, targetVM, taskTrackerState); double clusterFitnessValue = clusterFitnessCalculator.calculateFitness(taskRequest, targetVM, taskTrackerState); // add others such as stream locality fitness calculator if (logger.isDebugEnabled()) { logger.debug("cluster {} duration {} binpack score {} total {}", clusterFitnessValue * clusterWeight, durationTypeFitness * durationTypeWeight, binPackingValue * binPackingWeight, (binPackingValue * binPackingWeight + durationTypeFitness * durationTypeWeight + clusterFitnessValue * clusterWeight)); } return (binPackingValue * binPackingWeight + durationTypeFitness * durationTypeWeight + clusterFitnessValue * clusterWeight) / (binPackingWeight + durationTypeWeight + clusterWeight); } public Func1<Double, Boolean> getFitnessGoodEnoughFunc() { return fitnessGoodEnoughFunc; } }
4,262
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/LaunchTaskException.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master; public class LaunchTaskException extends Exception { private static final long serialVersionUID = 1L; public LaunchTaskException(String message, Throwable cause) { super(message, cause); } }
4,263
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/VirtualMachineMasterService.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master;

import java.util.List;
import java.util.Map;

import com.netflix.fenzo.VirtualMachineLease;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.scheduler.LaunchTaskRequest;
import io.mantisrx.server.master.scheduler.ScheduleRequest;

/**
 * Operations the master performs against agent virtual machines: launching worker tasks on
 * leased resources, declining leases, and killing running workers.
 */
public interface VirtualMachineMasterService {

    /**
     * Launches the given worker tasks using the given resource leases.
     *
     * @param requests tasks to launch
     * @param leases   resource leases (offers) to launch them with
     * @return map from schedule request to launch exception — presumably one entry per task
     *         that failed to launch; confirm exact semantics against the implementation
     */
    Map<ScheduleRequest, LaunchTaskException> launchTasks(List<LaunchTaskRequest> requests, List<VirtualMachineLease> leases);

    /** Declines/returns the given resource lease without using it. */
    void rejectLease(VirtualMachineLease lease);

    /** Kills the running task identified by the given worker id. */
    void killTask(final WorkerId workerId);
}
4,264
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/ClusterAffinityConstraint.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master;

import java.util.Map;

import com.netflix.fenzo.ConstraintEvaluator;
import com.netflix.fenzo.TaskRequest;
import com.netflix.fenzo.TaskTrackerState;
import com.netflix.fenzo.VirtualMachineCurrentState;
import org.apache.mesos.Protos;

/**
 * Fenzo constraint that admits only hosts whose cluster (ASG) attribute value starts with a
 * given cluster name; hosts with a missing or unset attribute are rejected.
 */
public class ClusterAffinityConstraint implements ConstraintEvaluator {

    private final String asgAttributeName;
    private final String clusterName;
    private final String name;

    public ClusterAffinityConstraint(String clusterAttributeName, String clusterName) {
        this.asgAttributeName = clusterAttributeName;
        this.clusterName = clusterName;
        this.name = ClusterAffinityConstraint.class.getName() + "-" + clusterAttributeName;
    }

    @Override
    public String getName() {
        return name;
    }

    /**
     * Decides whether {@code targetVM} belongs to the desired cluster by checking that the
     * host's cluster attribute value begins with the configured cluster name.
     *
     * @param taskRequest      the task being considered for assignment to the host
     * @param targetVM         the host being considered as a target for the task
     * @param taskTrackerState state of tasks previously assigned or running in the system
     * @return a successful Result when the host's cluster attribute value starts with the
     *         configured cluster name; an unsuccessful Result otherwise, including when the
     *         attribute is absent or unset on the host
     */
    @Override
    public Result evaluate(TaskRequest taskRequest, VirtualMachineCurrentState targetVM,
                           TaskTrackerState taskTrackerState) {
        final Map<String, Protos.Attribute> attributes = targetVM.getCurrAvailableResources().getAttributeMap();
        final Protos.Attribute clusterAttr =
                (asgAttributeName == null || attributes == null) ? null : attributes.get(asgAttributeName);
        if (clusterAttr != null && clusterAttr.getText().isInitialized()) {
            final String hostCluster = clusterAttr.getText().getValue();
            return hostCluster.startsWith(clusterName)
                    ? new Result(true, "")
                    : new Result(false, asgAttributeName + " does not begin with " + clusterName);
        }
        return new Result(false, asgAttributeName + " unavailable on host " + targetVM.getCurrAvailableResources().hostname());
    }
}
4,265
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/MantisJobOperations.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master;

import java.io.IOException;
import java.net.URL;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Optional;

import io.mantisrx.common.Label;
import io.mantisrx.runtime.MantisJobDefinition;
import io.mantisrx.runtime.NamedJobDefinition;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.server.master.store.InvalidJobException;
import io.mantisrx.server.master.store.InvalidNamedJobException;
import io.mantisrx.server.master.store.MantisJobStore;
import io.mantisrx.server.master.store.NamedJob;
import io.mantisrx.server.master.store.NamedJobDeleteException;
import rx.Observable;
import rx.functions.Action1;
import rx.functions.Func2;

/**
 * Operations on Mantis job clusters (named jobs) and running jobs: creation, update,
 * enable/disable/delete of named jobs, job submission, job kill/terminate, and access to
 * per-job managers and status streams.
 */
public interface MantisJobOperations {

    /** Creates a new named job (job cluster) from the given definition. */
    NamedJob createNamedJob(NamedJobDefinition namedJobDefinition) throws InvalidNamedJobException;

    /** Updates a named job's jar; optionally creates the named job if absent. */
    NamedJob updateNamedJar(NamedJobDefinition namedJobDefinition, boolean createIfNeeded) throws InvalidNamedJobException;

    /** Updates a named job to the given jar/version — presumably keeping other settings; confirm with implementation. */
    NamedJob quickUpdateNamedJob(String user, String name, URL jobJar, String version) throws InvalidNamedJobException;

    /** Updates the SLA of a named job; {@code forceEnable} semantics are implementation-defined. */
    void updateSla(String user, String name, NamedJob.SLA sla, boolean forceEnable) throws InvalidNamedJobException;

    /**
     * Update the Labels associated with the Job cluster. This completely replaces any
     * existing labels.
     *
     * @param user   submitter
     * @param name   job cluster name
     * @param labels list of Label objects
     *
     * @throws InvalidNamedJobException if the named job is invalid or unknown
     */
    void updateLabels(String user, String name, List<Label> labels) throws InvalidNamedJobException;

    /** Updates the worker migration strategy of a named job. */
    void updateMigrateStrategy(String user, String name, WorkerMigrationConfig migrationConfig) throws InvalidNamedJobException;

    /** Submits a job for the named job — presumably with its last-known settings; returns the new job id. */
    String quickSubmit(String jobName, String user) throws InvalidNamedJobException, InvalidJobException;

    /** @return the named job of the given name, if it exists */
    Optional<NamedJob> getNamedJob(String name);

    /** Deletes the given named job on behalf of {@code user}. */
    void deleteNamedJob(String name, String user) throws NamedJobDeleteException;

    /** Disables the given named job. */
    void disableNamedJob(String name, String user) throws InvalidNamedJobException;

    /** Enables the given named job. */
    void enableNamedJob(String name, String user) throws InvalidNamedJobException;

    /** Submits a job from its full definition and returns its status holder. */
    MantisJobStatus submit(MantisJobDefinition jobDefinition);

    /** Deletes a job; return value semantics are implementation-defined (likely success flag). */
    boolean deleteJob(String jobId) throws IOException;

    /** Kills a job on behalf of {@code user}, recording {@code reason}. */
    void killJob(String user, String jobId, String reason);

    /** Terminates a job — how this differs from {@link #killJob} is implementation-defined. */
    void terminateJob(String jobId);

    /** @return observable of job statuses — presumably for all known jobs; confirm with implementation */
    Observable<MantisJobStatus> jobs();

    /** @return the status holder for the given job */
    MantisJobStatus status(String jobId);

    /** @return initializer invoked with the store and existing job definitions to build named jobs */
    Func2<MantisJobStore, Map<String, MantisJobDefinition>, Collection<NamedJob>> getJobsInitializer();

    /** @return managers for all known jobs */
    Collection<MantisJobMgr> getAllJobMgrs();

    /** @return the manager for the given job, if present */
    Optional<MantisJobMgr> getJobMgr(String jobId);

    /** @return callback that disables the slave (agent) whose id/host is passed in */
    Action1<String> getSlaveDisabler();

    /** @return callback that enables the slave (agent) whose id/host is passed in */
    Action1<String> getSlaveEnabler();

    /** Marks this component ready to serve. */
    void setReady();
}
4,266
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/ConstraintsEvaluators.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master;

import java.util.Set;

import com.netflix.fenzo.AsSoftConstraint;
import com.netflix.fenzo.ConstraintEvaluator;
import com.netflix.fenzo.VMTaskFitnessCalculator;
import com.netflix.fenzo.functions.Func1;
import com.netflix.fenzo.plugins.BalancedHostAttrConstraint;
import com.netflix.fenzo.plugins.ExclusiveHostConstraint;
import com.netflix.fenzo.plugins.UniqueHostAttrConstraint;
import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.server.master.config.ConfigurationProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Maps {@link JobConstraints} values onto Fenzo hard-constraint evaluators and
 * soft-constraint fitness calculators.
 */
public class ConstraintsEvaluators {

    private static final String MANTISAGENT_MAIN_M4 = "mantisagent-main-m4";
    private static final String MANTISAGENT_MAIN_M3 = "mantisagent-main-m3";
    private static final String MANTISAGENT_MAIN_M5 = "mantisagent-main-m5";
    private static final int EXPECTED_NUM_ZONES = 3;
    private static final Logger logger = LoggerFactory.getLogger(ConstraintsEvaluators.class);
    public static ExclusiveHostConstraint exclusiveHostConstraint = new ExclusiveHostConstraint();

    /** Co-task resolver that returns the given set for every task id (replaces repeated anonymous classes). */
    private static Func1<String, Set<String>> coTasksGetter(final Set<String> coTasks) {
        return s -> coTasks;
    }

    /**
     * Builds the hard-constraint evaluator for the given job constraint.
     *
     * @param constraint the job constraint to translate
     * @param coTasks    names of the co-tasks (other workers of the same job)
     * @return the matching evaluator, or null (with an error logged) for unknown constraints
     */
    public static ConstraintEvaluator hardConstraint(JobConstraints constraint, final Set<String> coTasks) {
        switch (constraint) {
        case ExclusiveHost:
            return exclusiveHostConstraint;
        case UniqueHost:
            return new UniqueHostAttrConstraint(coTasksGetter(coTasks));
        case ZoneBalance:
            return new BalancedHostAttrConstraint(coTasksGetter(coTasks), zoneAttributeName(), EXPECTED_NUM_ZONES);
        case M4Cluster:
            return new ClusterAffinityConstraint(asgAttributeName(), MANTISAGENT_MAIN_M4);
        case M3Cluster:
            return new ClusterAffinityConstraint(asgAttributeName(), MANTISAGENT_MAIN_M3);
        case M5Cluster:
            return new ClusterAffinityConstraint(asgAttributeName(), MANTISAGENT_MAIN_M5);
        default:
            logger.error("Unknown job hard constraint " + constraint);
            return null;
        }
    }

    /** @return attribute name identifying the agent's ASG/cluster, from master configuration */
    public static String asgAttributeName() {
        return ConfigurationProvider.getConfig().getActiveSlaveAttributeName();
    }

    /** @return attribute name identifying the host's availability zone, from master configuration */
    public static String zoneAttributeName() {
        return ConfigurationProvider.getConfig().getHostZoneAttributeName();
    }

    /**
     * Builds the soft-constraint fitness calculator for the given job constraint.
     *
     * @param constraint the job constraint to translate
     * @param coTasks    names of the co-tasks (other workers of the same job)
     * @return the matching fitness calculator, or null (with an error logged) for unknown constraints
     */
    public static VMTaskFitnessCalculator softConstraint(JobConstraints constraint, final Set<String> coTasks) {
        switch (constraint) {
        case ExclusiveHost:
            return AsSoftConstraint.get(exclusiveHostConstraint);
        case UniqueHost:
            return AsSoftConstraint.get(new UniqueHostAttrConstraint(coTasksGetter(coTasks)));
        case ZoneBalance:
            return new BalancedHostAttrConstraint(coTasksGetter(coTasks), zoneAttributeName(), EXPECTED_NUM_ZONES)
                    .asSoftConstraint();
        case M4Cluster:
            return AsSoftConstraint.get(new ClusterAffinityConstraint(asgAttributeName(), MANTISAGENT_MAIN_M4));
        case M3Cluster:
            return AsSoftConstraint.get(new ClusterAffinityConstraint(asgAttributeName(), MANTISAGENT_MAIN_M3));
        case M5Cluster:
            return AsSoftConstraint.get(new ClusterAffinityConstraint(asgAttributeName(), MANTISAGENT_MAIN_M5));
        default:
            logger.error("Unknown job soft constraint " + constraint);
            return null;
        }
    }
}
4,267
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/MantisAuditLogWriter.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observer; import rx.Subscriber; import rx.functions.Action0; import rx.observers.SerializedObserver; import rx.schedulers.Schedulers; import rx.subjects.PublishSubject; public class MantisAuditLogWriter { private static final Logger logger = LoggerFactory.getLogger(MantisAuditLogWriter.class); private static MantisAuditLogWriter instance; private final PublishSubject<MantisAuditLogEvent> subject; private final int backPressureBufferSize = 1000; private MantisAuditLogWriter(Subscriber<MantisAuditLogEvent> subscriber) { subject = PublishSubject.create(); subject .onBackpressureBuffer(backPressureBufferSize, new Action0() { @Override public void call() { logger.warn("Exceeded back pressure buffer of " + backPressureBufferSize); } }) .observeOn(Schedulers.computation()) .subscribe(subscriber); } public static void initialize(Subscriber<MantisAuditLogEvent> subscriber) { instance = new MantisAuditLogWriter(subscriber); } public static MantisAuditLogWriter getInstance() { if (instance == null) throw new IllegalStateException(MantisAuditLogWriter.class.getName() + " must be initialized before use"); return instance; } public Observer<MantisAuditLogEvent> getObserver() { return new SerializedObserver<>(subject); } }
4,268
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/WorkerRequest.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master;

import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;

import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.parameter.Parameter;

/**
 * Request to launch one worker of a job stage. Carries the machine definition,
 * job/stage/worker coordinates, scheduling constraints, and port assignments.
 * Ports are handed out via {@link #addPort(int)}: the first four assignments
 * fill the reserved metrics, debug, console and custom ports (in that order),
 * and any further ports go into the general-purpose list.
 */
public class WorkerRequest {

    private final long subscriptionTimeoutSecs;
    private final long minRuntimeSecs;
    private final long jobSubmittedAt;
    private final String user;
    // preferred Cluster to launch the worker on
    private final Optional<String> preferredCluster;
    private String jobName;
    private String jobId;
    private int workerIndex;
    private int workerNumber;
    private URL jobJarUrl;
    private int workerStage;
    private int totalStages;
    private MachineDefinition definition;
    private int numInstancesAtStage;
    private int numPortsPerInstance;
    // Reserved ports; -1 means "not yet assigned" (see addPort).
    private int metricsPort = -1;
    private int debugPort = -1;
    private int consolePort = -1;
    private int customPort = -1;
    private List<Integer> ports;
    private List<Parameter> parameters;
    private JobSla jobSla;
    private List<JobConstraints> hardConstraints;
    private List<JobConstraints> softConstraints;
    private SchedulingInfo schedulingInfo;

    public WorkerRequest(MachineDefinition definition, String jobId, int workerIndex, int workerNumber,
                         URL jobJarUrl, int workerStage, int totalStages, int numInstancesAtStage,
                         String jobName, int numPortsPerInstance, List<Parameter> parameters,
                         JobSla jobSla, List<JobConstraints> hardConstraints,
                         List<JobConstraints> softConstraints, SchedulingInfo schedulingInfo,
                         long subscriptionTimeoutSecs, long minRuntimeSecs, long jobSubmittedAt,
                         final String user, final Optional<String> preferredCluster) {
        this.definition = definition;
        this.jobId = jobId;
        this.workerIndex = workerIndex;
        this.workerNumber = workerNumber;
        this.jobJarUrl = jobJarUrl;
        this.workerStage = workerStage;
        this.totalStages = totalStages;
        this.numInstancesAtStage = numInstancesAtStage;
        this.jobName = jobName;
        // add additional ports for metricsPort, debugPort, consolePort and customPort
        this.numPortsPerInstance = numPortsPerInstance + 4;
        this.ports = new ArrayList<>();
        this.parameters = parameters;
        this.jobSla = jobSla;
        this.hardConstraints = hardConstraints;
        this.softConstraints = softConstraints;
        this.schedulingInfo = schedulingInfo;
        this.subscriptionTimeoutSecs = subscriptionTimeoutSecs;
        this.minRuntimeSecs = minRuntimeSecs;
        this.jobSubmittedAt = jobSubmittedAt;
        this.user = user;
        this.preferredCluster = preferredCluster;
    }

    // NOTE(review): adds 1 (not 4) to the machine definition's port count,
    // unlike the constructor above — confirm intended before relying on it.
    public static int getNumPortsPerInstance(MachineDefinition machineDefinition) {
        return machineDefinition.getNumPorts() + 1;
    }

    public SchedulingInfo getSchedulingInfo() {
        return schedulingInfo;
    }

    public List<Parameter> getParameters() {
        return parameters;
    }

    public MachineDefinition getDefinition() {
        return definition;
    }

    public String getJobId() {
        return jobId;
    }

    public int getWorkerIndex() {
        return workerIndex;
    }

    public int getWorkerNumber() {
        return workerNumber;
    }

    public URL getJobJarUrl() {
        return jobJarUrl;
    }

    public int getWorkerStage() {
        return workerStage;
    }

    public int getTotalStages() {
        return totalStages;
    }

    public int getNumInstancesAtStage() {
        return numInstancesAtStage;
    }

    public String getJobName() {
        return jobName;
    }

    public int getNumPortsPerInstance() {
        return numPortsPerInstance;
    }

    public int getMetricsPort() {
        return metricsPort;
    }

    public int getDebugPort() {
        return debugPort;
    }

    public int getConsolePort() {
        return consolePort;
    }

    public int getCustomPort() {
        return customPort;
    }

    /**
     * Records a newly assigned port. The first four calls fill the reserved
     * metrics, debug, console and custom slots in that order; subsequent
     * calls append to the general port list.
     */
    public void addPort(int port) {
        if (metricsPort == -1) {
            metricsPort = port; // fill metricsPort first
        } else if (debugPort == -1) {
            debugPort = port; // fill debug port next
        } else if (consolePort == -1) {
            consolePort = port; // fill console port next
        } else if (customPort == -1) {
            customPort = port; // fill custom port next
        } else {
            ports.add(port);
        }
    }

    public List<Integer> getPorts() {
        return ports;
    }

    /**
     * @return every assigned port: the general ports followed by the metrics,
     *         debug, console and custom ports (unassigned slots appear as -1)
     */
    public List<Integer> getAllPortsUsed() {
        final List<Integer> allPorts = new ArrayList<>(ports.size() + 4);
        allPorts.addAll(ports);
        allPorts.add(metricsPort);
        allPorts.add(debugPort);
        allPorts.add(consolePort);
        allPorts.add(customPort);
        return allPorts;
    }

    public JobSla getJobSla() {
        return jobSla;
    }

    public List<JobConstraints> getHardConstraints() {
        return hardConstraints;
    }

    public List<JobConstraints> getSoftConstraints() {
        return softConstraints;
    }

    public long getSubscriptionTimeoutSecs() {
        return subscriptionTimeoutSecs;
    }

    public long getMinRuntimeSecs() {
        return minRuntimeSecs;
    }

    public long getJobSubmittedAt() {
        return jobSubmittedAt;
    }

    public String getUser() {
        return user;
    }

    public Optional<String> getPreferredCluster() {
        return preferredCluster;
    }

    @Override
    public String toString() {
        return jobId + "-Stage-" + workerStage + "-Worker-" + workerIndex;
    }
}
4,269
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/JobRequest.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master; import io.mantisrx.runtime.MantisJobDefinition; import io.mantisrx.server.master.scheduler.MantisScheduler; import io.mantisrx.server.master.store.NamedJob; public class JobRequest { private final MantisJobMgr jobMgr = null; private String jobId; private MantisJobDefinition jobDefinition; public JobRequest(final String jobId, final MantisJobDefinition jobDefinition, final NamedJob namedJob, final MantisScheduler scheduler, final VirtualMachineMasterService vmService) { this.jobId = jobId; this.jobDefinition = jobDefinition; // this.jobMgr = new MantisJobMgr(jobId, jobDefinition, namedJob, scheduler, vmService); } public String getJobId() { return jobId; } public MantisJobDefinition getJobDefinition() { return jobDefinition; } public MantisJobMgr getJobMgr() { return jobMgr; } }
4,270
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/MasterMain.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;

import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.DeadLetter;
import akka.actor.Props;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import com.netflix.fenzo.AutoScaleAction;
import com.netflix.fenzo.AutoScaleRule;
import com.netflix.fenzo.VirtualMachineLease;
import com.sampullara.cli.Args;
import com.sampullara.cli.Argument;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.master.DeadLetterActor;
import io.mantisrx.master.JobClustersManagerActor;
import io.mantisrx.master.JobClustersManagerService;
import io.mantisrx.master.api.akka.MasterApiAkkaService;
import io.mantisrx.master.events.AuditEventBrokerActor;
import io.mantisrx.master.events.AuditEventSubscriber;
import io.mantisrx.master.events.AuditEventSubscriberAkkaImpl;
import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventPublisherImpl;
import io.mantisrx.master.events.StatusEventBrokerActor;
import io.mantisrx.master.events.StatusEventSubscriber;
import io.mantisrx.master.events.StatusEventSubscriberAkkaImpl;
import io.mantisrx.master.events.WorkerEventSubscriber;
import io.mantisrx.master.events.WorkerRegistryV2;
import io.mantisrx.master.scheduler.AgentsErrorMonitorActor;
import io.mantisrx.master.scheduler.JobMessageRouterImpl;
import io.mantisrx.master.vm.AgentClusterOperationsImpl;
import io.mantisrx.master.zk.LeaderElector;
import io.mantisrx.server.core.Service;
import io.mantisrx.server.core.json.DefaultObjectMapper;
import io.mantisrx.server.core.master.LocalMasterMonitor;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.core.metrics.MetricsPublisherService;
import io.mantisrx.server.core.metrics.MetricsServerService;
import io.mantisrx.server.core.zookeeper.CuratorService;
import io.mantisrx.server.master.config.ConfigurationFactory;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.config.MasterConfiguration;
import io.mantisrx.server.master.config.StaticPropertiesConfigurationFactory;
import io.mantisrx.server.master.mesos.MesosDriverSupplier;
import io.mantisrx.server.master.mesos.VirtualMachineMasterServiceMesosImpl;
import io.mantisrx.server.master.persistence.IMantisStorageProvider;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.persistence.MantisStorageProviderAdapter;
import io.mantisrx.server.master.scheduler.JobMessageRouter;
import io.mantisrx.server.master.scheduler.WorkerRegistry;
import org.apache.curator.utils.ZKPaths;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Observer;
import rx.subjects.PublishSubject;

/**
 * Entry point of the Mantis Master. The constructor wires the full service
 * graph (actor system, storage, scheduling, Mesos driver, API service, and —
 * unless running in local mode — Zookeeper-based leader election); services
 * are started in registration order by {@link #start()}, which blocks until
 * shutdown. Statement order in the constructor matters: each component is
 * constructed from the ones created before it.
 */
public class MasterMain implements Service {

    private static final Logger logger = LoggerFactory.getLogger(MasterMain.class);

    @Argument(alias = "p", description = "Specify a configuration file", required = false)
    private static String propFile = "master.properties";

    private final ServiceLifecycle mantisServices = new ServiceLifecycle();
    private final AtomicBoolean shutdownInitiated = new AtomicBoolean(false);
    // Released by shutdown(); start() awaits it.
    private final CountDownLatch blockUntilShutdown = new CountDownLatch(1);
    private volatile CuratorService curatorService = null;
    private volatile AgentClusterOperationsImpl agentClusterOps = null;
    private MasterConfiguration config;
    private SchedulingService schedulingService;
    private ILeadershipManager leadershipManager;

    public MasterMain(ConfigurationFactory configFactory, AuditEventSubscriber auditEventSubscriber) {
        Metrics metrics = new Metrics.Builder()
                .id("MasterMain")
                .addCounter("masterInitSuccess")
                .addCounter("masterInitError")
                .build();
        Metrics m = MetricsRegistry.getInstance().registerAndGet(metrics);
        try {
            ConfigurationProvider.initialize(configFactory);
            this.config = ConfigurationProvider.getConfig();
            leadershipManager = new LeadershipManagerZkImpl(config, mantisServices);
            Thread t = new Thread(() -> shutdown());
            t.setDaemon(true);
            // shutdown hook
            Runtime.getRuntime().addShutdownHook(t);

            // shared state
            PublishSubject<String> vmLeaseRescindedSubject = PublishSubject.create();

            final ActorSystem system = ActorSystem.create("MantisMaster");
            // log dead letter messages
            final ActorRef actor = system.actorOf(Props.create(DeadLetterActor.class), "MantisDeadLetter");
            system.eventStream().subscribe(actor, DeadLetter.class);

            //final IMantisStorageProvider mantisStorageProvider = new SimpleCachedFileStorageProvider(false);
            ActorRef agentsErrorMonitorActor = system.actorOf(AgentsErrorMonitorActor.props(), "AgentsErrorMonitor");
            ActorRef statusEventBrokerActor = system.actorOf(StatusEventBrokerActor.props(agentsErrorMonitorActor), "StatusEventBroker");
            ActorRef auditEventBrokerActor = system.actorOf(AuditEventBrokerActor.props(auditEventSubscriber), "AuditEventBroker");
            final StatusEventSubscriber statusEventSubscriber = new StatusEventSubscriberAkkaImpl(statusEventBrokerActor);
            final AuditEventSubscriber auditEventSubscriberAkka = new AuditEventSubscriberAkkaImpl(auditEventBrokerActor);
            final WorkerEventSubscriber workerEventSubscriber = WorkerRegistryV2.INSTANCE;

            // TODO who watches actors created at this level?
            final LifecycleEventPublisher lifecycleEventPublisher =
                    new LifecycleEventPublisherImpl(auditEventSubscriberAkka, statusEventSubscriber, workerEventSubscriber);

            IMantisStorageProvider storageProvider =
                    new MantisStorageProviderAdapter(this.config.getStorageProvider(), lifecycleEventPublisher);
            final MantisJobStore mantisJobStore = new MantisJobStore(storageProvider);
            final ActorRef jobClusterManagerActor =
                    system.actorOf(JobClustersManagerActor.props(mantisJobStore, lifecycleEventPublisher), "JobClustersManager");
            final JobMessageRouter jobMessageRouter = new JobMessageRouterImpl(jobClusterManagerActor);
            final WorkerRegistry workerRegistry = WorkerRegistryV2.INSTANCE;

            final MesosDriverSupplier mesosDriverSupplier =
                    new MesosDriverSupplier(this.config, vmLeaseRescindedSubject, jobMessageRouter, workerRegistry);
            final VirtualMachineMasterServiceMesosImpl vmService =
                    new VirtualMachineMasterServiceMesosImpl(this.config, getDescriptionJson(), mesosDriverSupplier);
            schedulingService = new SchedulingService(jobMessageRouter, workerRegistry, vmLeaseRescindedSubject, vmService);
            // The driver supplier needs the scheduler for offer handling, and the
            // scheduler needs the vm service — hence this late wiring.
            mesosDriverSupplier.setAddVMLeaseAction(schedulingService::addOffers);

            // initialize agents error monitor
            agentsErrorMonitorActor.tell(
                    new AgentsErrorMonitorActor.InitializeAgentsErrorMonitor(schedulingService), ActorRef.noSender());

            final boolean loadJobsFromStoreOnInit = true;
            final JobClustersManagerService jobClustersManagerService =
                    new JobClustersManagerService(jobClusterManagerActor, schedulingService, loadJobsFromStoreOnInit);

            this.agentClusterOps = new AgentClusterOperationsImpl(storageProvider,
                    jobMessageRouter,
                    schedulingService,
                    lifecycleEventPublisher,
                    ConfigurationProvider.getConfig().getActiveSlaveAttributeName());

            // start serving metrics
            if (config.getMasterMetricsPort() > 0)
                new MetricsServerService(config.getMasterMetricsPort(), 1, Collections.emptyMap()).start();
            new MetricsPublisherService(config.getMetricsPublisher(), config.getMetricsPublisherFrequencyInSeconds(),
                    new HashMap<>()).start();

            // services — registration order is start order (see ServiceLifecycle)
            mantisServices.addService(vmService);
            mantisServices.addService(schedulingService);
            mantisServices.addService(jobClustersManagerService);
            mantisServices.addService(agentClusterOps);
            if (this.config.isLocalMode()) {
                // Local mode: no Zookeeper; this node is unconditionally the leader.
                leadershipManager.becomeLeader();
                mantisServices.addService(new MasterApiAkkaService(new LocalMasterMonitor(leadershipManager.getDescription()),
                        leadershipManager.getDescription(), jobClusterManagerActor, statusEventBrokerActor,
                        config.getApiPort(), storageProvider, schedulingService, lifecycleEventPublisher,
                        leadershipManager, agentClusterOps));
            } else {
                curatorService = new CuratorService(this.config, leadershipManager.getDescription());
                curatorService.start();
                mantisServices.addService(createLeaderElector(curatorService, leadershipManager));
                mantisServices.addService(new MasterApiAkkaService(curatorService.getMasterMonitor(),
                        leadershipManager.getDescription(), jobClusterManagerActor, statusEventBrokerActor,
                        config.getApiPort(), storageProvider, schedulingService, lifecycleEventPublisher,
                        leadershipManager, agentClusterOps));
            }
            m.getCounter("masterInitSuccess").increment();
        } catch (Exception e) {
            logger.error("caught exception on Mantis Master initialization", e);
            m.getCounter("masterInitError").increment();
            shutdown();
            System.exit(1);
        }
    }

    /** Loads master configuration properties from the given file (cwd or classpath). */
    private static Properties loadProperties(String propFile) {
        // config
        Properties props = new Properties();
        try (InputStream in = findResourceAsStream(propFile)) {
            props.load(in);
        } catch (IOException e) {
            throw new RuntimeException(String.format("Can't load properties from the given property file %s: %s",
                    propFile, e.getMessage()), e);
        }
        return props;
    }

    /**
     * Finds the given resource and returns its input stream. This method seeks the file first from the current
     * working directory, and then in the class path.
     *
     * @param resourceName the name of the resource. It can either be a file name, or a path.
     *
     * @return An {@link java.io.InputStream} instance that represents the found resource. Null otherwise.
     *
     * @throws FileNotFoundException
     */
    private static InputStream findResourceAsStream(String resourceName) throws FileNotFoundException {
        File resource = new File(resourceName);
        if (resource.exists()) {
            return new FileInputStream(resource);
        }
        InputStream is = Thread.currentThread().getContextClassLoader().getResourceAsStream(resourceName);
        if (is == null) {
            throw new FileNotFoundException(
                    String.format("Can't find property file %s. Make sure the property file is either in your path or in your classpath ", resourceName));
        }
        return is;
    }

    /** Installs a no-op agent-cluster autoscaler with a single permissive dummy rule. */
    private static void setupDummyAgentClusterAutoScaler() {
        final AutoScaleRule dummyAutoScaleRule = new AutoScaleRule() {
            @Override
            public String getRuleName() {
                return "test";
            }

            @Override
            public int getMinIdleHostsToKeep() {
                return 1;
            }

            @Override
            public int getMaxIdleHostsToKeep() {
                return 10;
            }

            @Override
            public long getCoolDownSecs() {
                return 300;
            }

            @Override
            public boolean idleMachineTooSmall(VirtualMachineLease lease) {
                return false;
            }
        };
        AgentClustersAutoScaler.initialize(() -> new HashSet<>(Collections.singletonList(dummyAutoScaleRule)),
                new Observer<AutoScaleAction>() {
                    @Override
                    public void onCompleted() {
                    }

                    @Override
                    public void onError(Throwable e) {
                    }

                    @Override
                    public void onNext(AutoScaleAction autoScaleAction) {
                    }
                });
    }

    public static void main(String[] args) {
        try {
            Args.parse(MasterMain.class, args);
        } catch (IllegalArgumentException e) {
            Args.usage(MasterMain.class);
            System.exit(1);
        }
        try {
            StaticPropertiesConfigurationFactory factory =
                    new StaticPropertiesConfigurationFactory(loadProperties(propFile));
            setupDummyAgentClusterAutoScaler();
            final AuditEventSubscriber auditEventSubscriber = new AuditEventSubscriberLoggingImpl();
            MasterMain master = new MasterMain(factory, auditEventSubscriber);
            master.start(); // blocks until shutdown hook (ctrl-c)
        } catch (Exception e) {
            // unexpected to get a RuntimeException, will exit
            logger.error("Unexpected error: " + e.getMessage(), e);
            System.exit(2);
        }
    }

    /** Builds the Zookeeper-backed leader elector for this master's description. */
    private LeaderElector createLeaderElector(CuratorService curatorService, ILeadershipManager leadershipManager) {
        return LeaderElector.builder(leadershipManager)
                .withCurator(curatorService.getCurator())
                .withJsonMapper(DefaultObjectMapper.getInstance())
                .withElectionPath(ZKPaths.makePath(config.getZkRoot(), config.getLeaderElectionPath()))
                .withAnnouncementPath(ZKPaths.makePath(config.getZkRoot(), config.getLeaderAnnouncementPath()))
                .build();
    }

    @Override
    public void start() {
        logger.info("Starting Mantis Master");
        mantisServices.start();
        try {
            blockUntilShutdown.await();
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public void enterActiveMode() {
    }

    @Override
    public void shutdown() {
        // Guard so the shutdown hook and an explicit call don't both run the sequence.
        if (shutdownInitiated.compareAndSet(false, true)) {
            logger.info("Shutting down Mantis Master");
            mantisServices.shutdown();
            boolean shutdownCuratorEnabled = ConfigurationProvider.getConfig().getShutdownCuratorServiceEnabled();
            if (curatorService != null && shutdownCuratorEnabled) {
                logger.info("Shutting down Curator Service");
                curatorService.shutdown();
            } else {
                logger.info("not shutting down curator service {} shutdownEnabled? {}", curatorService, shutdownCuratorEnabled);
            }
            blockUntilShutdown.countDown();
        } else
            logger.info("Shutdown already initiated, not starting again");
    }

    public MasterConfiguration getConfig() {
        return config;
    }

    /** @return this master's description serialized as JSON (used by the Mesos framework registration). */
    public String getDescriptionJson() {
        try {
            return DefaultObjectMapper.getInstance().writeValueAsString(leadershipManager.getDescription());
        } catch (JsonProcessingException e) {
            throw new IllegalStateException(String.format("Failed to convert the description %s to JSON: %s",
                    leadershipManager.getDescription(), e.getMessage()), e);
        }
    }

    public AgentClusterOperationsImpl getAgentClusterOps() {
        return agentClusterOps;
    }

    public Consumer<String> getAgentVMEnabler() {
        return schedulingService::enableVM;
    }

    public Observable<MasterDescription> getMasterObservable() {
        return curatorService == null ? Observable.empty() : curatorService.getMasterMonitor().getMasterObservable();
    }

    public boolean isLeader() {
        return leadershipManager.isLeader();
    }
}
4,271
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/ServiceLifecycle.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master;

import java.util.Iterator;
import java.util.LinkedList;

import io.mantisrx.server.core.BaseService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Manage lifecycle of services. Services added in an order are started in the same order and shutdown in the reverse
 * order. Each service being added is implicitly given the previously added service as its predecessor. This class
 * currently represents a model of a linear list of services, each depending on only the previous service in the list.
 */
public class ServiceLifecycle {

    private static final Logger logger = LoggerFactory.getLogger(ServiceLifecycle.class);

    private final LinkedList<BaseService> servicesList = new LinkedList<>();

    /**
     * Registers a service, chaining it after the previously added one so that
     * start order equals registration order.
     */
    void addService(BaseService service) {
        if (!servicesList.isEmpty()) {
            service.addPredecessor(servicesList.getLast());
        }
        servicesList.add(service);
    }

    /**
     * Starts all services in registration order. A failure is logged and
     * rethrown, aborting the startup sequence.
     */
    void start() {
        for (BaseService service : servicesList) {
            try {
                logger.info("Starting service {}: {}", service.getMyServiceCount(), service);
                service.start();
                logger.info("Successfully started service {}: {}", service.getMyServiceCount(), service);
            } catch (Exception e) {
                logger.error(String.format("Failed to start service %d: %s: %s",
                        service.getMyServiceCount(), service, e.getMessage()), e);
                throw e;
            }
        }
    }

    /** Switches every service into active (leader) mode, in registration order. */
    void becomeLeader() {
        for (BaseService service : servicesList) {
            service.enterActiveMode();
        }
    }

    /** Shuts services down in reverse registration order. */
    void shutdown() {
        if (!servicesList.isEmpty()) {
            Iterator<BaseService> iterator = servicesList.descendingIterator();
            while (iterator.hasNext()) {
                BaseService service = iterator.next();
                logger.info("Shutting down service {}: {}", service.getMyServiceCount(), service);
                service.shutdown();
                logger.info("Successfully shut down service {}: {}", service.getMyServiceCount(), service);
            }
        }
    }
}
4,272
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/SchedulingService.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master; import static io.mantisrx.server.master.scheduler.ScheduleRequest.DEFAULT_Q_ATTRIBUTES; import java.util.ArrayList; import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; import java.util.stream.Collectors; import io.mantisrx.shaded.com.google.common.collect.Sets; import com.netflix.fenzo.AutoScaleAction; import com.netflix.fenzo.AutoScaleRule; import com.netflix.fenzo.SchedulingResult; import com.netflix.fenzo.TaskAssignmentResult; import com.netflix.fenzo.TaskRequest; import com.netflix.fenzo.TaskScheduler; import com.netflix.fenzo.TaskSchedulingService; import com.netflix.fenzo.VMAssignmentResult; import com.netflix.fenzo.VirtualMachineCurrentState; import com.netflix.fenzo.VirtualMachineLease; import com.netflix.fenzo.queues.TaskQueue; import com.netflix.fenzo.queues.TaskQueueException; import com.netflix.fenzo.queues.tiered.TieredQueue; import io.mantisrx.common.WorkerPorts; import io.mantisrx.common.metrics.Counter; import io.mantisrx.common.metrics.Gauge; import io.mantisrx.common.metrics.Metrics; import io.mantisrx.common.metrics.MetricsRegistry; import 
io.mantisrx.common.metrics.spectator.GaugeCallback;
import io.mantisrx.common.metrics.spectator.MetricGroupId;
import io.mantisrx.common.metrics.spectator.MetricId;
import io.mantisrx.common.metrics.spectator.SpectatorRegistryFactory;
import io.mantisrx.server.core.BaseService;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.scheduler.JobMessageRouter;
import io.mantisrx.server.master.scheduler.LaunchTaskRequest;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.ScheduleRequest;
import io.mantisrx.server.master.scheduler.SchedulingStateManager;
import io.mantisrx.server.master.scheduler.WorkerLaunchFailed;
import io.mantisrx.server.master.scheduler.WorkerLaunched;
import io.mantisrx.server.master.scheduler.WorkerRegistry;
import io.mantisrx.server.master.scheduler.WorkerUnscheduleable;
import org.HdrHistogram.SynchronizedHistogram;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Observer;
import rx.functions.Action1;
import rx.schedulers.Schedulers;

//import io.mantisrx.server.core.domain.WorkerPorts;

/**
 * Bridges the Mantis master's scheduling needs to the Fenzo task scheduler.
 *
 * <p>Responsibilities visible in this class:
 * <ul>
 *   <li>queues {@link ScheduleRequest}s into a Fenzo {@code TieredQueue} and runs
 *       Fenzo's {@code TaskSchedulingService} iteration loop;</li>
 *   <li>launches assigned tasks via the {@code VirtualMachineMasterService} (Mesos)
 *       and routes worker lifecycle events through the {@link JobMessageRouter};</li>
 *   <li>keeps Fenzo's agent-cluster autoscale rules refreshed on a periodic timer;</li>
 *   <li>publishes an extensive set of scheduling/agent/resource-utilization metrics.</li>
 * </ul>
 */
public class SchedulingService extends BaseService implements MantisScheduler {

    private static final Logger logger = LoggerFactory.getLogger(SchedulingService.class);

    // Collaborators
    private final JobMessageRouter jobMessageRouter;
    private final WorkerRegistry workerRegistry;
    private final TaskScheduler taskScheduler;
    private final TaskSchedulingService taskSchedulingService;
    private final TieredQueue taskQueue;

    // Per-iteration scheduling metrics
    private final Counter numWorkersLaunched;
    private final Counter numResourceOffersReceived;
    private final Counter numResourceAllocations;
    private final Counter numResourceOffersRejected;
    private final Gauge workersToLaunch;
    private final Gauge pendingWorkers;
    private final Gauge schedulerRunMillis;
    private final Counter perWorkerSchedulingTimeMs;
    // Histogram of accepted->launched latency per worker; 1 hour max trackable value, 3 significant digits.
    private final SynchronizedHistogram workerAcceptedToLaunchedDistMs = new SynchronizedHistogram(3_600_000L, 3);

    // Agent fleet / resource utilization metrics
    private final Gauge totalActiveAgents;
    private final Counter numAgentsUsed;
    private final Gauge idleAgents;
    private final Gauge totalAvailableCPUs;
    private final Gauge totalAllocatedCPUs;
    private final Gauge totalAvailableMemory;
    private final Gauge totalAllocatedMemory;
    private final Gauge totalAvailableNwMbps;
    private final Gauge totalAllocatedNwMbps;
    private final Gauge cpuUtilization;
    private final Gauge memoryUtilization;
    private final Gauge networkUtilization;
    private final Gauge dominantResUtilization;
    private final Gauge fenzoLaunchedTasks;
    private final Gauge jobMgrRunningWorkers;
    private final Counter numAutoScaleUpActions;
    private final Counter numAutoScaleDownActions;
    private final Counter numMissingWorkerPorts;
    private final Counter schedulingResultExceptions;
    private final Counter schedulingCallbackExceptions;

    private final SchedulingStateManager schedulingState;
    private final AtomicInteger idleMachinesCount = new AtomicInteger();
    private final String slaveClusterAttributeName;
    // How often (ms) to refresh the cached VM state snapshot used for resource-usage reporting.
    private final long vmCurrentStatesCheckInterval = 10000;
    private final AtomicLong lastVmCurrentStatesCheckDone = new AtomicLong(System.currentTimeMillis());
    private VirtualMachineMasterService virtualMachineService;
    // Defaults; iteration interval is overwritten from configuration in the constructor.
    private long SCHEDULING_ITERATION_INTERVAL_MILLIS = 50;
    private long MAX_DELAY_MILLIS_BETWEEN_SCHEDULING_ITER = 5_000;
    // Timestamp of the last scheduling-result callback; exported via the watcher gauge for liveness monitoring.
    private AtomicLong lastSchedulingResultCallback = new AtomicLong(System.currentTimeMillis());

    /**
     * Builds the Fenzo scheduler + scheduling service, wires the lease-rescind and
     * autoscale callbacks, starts the autoscale-rule updater and registers all metrics.
     */
    public SchedulingService(final JobMessageRouter jobMessageRouter,
                             final WorkerRegistry workerRegistry,
                             final Observable<String> vmLeaseRescindedObservable,
                             final VirtualMachineMasterService virtualMachineService) {
        super(true);
        this.schedulingState = new SchedulingStateManager();
        this.jobMessageRouter = jobMessageRouter;
        this.workerRegistry = workerRegistry;
        this.virtualMachineService = virtualMachineService;
        this.slaveClusterAttributeName = ConfigurationProvider.getConfig().getSlaveClusterAttributeName();
        SCHEDULING_ITERATION_INTERVAL_MILLIS = ConfigurationProvider.getConfig().getSchedulerIterationIntervalMillis();
        AgentFitnessCalculator agentFitnessCalculator = new AgentFitnessCalculator();
        TaskScheduler.Builder schedulerBuilder = new TaskScheduler.Builder()
                .withLeaseRejectAction(virtualMachineService::rejectLease)
                .withLeaseOfferExpirySecs(ConfigurationProvider.getConfig().getMesosLeaseOfferExpirySecs())
                .withFitnessCalculator(agentFitnessCalculator)
                .withFitnessGoodEnoughFunction(agentFitnessCalculator.getFitnessGoodEnoughFunc())
                .withAutoScaleByAttributeName(ConfigurationProvider.getConfig().getAutoscaleByAttributeName()); // set this always
        if (ConfigurationProvider.getConfig().getDisableShortfallEvaluation())
            schedulerBuilder = schedulerBuilder.disableShortfallEvaluation();
        taskScheduler = setupTaskSchedulerAndAutoScaler(vmLeaseRescindedObservable, schedulerBuilder);
        taskScheduler.setActiveVmGroupAttributeName(ConfigurationProvider.getConfig().getActiveSlaveAttributeName());
        taskQueue = new TieredQueue(2); // two priority tiers
        taskSchedulingService = setupTaskSchedulingService(taskScheduler);
        setupAutoscaleRulesDynamicUpdater();

        // Build and register all metrics, then capture handles from the *registered* instance.
        MetricGroupId metricGroupId = new MetricGroupId(SchedulingService.class.getCanonicalName());
        Metrics m = new Metrics.Builder()
                .id(metricGroupId)
                .addCounter("numWorkersLaunched")
                .addCounter("numResourceOffersReceived")
                .addCounter("numResourceAllocations")
                .addCounter("numResourceOffersRejected")
                .addGauge("workersToLaunch")
                .addGauge("pendingWorkers")
                .addGauge("schedulerRunMillis")
                .addCounter("perWorkerSchedulingTimeMillis")
                .addGauge(new GaugeCallback(metricGroupId, "workerAcceptedToLaunchedMsP50",
                        () -> (double) workerAcceptedToLaunchedDistMs.getValueAtPercentile(50)))
                .addGauge(new GaugeCallback(metricGroupId, "workerAcceptedToLaunchedMsP95",
                        () -> (double) workerAcceptedToLaunchedDistMs.getValueAtPercentile(95)))
                .addGauge(new GaugeCallback(metricGroupId, "workerAcceptedToLaunchedMsP99",
                        () -> (double) workerAcceptedToLaunchedDistMs.getValueAtPercentile(99)))
                .addGauge(new GaugeCallback(metricGroupId, "workerAcceptedToLaunchedMsMax",
                        () -> (double) workerAcceptedToLaunchedDistMs.getValueAtPercentile(100)))
                .addGauge("totalActiveAgents")
                .addCounter("numAgentsUsed")
                .addGauge("idleAgents")
                .addGauge("totalAvailableCPUs")
                .addGauge("totalAllocatedCPUs")
                .addGauge("totalAvailableMemory")
                .addGauge("totalAllocatedMemory")
                .addGauge("totalAvailableNwMbps")
                .addGauge("totalAllocatedNwMbps")
                .addGauge("cpuUtilization")
                .addGauge("memoryUtilization")
                .addGauge("networkUtilization")
                .addGauge("dominantResUtilization")
                .addCounter("numAutoScaleUpActions")
                .addCounter("numAutoScaleDownActions")
                .addGauge("fenzoLaunchedTasks")
                .addGauge("jobMgrRunningWorkers")
                .addCounter("numMissingWorkerPorts")
                .addCounter("schedulingResultExceptions")
                .addCounter("schedulingCallbackExceptions")
                .build();
        m = MetricsRegistry.getInstance().registerAndGet(m);
        numWorkersLaunched = m.getCounter("numWorkersLaunched");
        numResourceOffersReceived = m.getCounter("numResourceOffersReceived");
        numResourceAllocations = m.getCounter("numResourceAllocations");
        numResourceOffersRejected = m.getCounter("numResourceOffersRejected");
        workersToLaunch = m.getGauge("workersToLaunch");
        pendingWorkers = m.getGauge("pendingWorkers");
        schedulerRunMillis = m.getGauge("schedulerRunMillis");
        totalActiveAgents = m.getGauge("totalActiveAgents");
        numAgentsUsed = m.getCounter("numAgentsUsed");
        idleAgents = m.getGauge("idleAgents");
        totalAvailableCPUs = m.getGauge("totalAvailableCPUs");
        totalAllocatedCPUs = m.getGauge("totalAllocatedCPUs");
        totalAvailableMemory = m.getGauge("totalAvailableMemory");
        totalAllocatedMemory = m.getGauge("totalAllocatedMemory");
        totalAvailableNwMbps = m.getGauge("totalAvailableNwMbps");
        totalAllocatedNwMbps = m.getGauge("totalAllocatedNwMbps");
        cpuUtilization = m.getGauge("cpuUtilization");
        memoryUtilization = m.getGauge("memoryUtilization");
        networkUtilization = m.getGauge("networkUtilization");
        dominantResUtilization = m.getGauge("dominantResUtilization");
        numAutoScaleUpActions = m.getCounter("numAutoScaleUpActions");
        numAutoScaleDownActions = m.getCounter("numAutoScaleDownActions");
        fenzoLaunchedTasks = m.getGauge("fenzoLaunchedTasks");
        jobMgrRunningWorkers = m.getGauge("jobMgrRunningWorkers");
        numMissingWorkerPorts = m.getCounter("numMissingWorkerPorts");
        schedulingResultExceptions = m.getCounter("schedulingResultExceptions");
        schedulingCallbackExceptions = m.getCounter("schedulingCallbackExceptions");
        perWorkerSchedulingTimeMs = m.getCounter("perWorkerSchedulingTimeMillis");
    }

    /**
     * Finishes building the Fenzo {@code TaskScheduler}: installs the current autoscale
     * rules (if an {@code AgentClustersAutoScaler} is configured), subscribes lease
     * expiration to the lease-rescind observable, and wires autoscale actions to the
     * autoscaler's observer while counting up/down actions.
     */
    private TaskScheduler setupTaskSchedulerAndAutoScaler(Observable<String> vmLeaseRescindedObservable,
                                                          TaskScheduler.Builder schedulerBuilder) {
        int minMinIdle = 4;
        schedulerBuilder = schedulerBuilder
                .withAutoScaleDownBalancedByAttributeName(ConfigurationProvider.getConfig().getHostZoneAttributeName())
                .withAutoScalerMapHostnameAttributeName(ConfigurationProvider.getConfig().getAutoScalerMapHostnameAttributeName());
        final AgentClustersAutoScaler agentClustersAutoScaler = AgentClustersAutoScaler.get();
        try {
            if (agentClustersAutoScaler != null) {
                Set<AutoScaleRule> rules = agentClustersAutoScaler.getRules();
                if (rules != null && !rules.isEmpty()) {
                    for (AutoScaleRule rule : rules) {
                        schedulerBuilder = schedulerBuilder.withAutoScaleRule(rule);
                        // track the smallest min-idle across rules to bound offer rejection below
                        minMinIdle = Math.min(minMinIdle, rule.getMinIdleHostsToKeep());
                    }
                } else
                    logger.warn("No auto scale rules setup");
            }
        } catch (IllegalStateException e) {
            logger.warn("Ignoring: " + e.getMessage());
        }
        schedulerBuilder = schedulerBuilder.withMaxOffersToReject(Math.max(1, minMinIdle));
        final TaskScheduler scheduler = schedulerBuilder.build();
        // Expire leases when Mesos rescinds them; the sentinel "ALL" expires everything.
        vmLeaseRescindedObservable
                .doOnNext(new Action1<String>() {
                    @Override
                    public void call(String s) {
                        if (s.equals("ALL"))
                            scheduler.expireAllLeases();
                        else
                            scheduler.expireLease(s);
                    }
                })
                .subscribe();
        if (agentClustersAutoScaler != null) {
            final Observer<AutoScaleAction> autoScaleActionObserver = agentClustersAutoScaler.getAutoScaleActionObserver();
            scheduler.setAutoscalerCallback(new com.netflix.fenzo.functions.Action1<AutoScaleAction>() {
                @Override
                public void call(AutoScaleAction action) {
                    try {
                        switch (action.getType()) {
                        case Up:
                            numAutoScaleUpActions.increment();
                            break;
                        case Down:
                            numAutoScaleDownActions.increment();
                            break;
                        }
                        autoScaleActionObserver.onNext(action);
                    } catch (Exception e) {
                        logger.warn("Will continue after exception calling autoscale action observer: " + e.getMessage(), e);
                    }
                }
            });
        }
        return scheduler;
    }

    /**
     * Once a minute, reconciles Fenzo's autoscale rules against the current rule set of
     * the {@code AgentClustersAutoScaler}: adds/replaces current rules, removes stale ones.
     */
    private void setupAutoscaleRulesDynamicUpdater() {
        final Set<String> emptyHashSet = new HashSet<>();
        Schedulers.computation().createWorker().schedulePeriodically(() -> {
            try {
                logger.debug("Updating cluster autoscale rules");
                final AgentClustersAutoScaler agentClustersAutoScaler = AgentClustersAutoScaler.get();
                if (agentClustersAutoScaler == null) {
                    logger.warn("No agent cluster autoscaler defined, not setting up Fenzo autoscaler rules");
                    return;
                }
                final Set<AutoScaleRule> newRules = agentClustersAutoScaler.getRules();
                final Collection<AutoScaleRule> currRules = taskScheduler.getAutoScaleRules();
                // Names of currently installed rules; whatever is left after the add loop is stale.
                final Set<String> currRulesNames = currRules == null || currRules.isEmpty() ?
                        emptyHashSet :
                        currRules.stream().collect((Supplier<Set<String>>) HashSet::new,
                                (strings, autoScaleRule) -> strings.add(autoScaleRule.getRuleName()),
                                Set::addAll);
                if (newRules != null && !newRules.isEmpty()) {
                    for (AutoScaleRule r : newRules) {
                        logger.debug("Setting up autoscale rule: " + r);
                        taskScheduler.addOrReplaceAutoScaleRule(r);
                        currRulesNames.remove(r.getRuleName());
                    }
                }
                if (!currRulesNames.isEmpty()) {
                    for (String ruleName : currRulesNames) {
                        logger.info("Removing autoscale rule " + ruleName);
                        taskScheduler.removeAutoScaleRule(ruleName);
                    }
                }
            } catch (Exception e) {
                logger.warn("Unexpected error updating cluster autoscale rules: " + e.getMessage());
            }
        }, 1, 1, TimeUnit.MINUTES);
    }

    /** Builds the Fenzo scheduling loop around {@link #schedulingResultHandler(SchedulingResult)}. */
    private TaskSchedulingService setupTaskSchedulingService(TaskScheduler taskScheduler) {
        TaskSchedulingService.Builder builder = new TaskSchedulingService.Builder()
                .withTaskScheduler(taskScheduler)
                .withLoopIntervalMillis(SCHEDULING_ITERATION_INTERVAL_MILLIS)
                .withMaxDelayMillis(MAX_DELAY_MILLIS_BETWEEN_SCHEDULING_ITER) // sort of rate limiting when no assignments were made and no new offers available
                .withSchedulingResultCallback(this::schedulingResultHandler)
                .withTaskQueue(taskQueue)
                .withOptimizingShortfallEvaluator();
        return builder.build();
    }

    /** Reads a named text attribute from a lease's attribute map, if present. */
    private Optional<String> getAttribute(final VirtualMachineLease lease, final String attributeName) {
        boolean hasValue = lease.getAttributeMap() != null
                && lease.getAttributeMap().get(attributeName) != null
                && lease.getAttributeMap().get(attributeName).getText().hasValue();
        return hasValue ?
                Optional.of(lease.getAttributeMap().get(attributeName).getText().getValue()) :
                Optional.empty();
    }

    /**
     * Attempts to launch tasks given some number of leases from Mesos.
     *
     * When a task is launched successfully, the following will happen:
     *
     * 1. Emit a {@link WorkerLaunched} event to be handled by the corresponding actor.
     * 2. Makes a call to the underlying Mesos driver to launch the task.
     *
     * A task can fail to launch if:
     *
     * 1. It doesn't receive enough metadata for {@link WorkerPorts} to pass its preconditions.
     *    - No launch task request will be made for this assignment result.
     *    - Proactively unschedule the worker.
     * 2. It fails to emit a {@link WorkerLaunched} event.
     *    - The worker will get unscheduled for this launch task request.
     * 3. There are no launch tasks for this assignment result.
     *    - All of these leases are rejected.
     *    - Eventually, the underlying Mesos driver will decline offers since there are no launch task requests.
     *
     * @param requests collection of assignment results received by the scheduler.
     * @param leases   list of resource offers from Mesos.
     */
    private void launchTasks(Collection<TaskAssignmentResult> requests, List<VirtualMachineLease> leases) {
        List<LaunchTaskRequest> launchTaskRequests = new ArrayList<>();
        for (TaskAssignmentResult assignmentResult : requests) {
            ScheduleRequest request = (ScheduleRequest) assignmentResult.getRequest();
            WorkerPorts workerPorts = null;
            try {
                workerPorts = new WorkerPorts(assignmentResult.getAssignedPorts());
            } catch (IllegalArgumentException | IllegalStateException e) {
                logger.error("problem launching tasks for assignment result {}: {}", assignmentResult, e);
                numMissingWorkerPorts.increment();
            }
            if (workerPorts != null) {
                // NOTE: all assignments in this call use the first lease's host metadata;
                // presumably every lease in the list is from the same agent — TODO confirm.
                boolean success = jobMessageRouter.routeWorkerEvent(new WorkerLaunched(
                        request.getWorkerId(),
                        request.getStageNum(),
                        leases.get(0).hostname(),
                        leases.get(0).getVMID(),
                        getAttribute(leases.get(0), slaveClusterAttributeName),
                        workerPorts));
                if (success) {
                    launchTaskRequests.add(new LaunchTaskRequest(request, workerPorts));
                } else {
                    unscheduleWorker(request.getWorkerId(), Optional.ofNullable(leases.get(0).hostname()));
                }
            } else {
                unscheduleWorker(request.getWorkerId(), Optional.ofNullable(leases.get(0).hostname()));
            }
        }
        if (launchTaskRequests.isEmpty()) {
            // nothing to launch; give all the offers back
            for (VirtualMachineLease l : leases)
                virtualMachineService.rejectLease(l);
        }
        Map<ScheduleRequest, LaunchTaskException> launchErrors = virtualMachineService.launchTasks(launchTaskRequests, leases);
        for (TaskAssignmentResult result : requests) {
            final ScheduleRequest sre = (ScheduleRequest) result.getRequest();
            if (launchErrors.containsKey(sre)) {
                String errorMessage = getWorkerStringPrefix(sre.getStageNum(), sre.getWorkerId()) +
                        " failed due to " + launchErrors.get(sre).getMessage();
                boolean success = jobMessageRouter.routeWorkerEvent(new WorkerLaunchFailed(sre.getWorkerId(),
                        sre.getStageNum(), errorMessage));
                if (!success) {
                    logger.warn("Failed to route WorkerLaunchFailed for {} (err {})", sre.getWorkerId(), errorMessage);
                }
            }
        }
    }

    /** Human-readable worker identifier used in error messages. */
    private String getWorkerStringPrefix(int stageNum, final WorkerId workerId) {
        return "stage " + stageNum + " worker index=" + workerId.getWorkerIndex() + " number=" + workerId.getWorkerNum();
    }

    /**
     * Callback invoked by Fenzo after every scheduling iteration: launches assigned
     * tasks, notifies unscheduleable workers, updates iteration metrics, and
     * periodically refreshes the cached VM state snapshot. Never throws — all
     * exceptions are caught and counted.
     */
    private void schedulingResultHandler(SchedulingResult schedulingResult) {
        try {
            lastSchedulingResultCallback.set(System.currentTimeMillis());
            final List<Exception> exceptions = schedulingResult.getExceptions();
            for (Exception exc : exceptions) {
                logger.error("Scheduling result got exception: {}", exc.getMessage(), exc);
                schedulingResultExceptions.increment();
            }
            int workersLaunched = 0;
            SchedulerCounters.getInstance().incrementResourceAllocationTrials(schedulingResult.getNumAllocations());
            Map<String, VMAssignmentResult> assignmentResultMap = schedulingResult.getResultMap();
            final int assignmentResultSize;
            if (assignmentResultMap != null) {
                assignmentResultSize = assignmentResultMap.size();
                long now = System.currentTimeMillis();
                for (Map.Entry<String, VMAssignmentResult> aResult : assignmentResultMap.entrySet()) {
                    launchTasks(aResult.getValue().getTasksAssigned(), aResult.getValue().getLeasesUsed());
                    for (TaskAssignmentResult r : aResult.getValue().getTasksAssigned()) {
                        final ScheduleRequest request = (ScheduleRequest) r.getRequest();
                        final Optional<Long> acceptedAt = workerRegistry.getAcceptedAt(request.getWorkerId());
                        acceptedAt.ifPresent(acceptedAtTime -> workerAcceptedToLaunchedDistMs.recordValue(now - acceptedAtTime));
                        perWorkerSchedulingTimeMs.increment(now - request.getReadyAt());
                    }
                    workersLaunched += aResult.getValue().getTasksAssigned().size();
                }
            } else {
                assignmentResultSize = 0;
            }
            // for workers that didn't get scheduled, rate limit them
            for (Map.Entry<TaskRequest, List<TaskAssignmentResult>> entry : schedulingResult.getFailures().entrySet()) {
                final ScheduleRequest req = (ScheduleRequest) entry.getKey();
                boolean success = jobMessageRouter.routeWorkerEvent(new WorkerUnscheduleable(req.getWorkerId(), req.getStageNum()));
                if (!success) {
                    logger.warn("Failed to route {} WorkerUnscheduleable event", req.getWorkerId());
                    if (logger.isTraceEnabled()) {
                        logger.trace("Unscheduleable worker {} assignmentresults {}", req.getWorkerId(), entry.getValue());
                    }
                }
            }
            numWorkersLaunched.increment(workersLaunched);
            numResourceOffersReceived.increment(schedulingResult.getLeasesAdded());
            numResourceAllocations.increment(schedulingResult.getNumAllocations());
            numResourceOffersRejected.increment(schedulingResult.getLeasesRejected());
            final int requestedWorkers = workersLaunched + schedulingResult.getFailures().size();
            workersToLaunch.set(requestedWorkers);
            pendingWorkers.set(schedulingResult.getFailures().size());
            schedulerRunMillis.set(schedulingResult.getRuntime());
            totalActiveAgents.set(schedulingResult.getTotalVMsCount());
            numAgentsUsed.increment(assignmentResultSize);
            final int idleVMsCount = schedulingResult.getIdleVMsCount();
            idleAgents.set(idleVMsCount);
            SchedulerCounters.getInstance().endIteration(requestedWorkers, workersLaunched, assignmentResultSize,
                    schedulingResult.getLeasesRejected());
            // log a summary every 10th iteration that had work to do
            if (requestedWorkers > 0 && SchedulerCounters.getInstance().getCounter().getIterationNumber() % 10 == 0) {
                logger.info("Scheduling iteration result: " + SchedulerCounters.getInstance().toJsonString());
            }
            if (idleVMsCount != idleMachinesCount.get()) {
                logger.info("Idle machines: " + idleVMsCount);
                idleMachinesCount.set(idleVMsCount);
            }
            try {
                // refresh the VM state snapshot at most once per vmCurrentStatesCheckInterval
                taskSchedulingService.requestVmCurrentStates(vmCurrentStates -> {
                    if (lastVmCurrentStatesCheckDone.get() < (System.currentTimeMillis() - vmCurrentStatesCheckInterval)) {
                        schedulingState.setVMCurrentState(vmCurrentStates);
                        verifyAndReportResUsageMetrics(vmCurrentStates);
                        lastVmCurrentStatesCheckDone.set(System.currentTimeMillis());
                    }
                });
            } catch (final TaskQueueException e) {
                logger.warn("got exception requesting VM states from Fenzo", e);
            }
            publishJobManagerAndFenzoWorkerMetrics();
        } catch (final Exception e) {
            logger.error("unexpected exception in scheduling result callback", e);
            schedulingCallbackExceptions.increment();
        }
    }

    /** Informs Fenzo of a task that is already running (e.g. after master failover). */
    @Override
    public void initializeRunningWorker(final ScheduleRequest request, String hostname) {
        taskSchedulingService.initializeRunningTask(request, hostname);
    }

    /** Enqueues a worker for scheduling in the next iteration. */
    @Override
    public void scheduleWorker(final ScheduleRequest scheduleRequest) {
        taskQueue.queueTask(scheduleRequest);
    }

    /** Removes a worker from Fenzo's queue/assignments without killing its task. */
    @Override
    public void unscheduleWorker(final WorkerId workerId, final Optional<String> hostname) {
        taskSchedulingService.removeTask(workerId.getId(), DEFAULT_Q_ATTRIBUTES, hostname.orElse(null));
    }

    /** Removes a worker from Fenzo and kills its task on the agent. */
    @Override
    public void unscheduleAndTerminateWorker(final WorkerId workerId, final Optional<String> hostname) {
        taskSchedulingService.removeTask(workerId.getId(), DEFAULT_Q_ATTRIBUTES, hostname.orElse(null));
        virtualMachineService.killTask(workerId);
    }

    /** Defers a queued worker's eligibility for scheduling until {@code when} (epoch millis). */
    @Override
    public void updateWorkerSchedulingReadyTime(WorkerId workerId, long when) {
        if (logger.isTraceEnabled()) {
            logger.trace("setting task {} ready time to {}", workerId, new DateTime(when));
        }
        taskSchedulingService.setTaskReadyTime(workerId.toString(), DEFAULT_Q_ATTRIBUTES, when);
    }

    /** Expires a single lease, or every lease when the sentinel "ALL" is passed. */
    @Override
    public void rescindOffer(final String offerId) {
        if (offerId.equals("ALL")) {
            taskScheduler.expireAllLeases();
        } else {
            taskScheduler.expireLease(offerId);
        }
    }

    @Override
    public void addOffers(final List<VirtualMachineLease> offers) {
        taskSchedulingService.addLeases(offers);
    }

    /** Expires all leases for a given agent hostname. */
    @Override
    public void rescindOffers(final String hostname) {
        taskScheduler.expireAllLeases(hostname);
    }

    @Override
    public void disableVM(String hostname, long durationMillis) throws IllegalStateException {
        taskScheduler.disableVM(hostname, durationMillis);
    }

    @Override
    public void enableVM(final String hostname) {
        taskScheduler.enableVM(hostname);
    }

    /** @return the most recent cached VM state snapshot (refreshed in the result handler). */
    @Override
    public List<VirtualMachineCurrentState> getCurrentVMState() {
        return schedulingState.getVMCurrentState();
    }

    @Override
    public void setActiveVmGroups(final List<String> activeVmGroups) {
        if (activeVmGroups != null) {
            taskScheduler.setActiveVmGroups(activeVmGroups);
        }
    }

    /** Registers a gauge exposing the time since the last scheduling-result callback. */
    private void setupSchedulingServiceWatcherMetric() {
        logger.info("Setting up SchedulingServiceWatcher metrics");
        lastSchedulingResultCallback.set(System.currentTimeMillis());
        final String metricGroup = "SchedulingServiceWatcher";
        final GaugeCallback timeSinceLastSchedulingRunGauge = new GaugeCallback(
                new MetricId(metricGroup, "timeSinceLastSchedulingRunMs"),
                () -> (double) (System.currentTimeMillis() - lastSchedulingResultCallback.get()),
                SpectatorRegistryFactory.getRegistry());
        final Metrics schedulingServiceWatcherMetrics = new Metrics.Builder()
                .id(metricGroup)
                .addGauge(timeSinceLastSchedulingRunGauge)
                .build();
        MetricsRegistry.getInstance().registerAndGet(schedulingServiceWatcherMetrics);
    }

    /** Starts the Fenzo scheduling loop once this node enters active (leader) mode. */
    @Override
    public void start() {
        super.awaitActiveModeAndStart(() -> {
            logger.info("Scheduling service starting now");
            taskSchedulingService.start();
            setupSchedulingServiceWatcherMetric();
            if (logger.isDebugEnabled()) {
                try {
                    taskSchedulingService.requestAllTasks(taskStateCollectionMap ->
                            taskStateCollectionMap.forEach((state, tasks) -> {
                                logger.debug("state {} tasks {}", state, tasks.toString());
                            }));
                } catch (TaskQueueException e) {
                    logger.error("caught exception", e);
                }
            }
        });
    }

    /**
     * Compares Fenzo's LAUNCHED task count against the Job Manager's running-worker
     * count, publishing both gauges and logging the symmetric difference on mismatch.
     */
    private void publishJobManagerAndFenzoWorkerMetrics() {
        try {
            taskSchedulingService.requestAllTasks(taskStateCollectionMap -> taskStateCollectionMap.forEach((state, tasks) -> {
                final int fenzoTaskSetSize = tasks.size();
                if (state == TaskQueue.TaskState.LAUNCHED) {
                    final int numRunningWorkers = workerRegistry.getNumRunningWorkers();
                    fenzoLaunchedTasks.set(fenzoTaskSetSize);
                    jobMgrRunningWorkers.set(numRunningWorkers);
                    if (numRunningWorkers != fenzoTaskSetSize) {
                        logger.error("{} running workers as per Job Manager, {} tasks launched as per Fenzo",
                                numRunningWorkers, fenzoTaskSetSize);
                        if (logger.isDebugEnabled()) {
                            final Set<String> jobMgrWorkers = workerRegistry.getAllRunningWorkers()
                                    .stream()
                                    .map(w -> w.getId())
                                    .collect(Collectors.toSet());
                            final Set<String> fenzoWorkers = tasks
                                    .stream()
                                    .map(t -> t.getId())
                                    .collect(Collectors.toSet());
                            final Sets.SetView<String> extraJobMgrWorkers = Sets.difference(jobMgrWorkers, fenzoWorkers);
                            logger.debug("Job Manager workers not in Fenzo {}", extraJobMgrWorkers);
                            final Sets.SetView<String> extraFenzoWorkers = Sets.difference(fenzoWorkers, jobMgrWorkers);
                            logger.debug("Fenzo workers not in JobManagers {}", extraFenzoWorkers);
                        }
                    }
                } else {
                    logger.debug("{} {} tasks {}", fenzoTaskSetSize, state, tasks);
                }
            }));
        } catch (Exception e) {
            logger.error("caught exception when publishing worker metrics", e);
        }
    }

    /**
     * Aggregates CPU/memory/network totals and usage across the VM state snapshot,
     * removing any Fenzo task whose worker the registry no longer considers valid,
     * then publishes utilization gauges and the dominant-resource utilization.
     */
    private void verifyAndReportResUsageMetrics(List<VirtualMachineCurrentState> vmCurrentStates) {
        double totalCPU = 0.0;
        double usedCPU = 0.0;
        double totalMemory = 0.0;
        double usedMemory = 0.0;
        double totalNwMbps = 0.0;
        double usedNwMbps = 0.0;
        for (VirtualMachineCurrentState state : vmCurrentStates) {
            final VirtualMachineLease currAvailableResources = state.getCurrAvailableResources();
            if (currAvailableResources != null) {
                totalCPU += currAvailableResources.cpuCores();
                totalMemory += currAvailableResources.memoryMB();
                totalNwMbps += currAvailableResources.networkMbps();
            }
            final Collection<TaskRequest> runningTasks = state.getRunningTasks();
            if (runningTasks != null) {
                for (TaskRequest t : runningTasks) {
                    Optional<WorkerId> workerId = WorkerId.fromId(t.getId());
                    if (!workerId.isPresent() || !workerRegistry.isWorkerValid(workerId.get())) {
                        // stale or unparseable task: evict it from Fenzo
                        taskSchedulingService.removeTask(t.getId(), DEFAULT_Q_ATTRIBUTES, state.getHostname());
                    } else {
                        // running tasks count toward both used and total capacity
                        usedCPU += t.getCPUs();
                        totalCPU += t.getCPUs();
                        usedMemory += t.getMemory();
                        totalMemory += t.getMemory();
                        usedNwMbps += t.getNetworkMbps();
                        totalNwMbps += t.getNetworkMbps();
                    }
                }
            }
        }
        totalAvailableCPUs.set((long) totalCPU);
        totalAllocatedCPUs.set((long) usedCPU);
        cpuUtilization.set((long) (usedCPU * 100.0 / totalCPU));
        double DRU = usedCPU * 100.0 / totalCPU;
        totalAvailableMemory.set((long) totalMemory);
        totalAllocatedMemory.set((long) usedMemory);
        memoryUtilization.set((long) (usedMemory * 100.0 / totalMemory));
        DRU = Math.max(DRU, usedMemory * 100.0 / totalMemory);
        totalAvailableNwMbps.set((long) totalNwMbps);
        totalAllocatedNwMbps.set((long) usedNwMbps);
        networkUtilization.set((long) (usedNwMbps * 100.0 / totalNwMbps));
        DRU = Math.max(DRU, usedNwMbps * 100.0 / totalNwMbps);
        dominantResUtilization.set((long) DRU);
    }

    @Override
    public void shutdown() {
        if (!taskSchedulingService.isShutdown()) {
            logger.info("shutting down Task Scheduling Service");
            taskSchedulingService.shutdown();
        }
    }
}
4,273
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/WorkerJobDetails.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master;

import java.net.URL;

/**
 * Immutable holder tying together a submitted job's identity (name, id, user),
 * the location of its job JAR artifact, and the originating {@link JobRequest}.
 */
public class WorkerJobDetails {

    private final String user;
    private final String jobId;
    private final String jobName;
    private final URL jobJarUrl;
    private final JobRequest request;

    /**
     * @param user      the user who submitted the job
     * @param jobId     unique identifier of the job
     * @param jobJarUrl location of the job's JAR artifact
     * @param request   the originating job request
     * @param jobName   human-readable job name
     */
    public WorkerJobDetails(String user, String jobId, URL jobJarUrl, JobRequest request, String jobName) {
        this.user = user;
        this.jobId = jobId;
        this.jobJarUrl = jobJarUrl;
        this.request = request;
        this.jobName = jobName;
    }

    public String getUser() {
        return user;
    }

    public String getJobId() {
        return jobId;
    }

    public String getJobName() {
        return jobName;
    }

    public URL getJobJarUrl() {
        return jobJarUrl;
    }

    public JobRequest getRequest() {
        return request;
    }

    /** Convenience accessor delegating to the request's job manager. */
    public MantisJobMgr getJobMgr() {
        return request.getJobMgr();
    }

    @Override
    public String toString() {
        return String.format("jobName=%s, jobId=%s, jobUrl=%s", jobName, jobId, jobJarUrl);
    }
}
4,274
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/ILeadershipManager.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master;

import io.mantisrx.server.core.master.MasterDescription;

/**
 * Manages this node's Mantis master leadership lifecycle: transitioning into and
 * out of leadership, and tracking whether a newly elected leader has finished
 * bootstrapping ("ready") and can serve requests.
 */
public interface ILeadershipManager {

    /** Execute all actions required when this node becomes the leader. */
    void becomeLeader();

    /** Actions to execute when losing leadership. */
    void stopBeingLeader();

    /** @return {@code true} if this node is the current leader. */
    boolean isLeader();

    /**
     * Check if the leader is bootstrapped and ready after becoming leader.
     * A leader may briefly be elected but not yet ready to serve.
     */
    boolean isReady();

    /** Mark the leader as bootstrapped and ready to serve. */
    void setLeaderReady();

    /** @return the {@link MasterDescription} of the node executing this function. */
    MasterDescription getDescription();
}
4,275
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/LeaderRedirectionFilter.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master; import java.net.InetAddress; import java.net.UnknownHostException; import akka.http.javadsl.model.StatusCodes; import akka.http.javadsl.model.Uri; import akka.http.javadsl.server.AllDirectives; import akka.http.javadsl.server.Route; import io.mantisrx.common.metrics.Counter; import io.mantisrx.common.metrics.Metrics; import io.mantisrx.server.core.master.MasterDescription; import io.mantisrx.server.core.master.MasterMonitor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class LeaderRedirectionFilter extends AllDirectives { public static final Logger logger = LoggerFactory.getLogger(LeaderRedirectionFilter.class); private final MasterMonitor masterMonitor; private final ILeadershipManager leadershipManager; private final Counter api503MasterNotReady; private final Counter apiRedirectsToLeader; public LeaderRedirectionFilter(final MasterMonitor masterMonitor, final ILeadershipManager leadershipManager) { this.masterMonitor = masterMonitor; this.leadershipManager = leadershipManager; Metrics m = new Metrics.Builder() .id("LeaderRedirectionFilter") .addCounter("api503MasterNotReady") .addCounter("apiRedirectsToLeader") .build(); this.api503MasterNotReady = m.getCounter("api503MasterNotReady"); this.apiRedirectsToLeader = m.getCounter("apiRedirectsToLeader"); } private boolean isLocalHost(MasterDescription master) { try { InetAddress localHost = 
InetAddress.getLocalHost(); for (InetAddress addr : InetAddress.getAllByName(master.getHostname())) { if (addr.equals(localHost)) { return true; } } } catch (UnknownHostException e) { //logger.warn("Failed to compare if given master {} is local host: {}", master, e); return false; } return false; } public Route redirectIfNotLeader(final Route leaderRoute) { MasterDescription latestMaster = masterMonitor.getLatestMaster(); if (leadershipManager.isLeader() || isLocalHost(latestMaster)) { if (leadershipManager.isReady()) { return leaderRoute; } else { return extractUri(uri -> { logger.info("leader is not ready, returning 503 for {}", uri); api503MasterNotReady.increment(); return complete(StatusCodes.SERVICE_UNAVAILABLE, "Mantis master awaiting to be ready"); }); } } else { String hostname = latestMaster.getHostname(); int apiPort = latestMaster.getApiPort(); return extractUri(uri -> { Uri redirectUri = uri.host(hostname).port(apiPort); apiRedirectsToLeader.increment(); logger.info("redirecting request {} to leader", redirectUri.toString()); return redirect(redirectUri, StatusCodes.FOUND); }); } } }
4,276
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/InvalidJobRequest.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master;

/**
 * Thrown when a submitted {@link JobRequest} is rejected as invalid; carries the
 * offending request alongside the failure message or underlying cause.
 */
public class InvalidJobRequest extends Exception {

    private static final long serialVersionUID = 1L;

    private final JobRequest request;

    /** @param request the rejected request; @param message why it was rejected */
    public InvalidJobRequest(JobRequest request, String message) {
        super(message);
        this.request = request;
    }

    /** @param request the rejected request; @param cause the underlying failure */
    public InvalidJobRequest(JobRequest request, Throwable cause) {
        super(cause);
        this.request = request;
    }

    /** @return the request that failed validation */
    public JobRequest getRequest() {
        return request;
    }
}
4,277
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/ClusterFitnessCalculator.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master; import java.util.Optional; import com.netflix.fenzo.TaskRequest; import com.netflix.fenzo.TaskTrackerState; import com.netflix.fenzo.VMTaskFitnessCalculator; import com.netflix.fenzo.VirtualMachineCurrentState; import com.netflix.fenzo.VirtualMachineLease; import io.mantisrx.server.master.config.ConfigurationProvider; import io.mantisrx.server.master.scheduler.ScheduleRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class ClusterFitnessCalculator implements VMTaskFitnessCalculator { private static final Logger logger = LoggerFactory.getLogger(ClusterFitnessCalculator.class); private final String clusterAttributeName; public ClusterFitnessCalculator() { clusterAttributeName = ConfigurationProvider.getConfig().getSlaveClusterAttributeName(); } private Optional<String> getAttribute(final VirtualMachineLease lease, final String attributeName) { boolean hasValue = lease.getAttributeMap() != null && lease.getAttributeMap().get(attributeName) != null && lease.getAttributeMap().get(attributeName).getText().hasValue(); return hasValue ? 
Optional.of(lease.getAttributeMap().get(attributeName).getText().getValue()) : Optional.empty(); } @Override public String getName() { return "Mantis Job Cluster Fitness Calculator"; } @Override public double calculateFitness(TaskRequest taskRequest, VirtualMachineCurrentState targetVM, TaskTrackerState taskTrackerState) { final Optional<String> preferredCluster = ((ScheduleRequest) taskRequest) .getPreferredCluster(); if (preferredCluster.isPresent()) { // task has a preferred cluster set, check if the preferred cluster matches the targetVM final Optional<String> targetVMCluster = getAttribute(targetVM.getCurrAvailableResources(), clusterAttributeName); if (!targetVMCluster.isPresent() || !targetVMCluster.get().equals(preferredCluster.get())) { // the target VM cluster is missing or does not match, not an ideal fit for this request if (logger.isDebugEnabled()) { logger.debug("preferred cluster {} targetVM cluster {}", preferredCluster.get(), targetVMCluster.orElse("missing")); } return 0.8; } } // the task request does not have a preference for a particular cluster or the targetVM cluster matches the preferred cluster // so this VM is a perfect fit, can defer to other fitness criteria for selection return 1.0; } }
4,278
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/config/ConfigurationFactory.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.config;

/**
 * An implementation of this interface should return an instance of
 * {@link io.mantisrx.server.master.config.MasterConfiguration}. This factory exists because the
 * logic of creating a {@link io.mantisrx.server.master.config.MasterConfiguration} can change
 * depending on the user or environment.
 *
 * @see ConfigurationProvider
 */
public interface ConfigurationFactory {

    /**
     * @return the master configuration supplied by this factory
     */
    MasterConfiguration getConfig();
}
4,279
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/config/ConfigurationProvider.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.config; /** * Provides static and global access to configuration objects. The method {@link ConfigurationProvider#initialize(ConfigurationFactory)} * must be called before this class can be used. * * @see io.mantisrx.server.master.config.ConfigurationFactory */ public class ConfigurationProvider { private static ConfigurationFactory factory; public static void initialize(ConfigurationFactory aFactory) { factory = aFactory; } // For testing only static ConfigurationFactory reset() { ConfigurationFactory current = factory; factory = null; return current; } /** * @return a {@link io.mantisrx.server.master.config.MasterConfiguration} object. * * @throws IllegalStateException if the method {@link ConfigurationProvider#initialize(ConfigurationFactory)} is not * called yet. */ public static MasterConfiguration getConfig() { if (factory == null) { throw new IllegalStateException(String.format("%s#initialize() must be called first. ", ConfigurationFactory.class.getName())); } return factory.getConfig(); } }
4,280
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/config/MasterConfiguration.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.config;

import io.mantisrx.server.core.CoreConfiguration;
import io.mantisrx.server.master.store.MantisStorageProvider;
import org.skife.config.Config;
import org.skife.config.Default;
import org.skife.config.DefaultNull;

/**
 * Configuration contract for the Mantis master, materialized via skife config-magic from
 * properties. Each getter is bound to the property named in its {@code @Config} annotation;
 * {@code @Default}/{@code @DefaultNull} supply the value used when the property is unset.
 */
public interface MasterConfiguration extends CoreConfiguration {

    // --- Ports and endpoints exposed by the master ---

    @Config("mantis.master.consoleport")
    int getConsolePort();

    @Config("mantis.master.apiport")
    int getApiPort();

    @Config("mantis.master.schedInfoPort")
    int getSchedInfoPort();

    @Config("mantis.master.apiportv2")
    int getApiPortV2();

    @Config("mantis.master.metrics.port")
    int getMasterMetricsPort();

    @Config("mantis.master.api.status.path")
    String getApiStatusUri();

    // Storage provider implementation; the property value is a class name coerced to an instance.
    @Config("mantis.master.storageProvider")
    MantisStorageProvider getStorageProvider();

    @Config("mantis.master.host")
    @DefaultNull
    String getMasterHost();

    @Config("mantis.master.ip")
    @DefaultNull
    String getMasterIP();

    @Config("mesos.worker.timeoutSecondsToReportStart")
    @Default("10")
    int getTimeoutSecondsToReportStart();

    @Config("mantis.master.leader.mismatch.retry.count")
    @Default("5")
    int getMasterLeaderMismatchRetryCount();

    @Config("master.shutdown.curator.service.enabled")
    @Default("true")
    boolean getShutdownCuratorServiceEnabled();

    @Config("mantis.master.api.route.ask.timeout.millis")
    @Default("1000")
    long getMasterApiAskTimeoutMs();

    // --- Mesos and worker-provisioning settings ---

    @Config("mesos.master.location")
    @Default("localhost:5050")
    String getMasterLocation();

    @Config("mesos.worker.installDir")
    String getWorkerInstallDir();

    @Config("mesos.worker.executorscript")
    @Default("startup.sh")
    String getWorkerExecutorScript();

    @Config("mantis.worker.machine.definition.maxCpuCores")
    @Default("8")
    int getWorkerMachineDefinitionMaxCpuCores();

    @Config("mantis.worker.machine.definition.maxMemoryMB")
    @Default("28000")
    int getWorkerMachineDefinitionMaxMemoryMB();

    @Config("mantis.worker.machine.definition.maxNetworkMbps")
    @Default("1024")
    int getWorkerMachineDefinitionMaxNetworkMbps();

    @Config("mantis.master.max.workers.per.stage")
    @Default("1500")
    int getMaxWorkersPerStage();

    @Config("mantis.master.worker.jvm.memory.scale.back.percent")
    @Default("10")
    int getWorkerJvmMemoryScaleBackPercentage();

    // --- Agent (slave) filtering and attribute names ---

    @Config("mesos.useSlaveFiltering")
    @Default("false")
    boolean getUseSlaveFiltering();

    @Config("mesos.slaveFilter.attributeName")
    @Default("EC2_AMI_ID")
    String getSlaveFilterAttributeName();

    @Config("mantis.master.active.slave.attribute.name")
    @Default("NETFLIX_AUTO_SCALE_GROUP")
    String getActiveSlaveAttributeName();

    @Config("mantis.master.slave.cluster.attribute.name")
    @Default("CLUSTER_NAME")
    String getSlaveClusterAttributeName();

    // --- Fitness-calculator weights (should sum sensibly with the threshold below) ---

    @Config("mantis.master.agent.fitness.cluster.weight")
    @Default("0.2")
    double getPreferredClusterFitnessWeight();

    @Config("mantis.master.agent.fitness.durationtype.weight")
    @Default("0.5")
    double getDurationTypeFitnessWeight();

    @Config("mantis.master.agent.fitness.binpacking.weight")
    @Default("0.3")
    double getBinPackingFitnessWeight();

    // Threshold value compared should make sense with the 3 fitness weights above that
    // aggregates the weighted results from individual fitness calculators.
    @Config("mantis.master.agent.fitness.goodenough.threshold")
    @Default("0.63")
    double getFitnessGoodEnoughThreshold();

    @Config("mantis.master.framework.name")
    @Default("MantisFramework")
    String getMantisFrameworkName();

    @Config("mantis.worker.executor.name")
    @Default("Mantis Worker Executor")
    String getWorkerExecutorName();

    @Config("mantis.master.mesos.failover.timeout.secs")
    @Default("604800.0")
    // 604800 secs = 1 week
    double getMesosFailoverTimeOutSecs();

    // --- Scheduler loop and heartbeat settings ---

    // Sleep interval between consecutive scheduler iterations
    @Config("mantis.master.scheduler.iteration.interval.millis")
    @Default("50")
    long getSchedulerIterationIntervalMillis();

    @Config("mantis.master.scheduler.disable.slave.duration.secs")
    @Default("60")
    long getDisableSlaveDurationSecs();

    @Config("mantis.zookeeper.leader.election.path")
    String getLeaderElectionPath();

    @Config("mantis.worker.heartbeat.interval.secs")
    @Default("60")
    long getWorkerTimeoutSecs();

    @Config("mantis.worker.heartbeat.receipts.min.threshold.percent")
    @Default("55")
    double getHeartbeatReceiptsMinThresholdPercentage();

    @Config("mantis.master.stage.assignment.refresh.interval.ms")
    @Default("1000")
    long getStageAssignmentRefreshIntervalMs();

    @Config("mantis.worker.heartbeat.termination.enabled")
    @Default("true")
    boolean isHeartbeatTerminationEnabled();

    @Config("mantis.worker.heartbeat.processing.enabled")
    @Default("true")
    boolean isHeartbeatProcessingEnabled();

    @Config("mantis.interval.move.workers.disabled.vms.millis")
    @Default("60000")
    long getIntervalMoveWorkersOnDisabledVMsMillis();

    @Config("mesos.task.reconciliation.interval.secs")
    @Default("300")
    long getMesosTaskReconciliationIntervalSecs();

    @Config("mesos.lease.offer.expiry.secs")
    @Default("300")
    long getMesosLeaseOfferExpirySecs();

    // --- Job lifecycle, resubmission, and purge settings ---

    @Config("mantis.jobs.max.jars.per.named.job")
    @Default("10")
    int getMaximumNumberOfJarsPerJobName();

    @Config("mantis.worker.resubmissions.maximum")
    @Default("100")
    int getMaximumResubmissionsPerWorker();

    // Colon-separated backoff delays (seconds) applied to successive worker resubmits.
    @Config("mantis.worker.resubmission.interval.secs")
    @Default("5:10:20")
    String getWorkerResubmitIntervalSecs();

    @Config("mantis.worker.expire.resubmit.delay.secs")
    @Default("300")
    long getExpireWorkerResubmitDelaySecs();

    @Config("mantis.worker.expire.resubmit.execution.interval.secs")
    @Default("120")
    long getExpireResubmitDelayExecutionIntervalSecs();

    @Config("mantis.master.purge.frequency.secs")
    @Default("1200")
    long getCompletedJobPurgeFrequencySeqs();

    @Config("mantis.master.purge.size")
    @Default("50")
    int getMaxJobsToPurge();

    @Config("mantis.worker.state.launched.timeout.millis")
    @Default("7000")
    long getWorkerInLaunchedStateTimeoutMillis();

    @Config("mantis.master.store.worker.writes.batch.size")
    @Default("100")
    int getWorkerWriteBatchSize();

    @Config("mantis.master.ephemeral.job.unsubscribed.timeout.secs")
    @Default("300")
    long getEphemeralJobUnsubscribedTimeoutSecs();

    @Config("mantis.master.init.timeout.secs")
    @Default("240")
    long getMasterInitTimeoutSecs();

    @Config("mantis.master.terminated.job.to.delete.delay.hours")
    @Default("360")
    // 15 days * 24 hours
    long getTerminatedJobToDeleteDelayHours();

    @Config("mantis.master.max.archived.jobs.to.cache")
    @Default("1000")
    int getMaxArchivedJobsToCache();

    // --- Autoscaler settings ---

    @Config("mesos.slave.attribute.zone.name")
    @Default("AWSZone")
    String getHostZoneAttributeName();

    @Config("mantis.agent.cluster.autoscale.by.attribute.name")
    @Default("CLUSTER_NAME")
    String getAutoscaleByAttributeName();

    @Config("mantis.agent.cluster.autoscaler.map.hostname.attribute.name")
    @Default("EC2_INSTANCE_ID")
    String getAutoScalerMapHostnameAttributeName();

    @Config("mantis.agent.cluster.autoscaler.shortfall.evaluation.disabled")
    @Default("false")
    boolean getDisableShortfallEvaluation();

    @Config("mantis.scheduling.info.observable.heartbeat.interval.secs")
    @Default("120")
    long getSchedulingInfoObservableHeartbeatIntervalSecs();

    // --- Resource sizing for the job master worker ---

    @Config("mantis.job.master.scheduling.info.cores")
    @Default("2.0")
    double getJobMasterCores();

    @Config("mantis.job.master.scheduling.info.memoryMB")
    @Default("4096.0")
    double getJobMasterMemoryMB();

    @Config("mantis.job.master.scheduling.info.networkMbps")
    @Default("128.0")
    double getJobMasterNetworkMbps();

    @Config("mantis.job.master.scheduling.info.diskMB")
    @Default("100.0")
    double getJobMasterDiskMB();

    // --- API response cache tuning ---

    @Config("mantis.master.api.cache.ttl.milliseconds")
    @Default("250")
    int getApiCacheTtlMilliseconds();

    @Config("mantis.master.api.cache.size.max")
    @Default("50")
    int getApiCacheMaxSize();

    @Config("mantis.master.api.cache.size.min")
    @Default("5")
    int getApiCacheMinSize();
}
4,281
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/config/StaticPropertiesConfigurationFactory.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.config;

import java.util.Properties;

import io.mantisrx.server.core.MetricsCoercer;
import io.mantisrx.server.master.store.MantisStorageProvider;
import org.skife.config.Coercer;
import org.skife.config.Coercible;
import org.skife.config.ConfigurationObjectFactory;

/**
 * {@link ConfigurationFactory} that builds a single {@link MasterConfiguration} from a fixed
 * {@link Properties} snapshot at construction time. Registers coercers so that metrics settings
 * and the {@code mantis.master.storageProvider} class-name property can be materialized into
 * objects by config-magic.
 */
public class StaticPropertiesConfigurationFactory implements ConfigurationFactory {

    private final ConfigurationObjectFactory delegate;
    // Built once in the constructor; getConfig() always returns this same instance.
    private final MasterConfiguration config;

    /**
     * @param props property snapshot used to bind every {@code @Config} getter of
     *              {@link MasterConfiguration}
     *
     * @throws IllegalArgumentException if the storage-provider property does not name an
     *                                  instantiable {@link MantisStorageProvider} implementation
     */
    public StaticPropertiesConfigurationFactory(Properties props) {
        delegate = new ConfigurationObjectFactory(props);
        delegate.addCoercible(new MetricsCoercer(props));
        //        delegate.addCoercible(new MantisPropertiesCoercer(props));
        delegate.addCoercible(new Coercible<MantisStorageProvider>() {
            @Override
            public Coercer<MantisStorageProvider> accept(Class<?> clazz) {
                if (MantisStorageProvider.class.isAssignableFrom(clazz)) {
                    return new Coercer<MantisStorageProvider>() {
                        @Override
                        public MantisStorageProvider coerce(String className) {
                            try {
                                // Invoke the no-arg constructor explicitly; Class#newInstance is
                                // deprecated and silently rethrows checked constructor exceptions.
                                return (MantisStorageProvider) Class.forName(className)
                                        .getDeclaredConstructor()
                                        .newInstance();
                            } catch (Exception e) {
                                // Attach the cause so the real failure (missing class, bad ctor,
                                // ctor exception) is visible in the stack trace.
                                throw new IllegalArgumentException(
                                        String.format(
                                                "The value %s is not a valid class name for %s implementation. ",
                                                className,
                                                MantisStorageProvider.class.getName()
                                        ), e);
                            }
                        }
                    };
                }
                return null;
            }
        });
        config = delegate.build(MasterConfiguration.class);
    }

    @Override
    public MasterConfiguration getConfig() {
        return this.config;
    }

    @Override
    public String toString() {
        return "StaticPropertiesConfigurationFactory{" +
                "delegate=" + delegate +
                ", config=" + config +
                '}';
    }
}
4,282
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/agentdeploy/MigrationStrategyFactory.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.agentdeploy; import io.mantisrx.runtime.MigrationStrategy; import io.mantisrx.runtime.WorkerMigrationConfig; import io.mantisrx.server.master.utils.MantisSystemClock; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class MigrationStrategyFactory { private static final Logger logger = LoggerFactory.getLogger(MigrationStrategyFactory.class); public static MigrationStrategy getStrategy(final String jobId, final WorkerMigrationConfig config) { switch (config.getStrategy()) { case PERCENTAGE: return new PercentageMigrationStrategy(MantisSystemClock.INSTANCE, jobId, config); case ONE_WORKER: return new OneWorkerPerTickMigrationStrategy(MantisSystemClock.INSTANCE, jobId, config); default: logger.error("unknown strategy type {} in config {}, using default strategy to migrate 25 percent every 1 min", config.getStrategy(), config); return new PercentageMigrationStrategy(MantisSystemClock.INSTANCE, jobId, new WorkerMigrationConfig( WorkerMigrationConfig.MigrationStrategyEnum.PERCENTAGE, "{\"percentToMove\":25,\"intervalMs\":60000}")); } } }
4,283
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/agentdeploy/PercentageMigrationStrategy.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.agentdeploy;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentSkipListSet;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.runtime.MigrationStrategy;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.utils.MantisClock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Migration strategy that, no more often than a configured interval, moves up to a configured
 * percentage of a job's total workers off disabled VMs per invocation. Migration is suppressed
 * while the job already has at least that many workers in a non-running state, so a job is never
 * pushed to all-inactive by migration alone.
 */
public class PercentageMigrationStrategy extends MigrationStrategy {

    private static final Logger logger = LoggerFactory.getLogger(PercentageMigrationStrategy.class);
    // Percentage used when the per-job config string cannot be parsed.
    private static final int DEFAULT_PERCENT_WORKERS = 10;
    private static final ObjectMapper objectMapper = new ObjectMapper();

    static {
        // Tolerate extra fields in the user-supplied JSON config string.
        objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    }

    private final MantisClock clock;
    private final String jobId;
    // Parsed percent/interval settings; falls back to defaults when parsing fails.
    private final Configuration configuration;

    /**
     * @param clock clock used to rate-limit migrations
     * @param jobId job whose workers this strategy migrates
     * @param config migration config whose JSON config string carries percentToMove/intervalMs
     */
    public PercentageMigrationStrategy(final MantisClock clock,
                                       final String jobId,
                                       final WorkerMigrationConfig config) {
        super(config);
        this.clock = clock;
        this.jobId = jobId;
        long defaultMigrationIntervalMs;
        try {
            defaultMigrationIntervalMs = ConfigurationProvider.getConfig().getIntervalMoveWorkersOnDisabledVMsMillis();
        } catch (IllegalStateException ise) {
            // ConfigurationProvider not initialized (e.g. in tests); use a 1 minute default.
            logger.warn("Error reading intervalMoveWorkersOnDisabledVMsMillis from config Provider, will default to 1 minute");
            defaultMigrationIntervalMs = 60_000L;
        }
        configuration = parseConfig(config.getConfigString(), defaultMigrationIntervalMs);
    }

    /**
     * Parses the JSON config string; on failure returns a default of
     * {@link #DEFAULT_PERCENT_WORKERS} percent every {@code defaultMigrationIntervalMs} millis.
     */
    Configuration parseConfig(final String configuration, final long defaultMigrationIntervalMs) {
        try {
            return objectMapper.readValue(configuration, Configuration.class);
        } catch (IOException e) {
            logger.error("failed to parse config '{}' for job {}, default to {} percent workers migrated every {} millis",
                    configuration, jobId, DEFAULT_PERCENT_WORKERS, defaultMigrationIntervalMs);
            return new Configuration(DEFAULT_PERCENT_WORKERS, defaultMigrationIntervalMs);
        }
    }

    /**
     * Picks the workers to migrate on this tick. Removes the chosen worker numbers from
     * {@code workersOnDisabledVms} (via pollFirst) and returns them; returns an empty list when
     * rate-limited, when there is nothing to move, or when enough workers are already inactive.
     *
     * @param workersOnDisabledVms       worker numbers currently on disabled VMs (mutated)
     * @param numRunningWorkers          workers currently in a running state
     * @param totalNumWorkers            total workers for the job
     * @param lastWorkerMigrationTimestamp time of the last migration for this job
     */
    @Override
    public List<Integer> execute(final ConcurrentSkipListSet<Integer> workersOnDisabledVms,
                                 final int numRunningWorkers,
                                 final int totalNumWorkers,
                                 final long lastWorkerMigrationTimestamp) {
        // Rate limit: skip this tick if the last migration is too recent.
        if (lastWorkerMigrationTimestamp > (clock.now() - configuration.getIntervalMs())) {
            return Collections.emptyList();
        }
        if (workersOnDisabledVms.isEmpty()) {
            return Collections.emptyList();
        }
        final int numWorkersOnDisabledVM = workersOnDisabledVms.size();
        final int numInactiveWorkers = totalNumWorkers - numRunningWorkers;
        // At least 1 worker, at most percentToMove% of the total (rounded up), capped by availability.
        int numWorkersToMigrate = Math.min(numWorkersOnDisabledVM,
                Math.max(1, (int) Math.ceil(totalNumWorkers * configuration.getPercentToMove() / 100.0)));
        // If we already have inactive workers for the job, don't migrate more workers as we could
        // end up with all workers in not running state for a job
        if (numInactiveWorkers >= numWorkersToMigrate) {
            logger.debug("[{}] num inactive workers {} > num workers to migrate {}, suppressing percent migrate",
                    jobId, numInactiveWorkers, numWorkersToMigrate);
            return Collections.emptyList();
        } else {
            // ensure no more than percentToMove workers for the job are in inactive state
            numWorkersToMigrate = numWorkersToMigrate - numInactiveWorkers;
        }
        final List<Integer> workersToMigrate = new ArrayList<>(numWorkersToMigrate);
        for (int i = numWorkersToMigrate; i > 0; i--) {
            // pollFirst both selects and removes, so a worker is only ever migrated once.
            final Integer workerToMigrate = workersOnDisabledVms.pollFirst();
            if (workerToMigrate != null) {
                workersToMigrate.add(workerToMigrate);
            }
        }
        if (workersToMigrate.size() > 0) {
            logger.debug("migrating jobId {} workers {}", jobId, workersToMigrate);
        }
        return workersToMigrate;
    }

    public Configuration getConfiguration() {
        return configuration;
    }

    /** JSON-bound settings: percentage of workers to move per tick and minimum interval. */
    static class Configuration {

        private final int percentToMove;
        private final long intervalMs;

        @JsonCreator
        @JsonIgnoreProperties(ignoreUnknown = true)
        public Configuration(@JsonProperty("percentToMove") final int percentToMove,
                             @JsonProperty("intervalMs") final long intervalMs) {
            this.percentToMove = percentToMove;
            this.intervalMs = intervalMs;
        }

        public int getPercentToMove() {
            return percentToMove;
        }

        public long getIntervalMs() {
            return intervalMs;
        }
    }
}
4,284
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/agentdeploy/OneWorkerPerTickMigrationStrategy.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.agentdeploy;

import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentSkipListSet;

import io.mantisrx.runtime.MigrationStrategy;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.utils.MantisClock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Migration strategy that moves at most one worker off a disabled VM per invocation,
 * rate limited so consecutive migrations for a job are separated by a configured interval.
 */
public class OneWorkerPerTickMigrationStrategy extends MigrationStrategy {

    private static final Logger logger = LoggerFactory.getLogger(OneWorkerPerTickMigrationStrategy.class);

    private final String jobId;
    private final MantisClock clock;
    // Minimum time between two migrations for this job, in milliseconds.
    private long intervalMoveWorkersOnDisabledVMsMillis;

    /**
     * @param clock clock used to rate-limit migrations
     * @param jobId job whose workers this strategy migrates
     * @param config the job's worker migration configuration
     */
    public OneWorkerPerTickMigrationStrategy(final MantisClock clock,
                                             final String jobId,
                                             final WorkerMigrationConfig config) {
        super(config);
        this.clock = clock;
        this.jobId = jobId;
        try {
            this.intervalMoveWorkersOnDisabledVMsMillis =
                    ConfigurationProvider.getConfig().getIntervalMoveWorkersOnDisabledVMsMillis();
        } catch (IllegalStateException ise) {
            // ConfigurationProvider not initialized (e.g. in tests); use a 1 minute default.
            logger.warn("[{}] Error reading intervalMoveWorkersOnDisabledVMsMillis from config Provider, will default to 1 minute", jobId);
            this.intervalMoveWorkersOnDisabledVMsMillis = 60_000L;
        }
    }

    /**
     * Removes and returns at most one worker number from {@code workersOnDisabledVms}, or an
     * empty list when rate-limited or when no worker is waiting to move.
     */
    @Override
    public List<Integer> execute(final ConcurrentSkipListSet<Integer> workersOnDisabledVms,
                                 final int numRunningWorkers,
                                 final int totalNumWorkers,
                                 final long lastMovedWorkerOnDisabledVM) {
        final long oldestAllowedMoveTime = clock.now() - intervalMoveWorkersOnDisabledVMsMillis;
        if (lastMovedWorkerOnDisabledVM > oldestAllowedMoveTime) {
            // A worker was moved too recently for this job; wait for a later tick.
            return Collections.emptyList();
        }
        final Integer candidate = workersOnDisabledVms.pollFirst();
        return (candidate == null) ? Collections.emptyList() : Collections.singletonList(candidate);
    }
}
4,285
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/scheduler/ScheduleRequest.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.scheduler;

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;

import com.netflix.fenzo.ConstraintEvaluator;
import com.netflix.fenzo.VMTaskFitnessCalculator;
import com.netflix.fenzo.queues.QAttributes;
import com.netflix.fenzo.queues.QueuableTask;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.server.core.domain.JobMetadata;
import io.mantisrx.server.core.domain.WorkerId;

/**
 * A Fenzo {@link QueuableTask} describing one Mantis worker to be scheduled: its identity,
 * resource needs ({@link MachineDefinition}), constraints, and optional cluster preference.
 * Identity (equals/hashCode) is based solely on the worker id.
 */
public class ScheduleRequest implements QueuableTask {

    /** All schedule requests share a single default queue bucket at tier 0. */
    public static final QAttributes DEFAULT_Q_ATTRIBUTES = new QAttributes() {
        @Override
        public String getBucketName() {
            return "default";
        }

        @Override
        public int getTierNumber() {
            return 0;
        }
    };

    // Fenzo task-group name shared by all Mantis schedule requests.
    private static final String TASK_GROUP_NAME = "defaultGrp";

    private final WorkerId workerId;
    private final int stageNum;
    private final int numPortsRequested;
    private final JobMetadata jobMetadata;
    private final MantisJobDurationType durationType;
    private final MachineDefinition machineDefinition;
    private final List<ConstraintEvaluator> hardConstraints;
    private final List<VMTaskFitnessCalculator> softConstraints;
    private final Optional<String> preferredCluster;
    // Mutated by Fenzo via safeSetReadyAt(); volatile so scheduler threads see updates.
    private volatile long readyAt;

    public ScheduleRequest(final WorkerId workerId,
                           final int stageNum,
                           final int numPortsRequested,
                           final JobMetadata jobMetadata,
                           final MantisJobDurationType durationType,
                           final MachineDefinition machineDefinition,
                           final List<ConstraintEvaluator> hardConstraints,
                           final List<VMTaskFitnessCalculator> softConstraints,
                           final long readyAt,
                           final Optional<String> preferredCluster) {
        this.workerId = workerId;
        this.stageNum = stageNum;
        this.numPortsRequested = numPortsRequested;
        this.jobMetadata = jobMetadata;
        this.durationType = durationType;
        this.machineDefinition = machineDefinition;
        this.hardConstraints = hardConstraints;
        this.softConstraints = softConstraints;
        this.readyAt = readyAt;
        this.preferredCluster = preferredCluster;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        final ScheduleRequest that = (ScheduleRequest) o;
        // Two requests are the same iff they target the same worker.
        return (workerId == null) ? (that.workerId == null) : workerId.equals(that.workerId);
    }

    @Override
    public int hashCode() {
        return (workerId == null) ? 0 : workerId.hashCode();
    }

    @Override
    public String getId() {
        return workerId.getId();
    }

    public WorkerId getWorkerId() {
        return workerId;
    }

    @Override
    public String taskGroupName() {
        return TASK_GROUP_NAME;
    }

    // Resource asks are delegated to the machine definition.

    @Override
    public double getCPUs() {
        return machineDefinition.getCpuCores();
    }

    @Override
    public double getMemory() {
        return machineDefinition.getMemoryMB();
    }

    @Override
    public double getNetworkMbps() {
        return machineDefinition.getNetworkMbps();
    }

    @Override
    public double getDisk() {
        return machineDefinition.getDiskMB();
    }

    @Override
    public int getPorts() {
        return numPortsRequested;
    }

    public JobMetadata getJobMetadata() {
        return jobMetadata;
    }

    public MachineDefinition getMachineDefinition() {
        return machineDefinition;
    }

    @Override
    public Map<String, Double> getScalarRequests() {
        return Collections.emptyMap();
    }

    @Override
    public Map<String, NamedResourceSetRequest> getCustomNamedResources() {
        return Collections.emptyMap();
    }

    @Override
    public List<ConstraintEvaluator> getHardConstraints() {
        return hardConstraints;
    }

    @Override
    public List<VMTaskFitnessCalculator> getSoftConstraints() {
        return softConstraints;
    }

    @Override
    public AssignedResources getAssignedResources() {
        // not used by Mantis
        return null;
    }

    @Override
    public void setAssignedResources(AssignedResources assignedResources) {
        // no-op Not using them at this time
    }

    public MantisJobDurationType getDurationType() {
        return durationType;
    }

    public int getStageNum() {
        return stageNum;
    }

    @Override
    public QAttributes getQAttributes() {
        return DEFAULT_Q_ATTRIBUTES;
    }

    @Override
    public long getReadyAt() {
        return readyAt;
    }

    @Override
    public void safeSetReadyAt(long when) {
        readyAt = when;
    }

    public Optional<String> getPreferredCluster() {
        return preferredCluster;
    }

    @Override
    public String toString() {
        return "ScheduleRequest{workerId=" + workerId + ", readyAt=" + readyAt + '}';
    }
}
4,286
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/scheduler/WorkerLaunched.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.scheduler;

import java.util.Objects;
import java.util.Optional;

import io.mantisrx.common.WorkerPorts;
import io.mantisrx.server.core.domain.WorkerId;

/**
 * Immutable worker event signalling that a worker has been launched on an agent: carries the
 * worker's identity, stage, host placement, and assigned ports. The event timestamp is captured
 * at construction time.
 */
public class WorkerLaunched implements WorkerEvent {

    private final WorkerId workerId;
    private final int stageNum;
    private final String hostname;
    private final String vmId;
    private final Optional<String> clusterName;
    private final WorkerPorts ports;
    // Captured when this event object is created.
    private final long eventTimeMs = System.currentTimeMillis();

    public WorkerLaunched(final WorkerId workerId,
                          final int stageNum,
                          final String hostname,
                          final String vmId,
                          final Optional<String> clusterName,
                          final WorkerPorts ports) {
        this.workerId = workerId;
        this.stageNum = stageNum;
        this.hostname = hostname;
        this.vmId = vmId;
        this.clusterName = clusterName;
        this.ports = ports;
    }

    @Override
    public WorkerId getWorkerId() {
        return workerId;
    }

    public int getStageNum() {
        return stageNum;
    }

    public String getHostname() {
        return hostname;
    }

    public String getVmId() {
        return vmId;
    }

    public Optional<String> getClusterName() {
        return clusterName;
    }

    public WorkerPorts getPorts() {
        return ports;
    }

    @Override
    public long getEventTimeMs() {
        return eventTimeMs;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        final WorkerLaunched other = (WorkerLaunched) o;
        // Compare cheap primitives first, then the object fields.
        return stageNum == other.stageNum
                && eventTimeMs == other.eventTimeMs
                && Objects.equals(workerId, other.workerId)
                && Objects.equals(hostname, other.hostname)
                && Objects.equals(vmId, other.vmId)
                && Objects.equals(clusterName, other.clusterName)
                && Objects.equals(ports, other.ports);
    }

    @Override
    public int hashCode() {
        return Objects.hash(workerId, stageNum, hostname, vmId, clusterName, ports, eventTimeMs);
    }

    @Override
    public String toString() {
        return "WorkerLaunched{workerId=" + workerId
                + ", stageNum=" + stageNum
                + ", hostname='" + hostname + "'"
                + ", vmId='" + vmId + "'"
                + ", clusterName=" + clusterName
                + ", ports=" + ports
                + ", eventTimeMs=" + eventTimeMs
                + "}";
    }
}
4,287
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/scheduler/JobMessageRouter.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.scheduler;

/**
 * Routes worker lifecycle events emitted by the scheduler to the interested job.
 */
public interface JobMessageRouter {

    /**
     * @param workerEvent worker event from Scheduler
     *
     * @return true if workerEvent was handled successfully, otherwise false
     */
    boolean routeWorkerEvent(final WorkerEvent workerEvent);
}
4,288
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/scheduler/WorkerRegistry.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.scheduler;

import java.util.Map;
import java.util.Optional;
import java.util.Set;

import io.mantisrx.server.core.domain.WorkerId;

/**
 * Read-only view over the workers known to the master, keyed by {@link WorkerId}.
 */
public interface WorkerRegistry {

    /** Returns number of workers in LAUNCHED, START_INITIATED and STARTED state. */
    int getNumRunningWorkers();

    /** Returns the set of all workers in LAUNCHED, START_INITIATED and STARTED state. */
    Set<WorkerId> getAllRunningWorkers();

    /** Returns the map of all workers to SlaveId in LAUNCHED, START_INITIATED and STARTED state. */
    Map<WorkerId, String> getAllRunningWorkerSlaveIdMappings();

    /**
     * @param workerId id to check
     *
     * @return false is job/worker is in Terminal State, otherwise true
     */
    boolean isWorkerValid(final WorkerId workerId);

    /**
     * Get time at which the worker was Accepted.
     *
     * @param workerId Worker ID
     *
     * @return time when worker was Accepted, or empty if unknown
     */
    Optional<Long> getAcceptedAt(final WorkerId workerId);
}
4,289
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/scheduler/MantisScheduler.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.scheduler;

import java.util.List;
import java.util.Optional;

import com.netflix.fenzo.VirtualMachineCurrentState;
import com.netflix.fenzo.VirtualMachineLease;
import io.mantisrx.server.core.domain.WorkerId;

/**
 * Scheduling facade for Mantis workers: accepts workers into the scheduling
 * queue, removes them, and manages resource offers and VM enable/disable state.
 */
public interface MantisScheduler {

    /**
     * Add a worker to the Scheduler queue.
     *
     * @param scheduleRequest worker to schedule
     */
    void scheduleWorker(final ScheduleRequest scheduleRequest);

    /**
     * Mark the worker to be removed from the Scheduling queue. This is expected to be called for all tasks that were added to the Scheduler, whether or
     * not the worker is already running. If the worker is running, the <code>hostname</code> parameter must be set, otherwise,
     * it can be <code>Optional.empty()</code>. The actual remove operation is performed before the start of the next scheduling
     * iteration.
     *
     * @param workerId The Id of the worker to be removed.
     * @param hostname The name of the VM where the worker was assigned resources from, or, <code>Optional.empty()</code> if it was
     *                 not assigned any resources.
     */
    void unscheduleWorker(final WorkerId workerId, final Optional<String> hostname);

    /**
     * Mark the worker to be removed from the Scheduling queue and terminate the running container. This is expected to be called for all tasks that were added to the Scheduler, whether or
     * not the worker is already running. If the worker is running, the <code>hostname</code> parameter must be set, otherwise,
     * it can be <code>Optional.empty()</code>. The actual remove operation is performed before the start of the next scheduling
     * iteration.
     *
     * @param workerId The Id of the worker to be removed.
     * @param hostname The name of the VM where the worker was assigned resources from, or, <code>Optional.empty()</code> if it was
     *                 not assigned any resources.
     */
    void unscheduleAndTerminateWorker(final WorkerId workerId, final Optional<String> hostname);

    /**
     * Set the wall clock time when this worker is ready for consideration for resource allocation.
     *
     * @param workerId The Id of the task.
     * @param when     The wall clock time in millis when the task is ready for consideration for assignment.
     */
    void updateWorkerSchedulingReadyTime(final WorkerId workerId, final long when);

    /**
     * Mark the given workers as running. This is expected to be called for all workers that were already running from before
     * {@link com.netflix.fenzo.TaskSchedulingService} started running. For example, when the scheduling service
     * is being started after a restart of the system and there were some workers launched in the previous run of
     * the system. Any workers assigned resources during scheduling invoked by this service will be automatically marked
     * as running.
     *
     * @param scheduleRequest The scheduleRequest(worker) to mark as running
     * @param hostname        The name of the VM that the task is running on.
     */
    void initializeRunningWorker(final ScheduleRequest scheduleRequest, final String hostname);

    /**
     * Informs the scheduler that the offer has been revoked. Typically called by the Resource Manager.
     *
     * @param offerId ID of the offer being revoked
     */
    void rescindOffer(final String offerId);

    /**
     * Informs the scheduler to reject all offers for this hostname.
     *
     * @param hostname host
     */
    void rescindOffers(final String hostname);

    /**
     * Informs the scheduler of new offers received from the Resource Manager.
     *
     * @param offers new offers from Resource Manager
     */
    void addOffers(final List<VirtualMachineLease> offers);

    /**
     * Reject offers from this host for durationMillis.
     *
     * @param hostname       host to disable
     * @param durationMillis duration in milliseconds
     *
     * @throws IllegalStateException
     */
    void disableVM(final String hostname, final long durationMillis) throws IllegalStateException;

    /**
     * Enable a host to allow using its resource offers for task assignment, only required if the host was explicitly disabled.
     *
     * @param hostname host to enable
     */
    void enableVM(final String hostname);

    /**
     * Get the current states of all known VMs.
     */
    List<VirtualMachineCurrentState> getCurrentVMState();

    /**
     * Set the list of VM group names that are active. VMs (hosts) that belong to groups that you do not include
     * in this list are considered disabled. The scheduler does not use the resources of disabled hosts when it
     * allocates tasks. If you pass in a null list, this indicates that the scheduler should consider all groups
     * to be enabled.
     *
     * @param activeVmGroups a list of VM group names that the scheduler is to consider to be enabled, or {@code null}
     *                       if the scheduler is to consider every group to be enabled
     */
    void setActiveVmGroups(final List<String> activeVmGroups);
}
4,290
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/scheduler/SchedulingStateManager.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.scheduler;

import java.util.List;
import java.util.concurrent.atomic.AtomicReference;

import com.netflix.fenzo.VirtualMachineCurrentState;

/**
 * Holds the most recently published snapshot of VM current-state.
 * Reads and writes go through an {@link AtomicReference}, so the snapshot
 * swap is atomic; the snapshot is {@code null} until the first set.
 */
public class SchedulingStateManager {

    // Latest snapshot; starts out null (AtomicReference's default initial value).
    private final AtomicReference<List<VirtualMachineCurrentState>> vmStateRef = new AtomicReference<>();

    /** @return the last snapshot set, or {@code null} if none has been set yet. */
    public List<VirtualMachineCurrentState> getVMCurrentState() {
        return vmStateRef.get();
    }

    /** Replace the stored snapshot with {@code latestState}. */
    public void setVMCurrentState(final List<VirtualMachineCurrentState> latestState) {
        vmStateRef.set(latestState);
    }
}
4,291
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/scheduler/WorkerLaunchFailed.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.scheduler;

import java.util.Objects;

import io.mantisrx.server.core.domain.WorkerId;

/**
 * Worker event signalling that launching a worker failed, carrying the
 * failure message. Immutable; event time is captured at construction.
 */
public class WorkerLaunchFailed implements WorkerEvent {

    private final WorkerId workerId;
    private final int stageNum;
    private final String errorMessage;
    // Wall-clock time when this event object was created.
    private final long eventTimeMs = System.currentTimeMillis();

    /**
     * @param workerId     id of the worker that failed to launch
     * @param stageNum     job stage number the worker belongs to
     * @param errorMessage description of the launch failure
     */
    public WorkerLaunchFailed(final WorkerId workerId,
                              final int stageNum,
                              final String errorMessage) {
        this.workerId = workerId;
        this.stageNum = stageNum;
        this.errorMessage = errorMessage;
    }

    @Override
    public WorkerId getWorkerId() {
        return workerId;
    }

    public int getStageNum() {
        return stageNum;
    }

    public String getErrorMessage() {
        return errorMessage;
    }

    @Override
    public long getEventTimeMs() {
        return eventTimeMs;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        if (o == null || o.getClass() != getClass()) {
            return false;
        }
        final WorkerLaunchFailed other = (WorkerLaunchFailed) o;
        return stageNum == other.stageNum
                && eventTimeMs == other.eventTimeMs
                && Objects.equals(workerId, other.workerId)
                && Objects.equals(errorMessage, other.errorMessage);
    }

    @Override
    public int hashCode() {
        // Field order must stay fixed so hash values match the original implementation.
        return Objects.hash(workerId, stageNum, errorMessage, eventTimeMs);
    }

    @Override
    public String toString() {
        return new StringBuilder("WorkerLaunchFailed{")
                .append("workerId=").append(workerId)
                .append(", stageNum=").append(stageNum)
                .append(", errorMessage='").append(errorMessage).append('\'')
                .append(", eventTimeMs=").append(eventTimeMs)
                .append('}')
                .toString();
    }
}
4,292
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/scheduler/WorkerResourceStatus.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.scheduler;

import java.util.Objects;

import io.mantisrx.server.core.domain.WorkerId;

/**
 * Worker event carrying a VM-resource state transition for a worker, together
 * with a free-form message. Immutable; event time is captured at construction.
 */
public class WorkerResourceStatus implements WorkerEvent {

    private final WorkerId workerId;
    private final String message;
    private final VMResourceState state;
    // Wall-clock time when this event object was created.
    private final long eventTimeMs = System.currentTimeMillis();

    /**
     * @param workerId id of the worker this status refers to
     * @param message  free-form description accompanying the state change
     * @param state    the reported VM resource state
     */
    public WorkerResourceStatus(final WorkerId workerId,
                                final String message,
                                final VMResourceState state) {
        this.workerId = workerId;
        this.message = message;
        this.state = state;
    }

    @Override
    public WorkerId getWorkerId() {
        return workerId;
    }

    public String getMessage() {
        return message;
    }

    public VMResourceState getState() {
        return state;
    }

    @Override
    public long getEventTimeMs() {
        return eventTimeMs;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        if (o == null || o.getClass() != getClass()) {
            return false;
        }
        final WorkerResourceStatus other = (WorkerResourceStatus) o;
        return eventTimeMs == other.eventTimeMs
                && Objects.equals(workerId, other.workerId)
                && Objects.equals(message, other.message)
                && state == other.state;
    }

    @Override
    public int hashCode() {
        // Field order must stay fixed so hash values match the original implementation.
        return Objects.hash(workerId, message, state, eventTimeMs);
    }

    @Override
    public String toString() {
        return new StringBuilder("WorkerResourceStatus{")
                .append("workerId=").append(workerId)
                .append(", message='").append(message).append('\'')
                .append(", state=").append(state)
                .append(", eventTimeMs=").append(eventTimeMs)
                .append('}')
                .toString();
    }

    /**
     * VM-level resource states a worker can be reported in.
     * Constant order is preserved from the original (ordinal-stable).
     */
    public enum VMResourceState {
        STARTED,
        START_INITIATED,
        COMPLETED,
        FAILED
    }
}
4,293
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/scheduler/LaunchTaskRequest.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.scheduler;

import java.util.Objects;

import io.mantisrx.common.WorkerPorts;

/**
 * Request to launch a scheduled worker on the ports assigned to it.
 * Immutable value object pairing a {@link ScheduleRequest} with its
 * {@link WorkerPorts}.
 */
public class LaunchTaskRequest {

    private final ScheduleRequest scheduleRequest;
    private final WorkerPorts ports;

    /**
     * @param scheduleRequest the worker scheduling request being launched
     * @param ports           ports assigned to the worker
     */
    public LaunchTaskRequest(ScheduleRequest scheduleRequest, WorkerPorts ports) {
        this.scheduleRequest = scheduleRequest;
        this.ports = ports;
    }

    public ScheduleRequest getScheduleRequest() {
        return scheduleRequest;
    }

    public WorkerPorts getPorts() {
        return ports;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;

        LaunchTaskRequest that = (LaunchTaskRequest) o;
        // Objects.equals replaces the manual null-ternary checks; this matches
        // the null-safe equality style used by the sibling event classes.
        return Objects.equals(scheduleRequest, that.scheduleRequest)
                && Objects.equals(ports, that.ports);
    }

    @Override
    public int hashCode() {
        // Consistent with equals over the same two fields; matches the
        // Objects.hash style used throughout this package.
        return Objects.hash(scheduleRequest, ports);
    }

    @Override
    public String toString() {
        return "LaunchTaskRequest{" +
                "scheduleRequest=" + scheduleRequest +
                ", ports=" + ports +
                '}';
    }
}
4,294
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/scheduler/WorkerEvent.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.scheduler;

import io.mantisrx.server.core.domain.WorkerId;

/**
 * Base contract for worker lifecycle events handled by the scheduler:
 * every event identifies a worker and carries its creation time.
 */
public interface WorkerEvent {

    /** @return the id of the worker this event refers to. */
    WorkerId getWorkerId();

    /** @return the time (epoch millis) at which this event was created. */
    long getEventTimeMs();
}
4,295
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/scheduler/WorkerUnscheduleable.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.scheduler;

import java.util.Objects;

import io.mantisrx.server.core.domain.WorkerId;

/**
 * Worker event signalling that a worker could not be scheduled.
 * Immutable; event time is captured at construction.
 */
public class WorkerUnscheduleable implements WorkerEvent {

    private final WorkerId workerId;
    private final int stageNum;
    // Wall-clock time when this event object was created.
    private final long eventTimeMs = System.currentTimeMillis();

    /**
     * @param workerId id of the unschedulable worker
     * @param stageNum job stage number the worker belongs to
     */
    public WorkerUnscheduleable(final WorkerId workerId, final int stageNum) {
        this.workerId = workerId;
        this.stageNum = stageNum;
    }

    @Override
    public WorkerId getWorkerId() {
        return workerId;
    }

    public int getStageNum() {
        return stageNum;
    }

    @Override
    public long getEventTimeMs() {
        return eventTimeMs;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        if (o == null || o.getClass() != getClass()) {
            return false;
        }
        final WorkerUnscheduleable other = (WorkerUnscheduleable) o;
        return stageNum == other.stageNum
                && eventTimeMs == other.eventTimeMs
                && Objects.equals(workerId, other.workerId);
    }

    @Override
    public int hashCode() {
        // Field order must stay fixed so hash values match the original implementation.
        return Objects.hash(workerId, stageNum, eventTimeMs);
    }

    @Override
    public String toString() {
        return new StringBuilder("WorkerUnscheduleable{")
                .append("workerId=").append(workerId)
                .append(", stageNum=").append(stageNum)
                .append(", eventTimeMs=").append(eventTimeMs)
                .append('}')
                .toString();
    }
}
4,296
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/scheduler/WorkerOnDisabledVM.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.scheduler;

import java.util.Objects;

import io.mantisrx.server.core.domain.WorkerId;

/**
 * Worker event signalling that a worker resides on a VM that has been
 * disabled. Immutable; event time is captured at construction.
 */
public class WorkerOnDisabledVM implements WorkerEvent {

    private final WorkerId workerId;
    // Wall-clock time when this event object was created.
    private final long eventTimeMs = System.currentTimeMillis();

    /** @param workerId id of the worker found on a disabled VM */
    public WorkerOnDisabledVM(final WorkerId workerId) {
        this.workerId = workerId;
    }

    @Override
    public WorkerId getWorkerId() {
        return workerId;
    }

    @Override
    public long getEventTimeMs() {
        return eventTimeMs;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        if (o == null || o.getClass() != getClass()) {
            return false;
        }
        final WorkerOnDisabledVM other = (WorkerOnDisabledVM) o;
        return eventTimeMs == other.eventTimeMs
                && Objects.equals(workerId, other.workerId);
    }

    @Override
    public int hashCode() {
        // Field order must stay fixed so hash values match the original implementation.
        return Objects.hash(workerId, eventTimeMs);
    }

    @Override
    public String toString() {
        return new StringBuilder("WorkerOnDisabledVM{")
                .append("workerId=").append(workerId)
                .append(", eventTimeMs=").append(eventTimeMs)
                .append('}')
                .toString();
    }
}
4,297
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/utils/MantisSystemClock.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.utils;

/**
 * {@link MantisClock} implementation backed by {@link System#currentTimeMillis()}.
 * Stateless, so a single shared {@link #INSTANCE} suffices.
 */
public class MantisSystemClock implements MantisClock {

    /** Shared singleton instance; safe to reuse because the class holds no state. */
    public static final MantisSystemClock INSTANCE = new MantisSystemClock();

    @Override
    public long now() {
        return System.currentTimeMillis();
    }
}
4,298
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/utils/MantisClock.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.utils; public interface MantisClock { /** * @return the difference, measured in milliseconds, between * the current time and midnight, January 1, 1970 UTC. */ long now(); }
4,299