index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/domain/Jar.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.domain;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.net.URL;
/**
 * Immutable value object describing a job artifact (jar): its location, version,
 * upload timestamp and the scheduling info it was submitted with.
 */
public class Jar {
    private final URL url;
    private final String version;
    private final long uploadedAt;
    private final SchedulingInfo schedulingInfo;

    /**
     * Creates a Jar from its JSON representation.
     *
     * @param url            location of the artifact
     * @param uploadedAt     epoch millis at which the artifact was uploaded
     * @param version        artifact version; when null/empty a timestamp-based
     *                       version is generated instead
     * @param schedulingInfo scheduling info associated with this artifact
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public Jar(@JsonProperty("url") URL url,
               @JsonProperty("uploadedAt") long uploadedAt,
               @JsonProperty("version") String version,
               @JsonProperty("schedulingInfo") SchedulingInfo schedulingInfo) {
        this.url = url;
        this.uploadedAt = uploadedAt;
        // Fall back to a unique, time-based version when none was supplied.
        if (version == null || version.isEmpty()) {
            this.version = String.valueOf(System.currentTimeMillis());
        } else {
            this.version = version;
        }
        this.schedulingInfo = schedulingInfo;
    }

    public URL getUrl() {
        return url;
    }

    public long getUploadedAt() {
        return uploadedAt;
    }

    public String getVersion() {
        return version;
    }

    public SchedulingInfo getSchedulingInfo() {
        return schedulingInfo;
    }
}
| 8,000 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/domain/SLA.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.domain;
import com.netflix.fenzo.triggers.CronTrigger;
import com.netflix.fenzo.triggers.TriggerOperator;
import com.netflix.fenzo.triggers.exceptions.SchedulerException;
import com.netflix.fenzo.triggers.exceptions.TriggerNotFoundException;
import io.mantisrx.server.master.store.NamedJob;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Service-level agreement for a job cluster: either a [min, max] bound on the
 * number of running jobs, or a cron schedule (in which case min/max are forced
 * to 0/1 and a {@link CronTrigger} drives job submission).
 */
public class SLA {

    private static final Logger logger = LoggerFactory.getLogger(SLA.class);
    @JsonIgnore
    private static final int MaxValueForSlaMin = 5;
    @JsonIgnore
    private static final int MaxValueForSlaMax = 100;
    @JsonIgnore
    private static final TriggerOperator triggerOperator;

    static {
        triggerOperator = new TriggerOperator(1);
        try {
            triggerOperator.initialize();
        } catch (SchedulerException e) {
            // A failed trigger subsystem makes cron SLAs unusable; fail fast.
            logger.error("Unexpected: " + e.getMessage(), e);
            throw new RuntimeException(e);
        }
    }

    private final int min;
    private final int max;
    private final String cronSpec;
    private final IJobClusterDefinition.CronPolicy cronPolicy;
    @JsonIgnore
    private final boolean hasCronSpec;
    @JsonIgnore
    private final IJobClusterDefinition.CronPolicy defaultPolicy = IJobClusterDefinition.CronPolicy.KEEP_EXISTING;
    @JsonIgnore
    private CronTrigger<NamedJob> scheduledTrigger;
    @JsonIgnore
    private String triggerGroup = null;
    @JsonIgnore
    private String triggerId = null;

    /**
     * Creates an SLA from its JSON representation.
     *
     * @param min        minimum number of jobs; ignored (forced to 0) when a cron spec is given
     * @param max        maximum number of jobs; ignored (forced to 1) when a cron spec is given
     * @param cronSpec   optional cron expression; non-empty value switches this SLA to cron mode
     * @param cronPolicy policy applied when cron fires; defaults to KEEP_EXISTING in cron mode
     * @throws IllegalArgumentException if the min/max values are inconsistent or out of range
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public SLA(
            @JsonProperty("min") int min,
            @JsonProperty("max") int max,
            @JsonProperty("cronSpec") String cronSpec,
            @JsonProperty("cronPolicy") IJobClusterDefinition.CronPolicy cronPolicy
    ) {
        if (cronSpec != null && !cronSpec.isEmpty()) {
            // Cron mode: exactly one job at a time, submitted on the schedule.
            this.cronSpec = cronSpec;
            hasCronSpec = true;
            this.max = 1;
            this.min = 0;
            this.cronPolicy = cronPolicy == null ? defaultPolicy : cronPolicy;
        } else {
            hasCronSpec = false;
            this.min = min;
            this.max = max;
            this.cronSpec = null;
            this.cronPolicy = null;
        }
        validate();
    }

    public int getMin() {
        return min;
    }

    public int getMax() {
        return max;
    }

    public String getCronSpec() {
        return cronSpec;
    }

    // Declared as IJobClusterDefinition.CronPolicy for consistency with the field;
    // previously declared via JobClusterDefinitionImpl, which refers to the same type.
    public IJobClusterDefinition.CronPolicy getCronPolicy() {
        return cronPolicy;
    }

    /** Validates min/max bounds; called from the constructor. */
    private void validate() throws IllegalArgumentException {
        if (max < min)
            throw new IllegalArgumentException("Cannot have max=" + max + " < min=" + min);
        if (min > MaxValueForSlaMin)
            throw new IllegalArgumentException("Specified min sla value " + min + " cannot be >" + MaxValueForSlaMin);
        if (max > MaxValueForSlaMax)
            throw new IllegalArgumentException("Max sla value " + max + " cannot be >" + MaxValueForSlaMax);
    }

    // caller must lock to avoid concurrent access with destroyCron()
    private void initCron(NamedJob job) throws SchedulerException {
        // No-op unless this SLA is cron-based and the trigger isn't registered yet.
        if (!hasCronSpec || triggerId != null)
            return;
        logger.info("Init'ing cron for " + job.getName());
        triggerGroup = job.getName() + "-" + this;
        try {
            scheduledTrigger = new CronTrigger<>(cronSpec, job.getName(), job, NamedJob.class, NamedJob.CronTriggerAction.class);
            triggerId = triggerOperator.registerTrigger(triggerGroup, scheduledTrigger);
        } catch (IllegalArgumentException e) {
            // Invalid cron spec surfaces as an IllegalArgumentException; rewrap so
            // callers only deal with SchedulerException.
            throw new SchedulerException(e.getMessage(), e);
        }
    }

    // caller must lock to avoid concurrent access with initCron()
    private void destroyCron() {
        try {
            if (triggerId != null) {
                logger.info("Destroying cron " + triggerId);
                triggerOperator.deleteTrigger(triggerGroup, triggerId);
                triggerId = null;
            }
        } catch (TriggerNotFoundException | SchedulerException e) {
            // Best-effort cleanup: log (with stack trace) and move on.
            logger.warn("Couldn't delete trigger group " + triggerGroup + ", id " + triggerId, e);
        }
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((cronPolicy == null) ? 0 : cronPolicy.hashCode());
        result = prime * result + ((cronSpec == null) ? 0 : cronSpec.hashCode());
        result = prime * result + ((defaultPolicy == null) ? 0 : defaultPolicy.hashCode());
        result = prime * result + (hasCronSpec ? 1231 : 1237);
        result = prime * result + max;
        result = prime * result + min;
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        SLA other = (SLA) obj;
        if (cronPolicy != other.cronPolicy)
            return false;
        if (cronSpec == null) {
            if (other.cronSpec != null)
                return false;
        } else if (!cronSpec.equals(other.cronSpec))
            return false;
        if (defaultPolicy != other.defaultPolicy)
            return false;
        if (hasCronSpec != other.hasCronSpec)
            return false;
        if (max != other.max)
            return false;
        return min == other.min;
    }

    @Override
    public String toString() {
        return "SLA [min=" + min + ", max=" + max + ", cronSpec=" + cronSpec + ", cronPolicy=" + cronPolicy
                + ", hasCronSpec=" + hasCronSpec + ", defaultPolicy=" + defaultPolicy + ", scheduledTrigger="
                + scheduledTrigger + ", triggerGroup=" + triggerGroup + ", triggerId=" + triggerId + "]";
    }
}
| 8,001 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/domain/DataFormatAdapter.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.domain;
import static java.util.Optional.empty;
import static java.util.Optional.of;
import static java.util.Optional.ofNullable;
import io.mantisrx.common.Label;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.jobcluster.IJobClusterMetadata;
import io.mantisrx.master.jobcluster.JobClusterMetadataImpl;
import io.mantisrx.master.jobcluster.job.FilterableMantisJobMetadataWritable;
import io.mantisrx.master.jobcluster.job.FilterableMantisStageMetadataWritable;
import io.mantisrx.master.jobcluster.job.FilterableMantisWorkerMetadataWritable;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.master.jobcluster.job.IMantisStageMetadata;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.master.jobcluster.job.MantisJobMetadataImpl;
import io.mantisrx.master.jobcluster.job.MantisStageMetadataImpl;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.master.jobcluster.job.worker.JobWorker;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.MantisJobDefinition;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.NamedJobDefinition;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.server.master.MantisJobOperations;
import io.mantisrx.server.master.MantisJobStatus;
import io.mantisrx.server.master.http.api.JobClusterInfo;
import io.mantisrx.server.master.store.InvalidNamedJobException;
import io.mantisrx.server.master.store.MantisJobMetadata;
import io.mantisrx.server.master.store.MantisJobMetadataWritable;
import io.mantisrx.server.master.store.MantisStageMetadata;
import io.mantisrx.server.master.store.MantisStageMetadataWritable;
import io.mantisrx.server.master.store.MantisWorkerMetadata;
import io.mantisrx.server.master.store.MantisWorkerMetadataWritable;
import io.mantisrx.server.master.store.NamedJob;
import io.mantisrx.server.master.store.NamedJobDeleteException;
import io.mantisrx.shaded.com.google.common.base.Preconditions;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.time.Instant;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.functions.Action1;
/**
 * Translates between the current actor-based control-plane model
 * ({@code IJobClusterMetadata}, {@code IMantisJobMetadata}, ...) and the legacy
 * store/persistence model ({@code NamedJob}, {@code MantisJobMetadataWritable}, ...).
 * All methods are stateless, static converters.
 */
public class DataFormatAdapter {

    private static final Logger logger = LoggerFactory.getLogger(DataFormatAdapter.class);

    /** Converts new-style job cluster metadata to the legacy {@link NamedJob} form. */
    public static NamedJob convertJobClusterMetadataToNamedJob(IJobClusterMetadata jobCluster) {
        return new NamedJob(new NoOpMantisJobOperations(),
                jobCluster.getJobClusterDefinition().getName(),
                convertJobClusterConfigsToJars(jobCluster.getJobClusterDefinition().getJobClusterConfigs()),
                convertSLAToNamedJobSLA(jobCluster.getJobClusterDefinition().getSLA()),
                jobCluster.getJobClusterDefinition().getParameters(),
                jobCluster.getJobClusterDefinition().getOwner(),
                jobCluster.getLastJobCount(),
                jobCluster.isDisabled(),
                jobCluster.getJobClusterDefinition().getIsReadyForJobMaster(),
                jobCluster.getJobClusterDefinition().getWorkerMigrationConfig(),
                jobCluster.getJobClusterDefinition().getLabels());
    }

    /** Converts a completed-job record to the legacy {@link NamedJob.CompletedJob} form. */
    public static NamedJob.CompletedJob convertCompletedJobToNamedJobCompletedJob(JobClusterDefinitionImpl.CompletedJob cJob) {
        return new NamedJob.CompletedJob(cJob.getName(), cJob.getJobId(), cJob.getVersion(),
                DataFormatAdapter.convertToMantisJobState(cJob.getState()), cJob.getSubmittedAt(),
                cJob.getTerminatedAt(), cJob.getUser(), cJob.getLabelList());
    }

    /** Inverse of {@link #convertCompletedJobToNamedJobCompletedJob}. */
    public static JobClusterDefinitionImpl.CompletedJob convertNamedJobCompletedJobToCompletedJob(NamedJob.CompletedJob completedJob) {
        return new JobClusterDefinitionImpl.CompletedJob(completedJob.getName(), completedJob.getJobId(),
                completedJob.getVersion(), DataFormatAdapter.convertMantisJobStateToJobState(completedJob.getState()),
                completedJob.getSubmittedAt(), completedJob.getTerminatedAt(), completedJob.getUser(),
                completedJob.getLabels());
    }

    /** Converts a legacy {@link NamedJob} to new-style job cluster metadata. */
    public static IJobClusterMetadata convertNamedJobToJobClusterMetadata(NamedJob nJob) {
        return new JobClusterMetadataImpl.Builder()
                .withIsDisabled(nJob.getDisabled())
                .withLastJobCount(nJob.getLastJobCount())
                .withJobClusterDefinition(new JobClusterDefinitionImpl.Builder()
                        .withIsReadyForJobMaster(nJob.getIsReadyForJobMaster())
                        .withMigrationConfig(nJob.getMigrationConfig())
                        .withName(nJob.getName())
                        // Legacy records may lack an owner; substitute a placeholder.
                        .withOwner(ofNullable(nJob.getOwner()).orElse(new JobOwner("unknown", "unknown", "", "email@netflix.com", "norepo")))
                        .withSla(DataFormatAdapter.convertToSLA(nJob.getSla()))
                        .withLabels(nJob.getLabels())
                        .withParameters(nJob.getParameters())
                        .withJobClusterConfigs(DataFormatAdapter.convertJarsToJobClusterConfigs(nJob.getJars()))
                        .build())
                .build();
    }

    /**
     * Converts cluster configs to legacy jar records. Configs whose artifact name
     * cannot be turned into a URL are logged and skipped.
     */
    public static List<NamedJob.Jar> convertJobClusterConfigsToJars(List<JobClusterConfig> jobClusterConfigs) {
        Preconditions.checkNotNull(jobClusterConfigs);
        List<NamedJob.Jar> jarList = new ArrayList<>(jobClusterConfigs.size());
        jobClusterConfigs.forEach((jConfig) -> {
            try {
                jarList.add(convertJobClusterConfigToJar(jConfig));
            } catch (MalformedURLException e) {
                // Keep the stack trace; a bare message made these failures hard to diagnose.
                logger.warn("Exception {} transforming {}", e.getMessage(), jConfig, e);
            }
        });
        return jarList;
    }

    /**
     * Converts legacy jar records to cluster configs. Jars that fail conversion
     * are logged and skipped.
     */
    public static List<JobClusterConfig> convertJarsToJobClusterConfigs(List<NamedJob.Jar> jars) {
        Preconditions.checkNotNull(jars);
        List<JobClusterConfig> configs = new ArrayList<>(jars.size());
        jars.forEach((jar) -> {
            try {
                configs.add(convertJarToJobClusterConfig(jar));
            } catch (Exception e) {
                // Keep the stack trace; a bare message made these failures hard to diagnose.
                logger.warn("Exception loading config {}. Skipping...", jar, e);
            }
        });
        return configs;
    }

    /**
     * Converts one cluster config to a legacy jar record.
     *
     * @throws MalformedURLException if the artifact name cannot form a valid URL
     */
    public static NamedJob.Jar convertJobClusterConfigToJar(JobClusterConfig jConfig) throws MalformedURLException {
        SchedulingInfo sInfo = jConfig.getSchedulingInfo();
        String name = jConfig.getArtifactName();
        long uploadedAt = jConfig.getUploadedAt();
        String version = jConfig.getVersion();
        return new NamedJob.Jar(generateURL(name), uploadedAt, version, sInfo);
    }

    /** Converts one legacy jar record to a cluster config (artifact name extracted from the URL). */
    public static JobClusterConfig convertJarToJobClusterConfig(NamedJob.Jar jar) {
        Preconditions.checkNotNull(jar);
        Optional<String> artifactName = extractArtifactName(jar.getUrl());
        String version = jar.getVersion();
        return new JobClusterConfig.Builder()
                .withArtifactName(artifactName.orElse(""))
                .withVersion(version)
                .withSchedulingInfo(jar.getSchedulingInfo())
                .withUploadedAt(jar.getUploadedAt())
                .build();
    }

    /**
     * Builds a URL from an artifact name, prefixing "http://" when no scheme is present.
     *
     * @throws MalformedURLException if the resulting string is not a valid URL
     */
    public static URL generateURL(String artifactName) throws MalformedURLException {
        Preconditions.checkNotNull(artifactName, "Artifact Name cannot be null");
        if (!artifactName.startsWith("http")) {
            return new URL("http://" + artifactName);
        }
        return new URL(artifactName);
    }

    /**
     * Extracts the artifact file name (the segment after the last '/') from a URL string.
     * e.g. http://somehost/my-artifact-name-0.0.1.zip -> my-artifact-name-0.0.1.zip
     *
     * @return the artifact name, or empty if none could be extracted
     */
    public static Optional<String> extractArtifactName(String jarStr) {
        if (jarStr != null && !jarStr.isEmpty()) {
            int lastIndexOfForwardSlash = jarStr.lastIndexOf('/');
            if (lastIndexOfForwardSlash != -1) {
                String artifactName = jarStr.substring(lastIndexOfForwardSlash + 1);
                return of(artifactName);
            }
        }
        logger.warn("Could not extract artifactName from " + jarStr);
        return empty();
    }

    /** Null-safe overload of {@link #extractArtifactName(String)}. */
    public static Optional<String> extractArtifactName(URL jar) {
        if (jar != null) {
            String jarStr = jar.toString();
            return extractArtifactName(jarStr);
        }
        return empty();
    }

    /** Converts a new-style SLA to the legacy {@link NamedJob.SLA} form. */
    public static NamedJob.SLA convertSLAToNamedJobSLA(io.mantisrx.server.master.domain.SLA sla) {
        return new NamedJob.SLA(sla.getMin(), sla.getMax(), sla.getCronSpec(),
                convertToNamedJobDefinitionCronPolicy(sla.getCronPolicy()));
    }

    /**
     * Maps the new cron policy enum to the legacy one.
     * A null policy maps to KEEP_NEW (legacy default).
     */
    public static NamedJobDefinition.CronPolicy convertToNamedJobDefinitionCronPolicy(IJobClusterDefinition.CronPolicy cPolicy) {
        if (cPolicy != null) {
            switch (cPolicy) {
            case KEEP_EXISTING:
                return NamedJobDefinition.CronPolicy.KEEP_EXISTING;
            case KEEP_NEW:
                return NamedJobDefinition.CronPolicy.KEEP_NEW;
            default:
                return NamedJobDefinition.CronPolicy.KEEP_EXISTING;
            }
        }
        return NamedJobDefinition.CronPolicy.KEEP_NEW;
    }

    /** Converts worker metadata to the legacy writable form. */
    public static MantisWorkerMetadataWritable convertMantisWorkerMetadataToMantisWorkerMetadataWritable(IMantisWorkerMetadata workerMeta) {
        MantisWorkerMetadataWritable writable = new MantisWorkerMetadataWritable(workerMeta.getWorkerIndex(),
                workerMeta.getWorkerNumber(),
                workerMeta.getJobId(),
                workerMeta.getStageNum(),
                workerMeta.getNumberOfPorts());
        setWorkerMetadataWritable(writable, workerMeta);
        return writable;
    }

    /** Converts worker metadata to the filterable legacy writable form. */
    public static FilterableMantisWorkerMetadataWritable convertMantisWorkerMetadataToFilterableMantisWorkerMetadataWritable(IMantisWorkerMetadata workerMeta) {
        FilterableMantisWorkerMetadataWritable writable = new FilterableMantisWorkerMetadataWritable(workerMeta.getWorkerIndex(),
                workerMeta.getWorkerNumber(),
                workerMeta.getJobId(),
                workerMeta.getStageNum(),
                workerMeta.getNumberOfPorts());
        setWorkerMetadataWritable(writable, workerMeta);
        return writable;
    }

    /**
     * Copies all fields from new-style worker metadata onto a legacy writable,
     * setting the state with the timestamp that corresponds to that state.
     */
    public static void setWorkerMetadataWritable(MantisWorkerMetadataWritable writable, IMantisWorkerMetadata workerMeta) {
        writable.setAcceptedAt(workerMeta.getAcceptedAt());
        writable.setLaunchedAt(workerMeta.getLaunchedAt());
        writable.setCompletedAt(workerMeta.getCompletedAt());
        writable.setStartingAt(workerMeta.getStartingAt());
        writable.setStartedAt(workerMeta.getStartedAt());
        writable.setCluster(workerMeta.getCluster());
        writable.setResourceCluster(workerMeta.getResourceCluster());
        writable.setSlave(workerMeta.getSlave());
        writable.setSlaveID(workerMeta.getSlaveID());
        Optional<WorkerPorts> wPorts = workerMeta.getPorts();
        if (wPorts.isPresent()) {
            WorkerPorts wP = wPorts.get();
            writable.addPorts(wP.getPorts());
        }
        writable.setConsolePort(workerMeta.getConsolePort());
        writable.setDebugPort(workerMeta.getDebugPort());
        writable.setMetricsPort(workerMeta.getMetricsPort());
        writable.setCustomPort(workerMeta.getCustomPort());
        MantisJobState state = convertWorkerStateToMantisJobState(workerMeta.getState());
        try {
            // Each state carries the timestamp of the event that produced it.
            switch (state) {
            case Accepted:
                writable.setStateNoValidation(state, workerMeta.getAcceptedAt(), workerMeta.getReason());
                break;
            case Launched:
                writable.setStateNoValidation(state, workerMeta.getLaunchedAt(), workerMeta.getReason());
                break;
            case StartInitiated:
                writable.setStateNoValidation(state, workerMeta.getStartingAt(), workerMeta.getReason());
                break;
            case Started:
                writable.setStateNoValidation(state, workerMeta.getStartedAt(), workerMeta.getReason());
                break;
            case Failed:
                writable.setStateNoValidation(state, workerMeta.getCompletedAt(), workerMeta.getReason());
                break;
            case Completed:
                writable.setStateNoValidation(state, workerMeta.getCompletedAt(), workerMeta.getReason());
                break;
            default:
                assert false : "Unexpected job state to set";
            }
        } catch (Exception e) {
            // Preserve the original exception as the cause (was previously dropped).
            throw new RuntimeException("Error converting to MantisWorkerWriteable " + e.getMessage(), e);
        }
        writable.setResubmitInfo(workerMeta.getResubmitOf(), workerMeta.getTotalResubmitCount());
        writable.setReason(workerMeta.getReason());
    }

    /**
     * Convert/Deserialize metadata into a {@link JobWorker}.
     *
     * The converted object could have no worker ports which returns Null.
     *
     * Legit Cases:
     *
     * 1. Loaded worker was in Accepted state (hasn't been assigned ports yet).
     * 2. Loaded worker was in Archived state but previously archived from Accepted state.
     *
     * Error Cases:
     *
     * 1. Loaded worker was in Non-Accepted state (data corruption).
     * 2. Loaded worker was in Archived state but previously was running or completed (data corruption, but same
     * semantic as Legit Case 2 above.
     *
     * @return a valid converted job worker.
     */
    public static JobWorker convertMantisWorkerMetadataWriteableToMantisWorkerMetadata(MantisWorkerMetadata writeable, LifecycleEventPublisher eventPublisher) {
        if (logger.isDebugEnabled()) { logger.debug("DataFormatAdatper:converting worker {}", writeable); }
        String jobId = writeable.getJobId();
        // Reassemble the port list in the order WorkerPorts expects:
        // metrics, debug, console, custom, then the first sink port if present.
        List<Integer> ports = new ArrayList<>(writeable.getNumberOfPorts());
        ports.add(writeable.getMetricsPort());
        ports.add(writeable.getDebugPort());
        ports.add(writeable.getConsolePort());
        ports.add(writeable.getCustomPort());
        if (writeable.getPorts().size() > 0) {
            ports.add(writeable.getPorts().get(0));
        }
        WorkerPorts workerPorts = null;
        try {
            workerPorts = new WorkerPorts(ports);
        } catch (IllegalArgumentException | IllegalStateException e) {
            // See method javadoc: absent/invalid ports can be legitimate (e.g. Accepted state).
            logger.warn("problem loading worker {} for Job ID {}", writeable.getWorkerId(), jobId, e);
        }
        JobWorker.Builder builder = new JobWorker.Builder()
                .withJobId(jobId)
                .withAcceptedAt(writeable.getAcceptedAt())
                .withLaunchedAt(writeable.getLaunchedAt())
                .withStartingAt(writeable.getStartingAt())
                .withStartedAt(writeable.getStartedAt())
                .withCompletedAt(writeable.getCompletedAt())
                .withNumberOfPorts(ports.size())
                .withWorkerPorts(workerPorts)
                .withResubmitCount(writeable.getTotalResubmitCount())
                .withResubmitOf(writeable.getResubmitOf())
                .withSlave(writeable.getSlave())
                .withSlaveID(writeable.getSlaveID())
                .withStageNum(writeable.getStageNum())
                .withState(convertMantisJobStateToWorkerState(writeable.getState()))
                .withWorkerIndex(writeable.getWorkerIndex())
                .withWorkerNumber(writeable.getWorkerNumber())
                .withJobCompletedReason(writeable.getReason())
                .withPreferredCluster(writeable.getCluster())
                .withLifecycleEventsPublisher(eventPublisher);
        writeable.getResourceCluster().ifPresent(builder::withResourceCluster);
        JobWorker converted = builder.build();
        if (logger.isDebugEnabled()) { logger.debug("DataFormatAdatper:converted worker {}", converted); }
        return converted;
    }

    /** Converts stage metadata to the legacy writable form (workers not included). */
    public static MantisStageMetadataWritable convertMantisStageMetadataToMantisStageMetadataWriteable(IMantisStageMetadata stageMeta) {
        return new MantisStageMetadataWritable(stageMeta.getJobId().getId(),
                stageMeta.getStageNum(),
                stageMeta.getNumStages(),
                stageMeta.getMachineDefinition(),
                stageMeta.getNumWorkers(),
                stageMeta.getHardConstraints(),
                stageMeta.getSoftConstraints(),
                stageMeta.getScalingPolicy(),
                stageMeta.getScalable()
        );
    }

    /** Converts stage metadata to the filterable legacy writable form (workers not included). */
    public static FilterableMantisStageMetadataWritable convertFilterableMantisStageMetadataToMantisStageMetadataWriteable(IMantisStageMetadata stageMeta) {
        return new FilterableMantisStageMetadataWritable(stageMeta.getJobId().getId(),
                stageMeta.getStageNum(),
                stageMeta.getNumStages(),
                stageMeta.getMachineDefinition(),
                stageMeta.getNumWorkers(),
                stageMeta.getHardConstraints(),
                stageMeta.getSoftConstraints(),
                stageMeta.getScalingPolicy(),
                stageMeta.getScalable()
        );
    }

    /** Converts a legacy {@link NamedJob.SLA} to the new-style SLA. */
    public static io.mantisrx.server.master.domain.SLA convertToSLA(NamedJob.SLA sla) {
        return new io.mantisrx.server.master.domain.SLA(sla.getMin(), sla.getMax(), sla.getCronSpec(),
                convertToCronPolicy(sla.getCronPolicy()));
    }

    /**
     * Maps the legacy cron policy enum to the new one.
     * A null policy maps to null (unlike the reverse direction, which defaults).
     */
    public static IJobClusterDefinition.CronPolicy convertToCronPolicy(NamedJobDefinition.CronPolicy cronPolicy) {
        if (cronPolicy != null) {
            switch (cronPolicy) {
            case KEEP_EXISTING:
                return IJobClusterDefinition.CronPolicy.KEEP_EXISTING;
            case KEEP_NEW:
                return IJobClusterDefinition.CronPolicy.KEEP_NEW;
            default:
                return IJobClusterDefinition.CronPolicy.KEEP_NEW;
            }
        }
        return null;
    }

    /** Converts a legacy job record to new-style job metadata (non-archived variant). */
    public static IMantisJobMetadata convertMantisJobWriteableToMantisJobMetadata(MantisJobMetadata archJob, LifecycleEventPublisher eventPublisher) throws Exception {
        return convertMantisJobWriteableToMantisJobMetadata(archJob, eventPublisher, false);
    }

    // TODO job specific migration config is not supported, migration config will be at cluster level
    /**
     * Converts a legacy job record to new-style job metadata.
     *
     * @param isArchived when true, workers are not attached to stages: archived jobs
     *                   may have multiple workers per index, which addWorkerIndex rejects
     */
    public static IMantisJobMetadata convertMantisJobWriteableToMantisJobMetadata(MantisJobMetadata archJob, LifecycleEventPublisher eventPublisher, boolean isArchived) throws Exception {
        if (logger.isTraceEnabled()) { logger.trace("DataFormatAdapter:Converting {}", archJob); }
        // convert stages to new format
        List<IMantisStageMetadata> convertedStageList = new ArrayList<>();
        for (MantisStageMetadata stageMeta : archJob.getStageMetadata()) {
            // if this is an archived job then add workerIndex may fail as there maybe
            // multiple workers related to a given index so skip adding workers to stage
            boolean skipAddingWorkers = isArchived;
            convertedStageList.add(convertMantisStageMetadataWriteableToMantisStageMetadata(stageMeta, eventPublisher, skipAddingWorkers));
        }
        // generate SchedulingInfo
        SchedulingInfo schedulingInfo = generateSchedulingInfo(convertedStageList);
        URL jarUrl = archJob.getJarUrl();
        Optional<String> artifactName = extractArtifactName(jarUrl);
        // generate job defn
        JobDefinition jobDefn = new JobDefinition(archJob.getName(), archJob.getUser(),
                artifactName.orElse(""), null, archJob.getParameters(), archJob.getSla(),
                archJob.getSubscriptionTimeoutSecs(), schedulingInfo, archJob.getNumStages(), archJob.getLabels(), null);
        Optional<JobId> jIdOp = JobId.fromId(archJob.getJobId());
        if (!jIdOp.isPresent()) {
            throw new IllegalArgumentException("Invalid JobId " + archJob.getJobId());
        }
        // generate job meta
        MantisJobMetadataImpl mantisJobMetadata = new MantisJobMetadataImpl(jIdOp.get(), archJob.getSubmittedAt(),
                archJob.getStartedAt(), jobDefn, convertMantisJobStateToJobState(archJob.getState()),
                archJob.getNextWorkerNumberToUse(), archJob.getHeartbeatIntervalSecs(), archJob.getWorkerTimeoutSecs());
        // add the stages
        for (IMantisStageMetadata stageMetadata : convertedStageList) {
            mantisJobMetadata.addJobStageIfAbsent(stageMetadata);
        }
        if (logger.isTraceEnabled()) { logger.trace("DataFormatAdapter:Completed conversion to IMantisJobMetadata {}",
                mantisJobMetadata); }
        return mantisJobMetadata;
    }

    /** Builds per-stage scheduling info from converted stage metadata. */
    private static StageSchedulingInfo generateStageSchedulingInfo(IMantisStageMetadata mantisStageMetadata) {
        return StageSchedulingInfo.builder()
                .numberOfInstances(mantisStageMetadata.getNumWorkers())
                .machineDefinition(mantisStageMetadata.getMachineDefinition())
                .hardConstraints(mantisStageMetadata.getHardConstraints())
                .softConstraints(mantisStageMetadata.getSoftConstraints())
                .scalingPolicy(mantisStageMetadata.getScalingPolicy())
                .scalable(mantisStageMetadata.getScalable())
                .build();
    }

    /** Builds a {@link SchedulingInfo} keyed by stage number from the stage list. */
    private static SchedulingInfo generateSchedulingInfo(List<IMantisStageMetadata> convertedStageList) {
        Map<Integer, StageSchedulingInfo> stageSchedulingInfoMap = new HashMap<>();
        for (IMantisStageMetadata stageMeta : convertedStageList) {
            stageSchedulingInfoMap.put(stageMeta.getStageNum(), generateStageSchedulingInfo(stageMeta));
        }
        return new SchedulingInfo(stageSchedulingInfoMap);
    }

    /** Converts legacy stage metadata to the new form, including its workers. */
    public static IMantisStageMetadata convertMantisStageMetadataWriteableToMantisStageMetadata(
            MantisStageMetadata stageMeta,
            LifecycleEventPublisher eventPublisher) {
        return convertMantisStageMetadataWriteableToMantisStageMetadata(stageMeta, eventPublisher,
                false);
    }

    /**
     * Converts legacy stage metadata to the new form.
     *
     * @param skipAddingWorkerMetaData when true, workers are not attached (used for
     *                                 archived jobs where worker indexes may repeat)
     */
    public static IMantisStageMetadata convertMantisStageMetadataWriteableToMantisStageMetadata(MantisStageMetadata stageMeta,
                                                                                               LifecycleEventPublisher eventPublisher,
                                                                                               boolean skipAddingWorkerMetaData) {
        if (logger.isTraceEnabled()) { logger.trace("DataFormatAdapter:converting stage {}, skipadding workers {}", stageMeta, skipAddingWorkerMetaData); }
        Optional<JobId> jIdOp = JobId.fromId(stageMeta.getJobId());
        if (!jIdOp.isPresent()) {
            throw new IllegalArgumentException("Invalid jobid " + stageMeta.getJobId());
        }
        IMantisStageMetadata newStageMeta = new MantisStageMetadataImpl.Builder()
                .withHardConstraints(stageMeta.getHardConstraints())
                .withSoftConstraints(stageMeta.getSoftConstraints())
                .withJobId(jIdOp.get())
                .withMachineDefinition(stageMeta.getMachineDefinition())
                .withNumStages(stageMeta.getNumStages())
                .withNumWorkers(stageMeta.getNumWorkers())
                .withScalingPolicy(stageMeta.getScalingPolicy())
                .withStageNum(stageMeta.getStageNum())
                .isScalable(stageMeta.getScalable())
                .build();
        if (!skipAddingWorkerMetaData) {
            // Previously this logged "Skip adding workers" — the message was inverted.
            if (logger.isDebugEnabled()) { logger.debug("Adding workers to stage meta"); }
            stageMeta.getAllWorkers()
                    .forEach((mantisWorkerMetadata) -> {
                        ((MantisStageMetadataImpl) newStageMeta).addWorkerIndex(convertMantisWorkerMetadataWriteableToMantisWorkerMetadata(mantisWorkerMetadata, eventPublisher));
                    });
        }
        if (logger.isDebugEnabled()) { logger.debug("DataFormatAdapter:converted stage {}", newStageMeta); }
        return newStageMeta;
    }

    /** Converts new-style job metadata to the legacy writable form. */
    public static MantisJobMetadataWritable convertMantisJobMetadataToMantisJobMetadataWriteable(IMantisJobMetadata jobMetadata) {
        // Legacy format stores an absent start time as epoch 0.
        Instant startedAtInstant = jobMetadata.getStartedAtInstant().orElse(Instant.ofEpochMilli(0));
        return new MantisJobMetadataWritable(jobMetadata.getJobId().getId(),
                jobMetadata.getJobId().getCluster(),
                jobMetadata.getUser(),
                jobMetadata.getSubmittedAtInstant().toEpochMilli(),
                startedAtInstant.toEpochMilli(),
                jobMetadata.getJobJarUrl(),
                jobMetadata.getTotalStages(),
                jobMetadata.getSla().orElse(null),
                convertToMantisJobState(jobMetadata.getState()),
                jobMetadata.getWorkerTimeoutSecs(),
                jobMetadata.getHeartbeatIntervalSecs(),
                jobMetadata.getSubscriptionTimeoutSecs(),
                jobMetadata.getParameters(),
                jobMetadata.getNextWorkerNumberToUse(),
                // TODO need to wire migration config here so it can get persisted
                null,
                jobMetadata.getLabels());
    }

    /** Converts new-style job metadata to the filterable legacy writable form. */
    public static FilterableMantisJobMetadataWritable convertMantisJobMetadataToFilterableMantisJobMetadataWriteable(IMantisJobMetadata jobMetadata) {
        // Legacy format stores an absent start time as epoch 0.
        Instant startedAtInstant = jobMetadata.getStartedAtInstant().orElse(Instant.ofEpochMilli(0));
        return new FilterableMantisJobMetadataWritable(jobMetadata.getJobId().getId(),
                jobMetadata.getJobId().getCluster(),
                jobMetadata.getUser(),
                jobMetadata.getSubmittedAtInstant().toEpochMilli(),
                startedAtInstant.toEpochMilli(),
                jobMetadata.getJobJarUrl(),
                jobMetadata.getTotalStages(),
                jobMetadata.getSla().orElse(null),
                convertToMantisJobState(jobMetadata.getState()),
                jobMetadata.getWorkerTimeoutSecs(),
                jobMetadata.getHeartbeatIntervalSecs(),
                jobMetadata.getSubscriptionTimeoutSecs(),
                jobMetadata.getParameters(),
                jobMetadata.getNextWorkerNumberToUse(),
                // TODO need to wire migration config here so it can get persisted
                null,
                jobMetadata.getLabels(),
                jobMetadata.getJobCosts());
    }

    /**
     * Maps a legacy job state to the new {@link JobState}. Note that Started and
     * StartInitiated collapse to Launched in the new model.
     */
    public static JobState convertMantisJobStateToJobState(MantisJobState state) {
        JobState oState;
        switch (state) {
        case Accepted:
            oState = JobState.Accepted;
            break;
        case Launched:
            oState = JobState.Launched;
            break;
        case Started:
            oState = JobState.Launched;
            break;
        case StartInitiated:
            oState = JobState.Launched;
            break;
        case Completed:
            oState = JobState.Completed;
            break;
        case Failed:
            oState = JobState.Failed;
            break;
        default:
            oState = JobState.Noop;
            break;
        }
        return oState;
    }

    /**
     * Maps a new {@link JobState} to the legacy state. Terminating_abnormal maps to
     * Failed and Terminating_normal to Completed.
     */
    public static MantisJobState convertToMantisJobState(JobState state) {
        MantisJobState oldState;
        switch (state) {
        case Accepted:
            oldState = MantisJobState.Accepted;
            break;
        case Launched:
            oldState = MantisJobState.Launched;
            break;
        case Terminating_abnormal:
            oldState = MantisJobState.Failed;
            break;
        case Terminating_normal:
            oldState = MantisJobState.Completed;
            break;
        case Failed:
            oldState = MantisJobState.Failed;
            break;
        case Completed:
            oldState = MantisJobState.Completed;
            break;
        case Noop:
            oldState = MantisJobState.Noop;
            break;
        default:
            oldState = MantisJobState.Noop;
        }
        return oldState;
    }

    /** Maps a worker state to the equivalently-named legacy job state. */
    public static MantisJobState convertWorkerStateToMantisJobState(WorkerState state) {
        MantisJobState wState;
        switch (state) {
        case Accepted:
            wState = MantisJobState.Accepted;
            break;
        case Failed:
            wState = MantisJobState.Failed;
            break;
        case Completed:
            wState = MantisJobState.Completed;
            break;
        case Noop:
            wState = MantisJobState.Noop;
            break;
        case StartInitiated:
            wState = MantisJobState.StartInitiated;
            break;
        case Started:
            wState = MantisJobState.Started;
            break;
        case Launched:
            wState = MantisJobState.Launched;
            break;
        default:
            wState = MantisJobState.Noop;
            break;
        }
        return wState;
    }

    /** Maps a legacy job state to the equivalently-named worker state (default: Unknown). */
    public static WorkerState convertMantisJobStateToWorkerState(MantisJobState state) {
        WorkerState wState;
        switch (state) {
        case Accepted:
            wState = WorkerState.Accepted;
            break;
        case Failed:
            wState = WorkerState.Failed;
            break;
        case Completed:
            wState = WorkerState.Completed;
            break;
        case Noop:
            wState = WorkerState.Noop;
            break;
        case StartInitiated:
            wState = WorkerState.StartInitiated;
            break;
        case Started:
            wState = WorkerState.Started;
            break;
        case Launched:
            wState = WorkerState.Launched;
            break;
        default:
            wState = WorkerState.Unknown;
            break;
        }
        return wState;
    }

    /** Converts legacy jar records to API-facing {@link JobClusterInfo.JarInfo} entries. */
    public static List<JobClusterInfo.JarInfo> convertNamedJobJarListToJarInfoList(List<NamedJob.Jar> jars) {
        return jars.stream()
                .map((jar) -> new JobClusterInfo.JarInfo(jar.getVersion(), jar.getUploadedAt(), jar.getUrl().toString()))
                .collect(Collectors.toList());
    }
}
/**
 * A do-nothing {@link MantisJobOperations} implementation: every mutating call is a
 * no-op and every query returns {@code null} (or {@code false}). Serves as a
 * placeholder where real job operations are not wired up.
 */
class NoOpMantisJobOperations implements MantisJobOperations {

    @Override
    public NamedJob createNamedJob(NamedJobDefinition namedJobDefinition) throws InvalidNamedJobException {
        return null; // no-op stub
    }

    @Override
    public NamedJob updateNamedJar(NamedJobDefinition namedJobDefinition, boolean createIfNeeded)
            throws InvalidNamedJobException {
        return null; // no-op stub
    }

    @Override
    public NamedJob quickUpdateNamedJob(String user, String name, URL jobJar, String version)
            throws InvalidNamedJobException {
        return null; // no-op stub
    }

    @Override
    public void updateSla(String user, String name, NamedJob.SLA sla, boolean forceEnable) throws InvalidNamedJobException {
        // no-op stub
    }

    @Override
    public void updateLabels(String user, String name, List<Label> labels) throws InvalidNamedJobException {
        // no-op stub
    }

    @Override
    public void updateMigrateStrategy(String user, String name, WorkerMigrationConfig migrationConfig)
            throws InvalidNamedJobException {
        // no-op stub
    }

    @Override
    public String quickSubmit(String jobName, String user)
            throws InvalidNamedJobException, io.mantisrx.server.master.store.InvalidJobException {
        return null; // no-op stub
    }

    @Override
    public Optional<NamedJob> getNamedJob(String name) {
        // NOTE(review): returns null rather than Optional.empty(); confirm callers tolerate this
        return null;
    }

    @Override
    public void deleteNamedJob(String name, String user) throws NamedJobDeleteException {
        // no-op stub
    }

    @Override
    public void disableNamedJob(String name, String user) throws InvalidNamedJobException {
        // no-op stub
    }

    @Override
    public void enableNamedJob(String name, String user) throws InvalidNamedJobException {
        // no-op stub
    }

    @Override
    public MantisJobStatus submit(MantisJobDefinition jobDefinition) {
        return null; // no-op stub
    }

    @Override
    public boolean deleteJob(String jobId) throws IOException {
        return false; // no-op stub
    }

    @Override
    public void killJob(String user, String jobId, String reason) {
        // no-op stub
    }

    @Override
    public void terminateJob(String jobId) {
        // no-op stub
    }

    @Override
    public Observable<MantisJobStatus> jobs() {
        return null; // no-op stub
    }

    @Override
    public MantisJobStatus status(String jobId) {
        return null; // no-op stub
    }

    @Override
    public Action1<String> getSlaveDisabler() {
        return null; // no-op stub
    }

    @Override
    public Action1<String> getSlaveEnabler() {
        return null; // no-op stub
    }

    @Override
    public void setReady() {
        // no-op stub
    }
}
| 8,002 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/domain/Messages.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.domain;
import akka.actor.ActorRef;
public class Messages {
public static final Object Put = new PutMessage() {
@Override
public String toString() { return "Put"; }
};
public static final Object Take = new TakeMessage() {
@Override
public String toString() { return "Take"; }
};
public static final Object Think = new ThinkMessage() {};
private interface PutMessage {}
private interface TakeMessage {}
private interface ThinkMessage {}
public static final class Busy {
public final ActorRef chopstick;
public Busy(ActorRef chopstick) {
this.chopstick = chopstick;
}
}
public static final class Taken {
public final ActorRef chopstick;
public Taken(ActorRef chopstick) {
this.chopstick = chopstick;
}
}
} | 8,003 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/domain/JobId.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.domain;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Optional;
/**
 * Identifier for a Mantis job: the owning job cluster name plus a per-cluster job
 * number, rendered as {@code <cluster>-<jobNum>}. Immutable; ordered lexicographically
 * by the full string id.
 */
public class JobId implements Comparable<JobId> {

    private static final String DELIMITER = "-";
    private final String cluster;
    private final long jobNum;
    private final String id;

    /**
     * @param clusterName The Job Cluster that this jobID belongs to
     * @param jobNum      Identifies the job for this cluster
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public JobId(@JsonProperty("clusterName") final String clusterName,
                 @JsonProperty("jobNum") final long jobNum) {
        this.cluster = clusterName;
        this.jobNum = jobNum;
        this.id = clusterName + DELIMITER + jobNum;
    }

    /**
     * Parses an id of the form {@code <cluster>-<jobNum>}. Cluster names may themselves
     * contain the delimiter, so the split happens at the LAST '-'.
     *
     * @return a valid JobId if the passed 'id' string is well formed, else {@code Optional.empty()}
     */
    public static Optional<JobId> fromId(final String id) {
        final int i = id.lastIndexOf(DELIMITER);
        if (i < 0) {
            return Optional.empty();
        }
        final String jobCluster = id.substring(0, i);
        try {
            // jobNum is a long: parse with Long.parseLong (previously Integer.parseInt,
            // which wrongly rejected job numbers beyond Integer.MAX_VALUE)
            final long jobNum = Long.parseLong(id.substring(i + 1));
            // constructor result is never null, so Optional.of (was Optional.ofNullable)
            return Optional.of(new JobId(jobCluster, jobNum));
        } catch (NumberFormatException nfe) {
            return Optional.empty();
        }
    }

    public String getCluster() {
        return cluster;
    }

    public long getJobNum() {
        return jobNum;
    }

    /** @return the full {@code <cluster>-<jobNum>} string form. */
    public String getId() {
        return id;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((cluster == null) ? 0 : cluster.hashCode());
        result = prime * result + ((id == null) ? 0 : id.hashCode());
        result = prime * result + (int) (jobNum ^ (jobNum >>> 32));
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        JobId other = (JobId) obj;
        if (cluster == null) {
            if (other.cluster != null)
                return false;
        } else if (!cluster.equals(other.cluster))
            return false;
        if (id == null) {
            if (other.id != null)
                return false;
        } else if (!id.equals(other.id))
            return false;
        return jobNum == other.jobNum;
    }

    @Override
    public String toString() {
        return id;
    }

    /** Orders JobIds lexicographically by their full string id. */
    @Override
    public int compareTo(JobId o) {
        return id.compareTo(o.getId());
    }
}
| 8,004 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/domain/JobClusterConfig.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.domain;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.google.common.base.Preconditions;
import java.util.Objects;
/**
 * A single versioned configuration of a job cluster: the job artifact name, its
 * version, the upload timestamp, and the default scheduling info used at submit time.
 * Instances are immutable; use {@link Builder} to construct them programmatically.
 */
public class JobClusterConfig {

    private final String artifactName;
    private final String version;
    private final long uploadedAt;
    private final SchedulingInfo schedulingInfo;

    /**
     * Deserialization-friendly constructor. A null or empty version defaults to the
     * current time in millis, rendered as a string.
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public JobClusterConfig(@JsonProperty("artifactName") String artifactName,
                            @JsonProperty("uploadedAt") long uploadedAt,
                            @JsonProperty("version") String version,
                            @JsonProperty("schedulingInfo") SchedulingInfo schedulingInfo
    ) {
        this.artifactName = artifactName;
        this.uploadedAt = uploadedAt;
        if (version == null || version.isEmpty()) {
            // default version: submission timestamp
            this.version = "" + System.currentTimeMillis();
        } else {
            this.version = version;
        }
        this.schedulingInfo = schedulingInfo;
    }

    public String getArtifactName() {
        return artifactName;
    }

    public long getUploadedAt() {
        return uploadedAt;
    }

    public String getVersion() {
        return version;
    }

    public SchedulingInfo getSchedulingInfo() {
        return schedulingInfo;
    }

    @Override
    public String toString() {
        return "JobClusterConfig [artifactName=" + artifactName + ", version=" + version + ", uploadedAt=" + uploadedAt
                + ", schedulingInfo=" + schedulingInfo + "]";
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        final JobClusterConfig other = (JobClusterConfig) o;
        return uploadedAt == other.uploadedAt
                && Objects.equals(artifactName, other.artifactName)
                && Objects.equals(version, other.version)
                && Objects.equals(schedulingInfo, other.schedulingInfo);
    }

    @Override
    public int hashCode() {
        return Objects.hash(artifactName, version, uploadedAt, schedulingInfo);
    }

    /** Fluent builder for {@link JobClusterConfig}. */
    public static class Builder {

        String artifactName;
        String version;
        long uploadedAt = -1;
        SchedulingInfo schedulingInfo;

        public Builder() {}

        public Builder withArtifactName(String artifactName) {
            Preconditions.checkNotNull(artifactName, "artifactName cannot be null");
            Preconditions.checkArgument(!artifactName.isEmpty(), "ArtifactName cannot be empty");
            this.artifactName = artifactName;
            return this;
        }

        public Builder withVersion(String version) {
            Preconditions.checkNotNull(version, "version cannot be null");
            Preconditions.checkArgument(!version.isEmpty(), "version cannot be empty");
            this.version = version;
            return this;
        }

        public Builder withUploadedAt(long uAt) {
            Preconditions.checkArgument(uAt > 0, "uploaded At cannot be <= 0");
            this.uploadedAt = uAt;
            return this;
        }

        public Builder withSchedulingInfo(SchedulingInfo sInfo) {
            Preconditions.checkNotNull(sInfo, "schedulingInfo cannot be null");
            this.schedulingInfo = sInfo;
            return this;
        }

        /** Copies every field of an existing config into this builder. */
        public Builder from(JobClusterConfig config) {
            artifactName = config.getArtifactName();
            version = config.getVersion();
            uploadedAt = config.getUploadedAt();
            schedulingInfo = config.getSchedulingInfo();
            return this;
        }

        // TODO add validity checks for SchedulingInfo, MachineDescription etc
        public JobClusterConfig build() {
            Preconditions.checkNotNull(artifactName);
            Preconditions.checkNotNull(schedulingInfo);
            // unset fields default to "now"
            if (uploadedAt == -1) {
                this.uploadedAt = System.currentTimeMillis();
            }
            if (version == null || version.isEmpty()) {
                this.version = "" + System.currentTimeMillis();
            }
            return new JobClusterConfig(artifactName, uploadedAt, version, schedulingInfo);
        }
    }
}
| 8,005 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/domain/IJobClusterDefinition.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.domain;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.parameter.Parameter;
import java.util.List;
/**
 * Contract for a job cluster definition: the cluster's identity (name/user/owner),
 * its configuration history (artifact versions), SLA, worker migration config, and
 * the default parameters/labels applied to jobs submitted against it.
 */
public interface IJobClusterDefinition {
    /** Owner (contact/team) of the job cluster. */
    JobOwner getOwner();
    /** Min/max instance SLA and cron settings for the cluster. */
    SLA getSLA();
    /** Configuration governing how workers are migrated off disabled hosts. */
    WorkerMigrationConfig getWorkerMigrationConfig();
    /** Whether jobs of this cluster may run with a job master (autoscaling) stage. */
    boolean getIsReadyForJobMaster();
    /** Full (ordered) history of cluster configs. */
    List<JobClusterConfig> getJobClusterConfigs();
    /** The current (latest) cluster config. */
    JobClusterConfig getJobClusterConfig();
    String getName();
    String getUser();
    String toString();
    /** Default job parameters applied at submit time. */
    List<Parameter> getParameters();
    /** Labels attached to the cluster. */
    List<Label> getLabels();
    /** How cron-driven SLA updates reconcile with an existing cron: keep old or take new. */
    enum CronPolicy {KEEP_EXISTING, KEEP_NEW}
}
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/domain/JobClusterDefinitionImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.domain;
import io.mantisrx.common.Label;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.google.common.base.Preconditions;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Default {@link IJobClusterDefinition} implementation: a snapshot of a job cluster's
 * configuration history (artifact versions, latest last), owner, SLA, worker migration
 * config, default parameters and labels.
 */
public class JobClusterDefinitionImpl implements IJobClusterDefinition {

    private static final Logger logger = LoggerFactory.getLogger(JobClusterDefinitionImpl.class);

    private final String name;
    private final String user;
    private final JobOwner owner;
    private final SLA sla;
    private final WorkerMigrationConfig migrationConfig;
    // ordered history of configs; the last entry is treated as the latest
    private final List<JobClusterConfig> jobClusterConfigs = Lists.newArrayList();
    private final List<Parameter> parameters;
    private final List<Label> labels;
    private boolean isReadyForJobMaster = false;

    /**
     * Jackson-friendly constructor. Null sla/migrationConfig/parameters/labels fall
     * back to defaults; jobClusterConfigs must be non-null and non-empty.
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public JobClusterDefinitionImpl(@JsonProperty("name") String name,
                                    @JsonProperty("jobClusterConfigs") List<JobClusterConfig> jobClusterConfigs,
                                    @JsonProperty("owner") JobOwner owner,
                                    @JsonProperty("user") String user,
                                    @JsonProperty("sla") SLA sla,
                                    @JsonProperty("migrationConfig") WorkerMigrationConfig migrationConfig,
                                    @JsonProperty("isReadyForJobMaster") boolean isReadyForJobMaster,
                                    @JsonProperty("parameters") List<Parameter> parameters,
                                    @JsonProperty("labels") List<Label> labels
    ) {
        Preconditions.checkNotNull(jobClusterConfigs);
        Preconditions.checkArgument(!jobClusterConfigs.isEmpty());
        this.owner = owner;
        this.name = name;
        // null SLA / migration config fall back to permissive defaults
        this.sla = Optional.ofNullable(sla).orElse(new SLA(0, 0, null, CronPolicy.KEEP_EXISTING));
        this.migrationConfig = Optional.ofNullable(migrationConfig).orElse(WorkerMigrationConfig.DEFAULT);
        this.isReadyForJobMaster = isReadyForJobMaster;
        this.jobClusterConfigs.addAll(jobClusterConfigs);
        this.labels = Optional.ofNullable(labels).orElse(Lists.newArrayList());
        this.parameters = Optional.ofNullable(parameters).orElse(Lists.newArrayList());
        this.user = user;
    }

    /** {@inheritDoc} */
    @Override
    public JobOwner getOwner() {
        return owner;
    }

    /** {@inheritDoc} */
    @Override
    public SLA getSLA() {
        return this.sla;
    }

    /** {@inheritDoc} */
    @Override
    public WorkerMigrationConfig getWorkerMigrationConfig() {
        return this.migrationConfig;
    }

    /** {@inheritDoc} */
    @Override
    public boolean getIsReadyForJobMaster() {
        return this.isReadyForJobMaster;
    }

    /** @return the full config history as an unmodifiable view. */
    @Override
    public List<JobClusterConfig> getJobClusterConfigs() {
        return Collections.unmodifiableList(this.jobClusterConfigs);
    }

    /** @return the latest config, i.e. the last element of the history list. */
    @Override
    public JobClusterConfig getJobClusterConfig() {
        return this.jobClusterConfigs.get(jobClusterConfigs.size() - 1);
    }

    /** {@inheritDoc} */
    @Override
    public String getName() {
        return name;
    }

    /** {@inheritDoc} */
    @Override
    public String getUser() {
        return user;
    }

    /** @return default job parameters as an unmodifiable view. */
    @Override
    public List<Parameter> getParameters() {
        return Collections.unmodifiableList(this.parameters);
    }

    /** @return cluster labels as an unmodifiable view. */
    @Override
    public List<Label> getLabels() {
        return Collections.unmodifiableList(this.labels);
    }

    @Override
    public String toString() {
        return "JobClusterDefinitionImpl{" +
                "name='" + name + '\'' +
                ", user='" + user + '\'' +
                ", owner=" + owner +
                ", sla=" + sla +
                ", migrationConfig=" + migrationConfig +
                ", isReadyForJobMaster=" + isReadyForJobMaster +
                ", jobClusterConfigs=" + jobClusterConfigs +
                ", parameters=" + parameters +
                ", labels=" + labels +
                '}';
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        JobClusterDefinitionImpl that = (JobClusterDefinitionImpl) o;
        return isReadyForJobMaster == that.isReadyForJobMaster &&
                Objects.equals(name, that.name) &&
                Objects.equals(user, that.user) &&
                Objects.equals(owner, that.owner) &&
                Objects.equals(sla, that.sla) &&
                Objects.equals(migrationConfig, that.migrationConfig) &&
                Objects.equals(jobClusterConfigs, that.jobClusterConfigs) &&
                Objects.equals(parameters, that.parameters) &&
                Objects.equals(labels, that.labels);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, user, owner, sla, migrationConfig, isReadyForJobMaster, jobClusterConfigs, parameters, labels);
    }

    /**
     * Immutable record of a job that has reached a terminal state, retained as part of
     * the cluster's history.
     */
    public static class CompletedJob {

        private final String name;
        private final String jobId;
        private final String version;
        private final JobState state;
        private final long submittedAt;
        private final long terminatedAt;
        private final String user;
        private final List<Label> labelList;

        @JsonCreator
        @JsonIgnoreProperties(ignoreUnknown = true)
        public CompletedJob(
                @JsonProperty("name") String name,
                @JsonProperty("jobId") String jobId,
                @JsonProperty("version") String version,
                @JsonProperty("state") JobState state,
                @JsonProperty("submittedAt") long submittedAt,
                @JsonProperty("terminatedAt") long terminatedAt,
                @JsonProperty("user") String user,
                @JsonProperty("labels") List<Label> labels
        ) {
            this.name = name;
            this.jobId = jobId;
            this.version = version;
            this.state = state;
            this.submittedAt = submittedAt;
            this.terminatedAt = terminatedAt;
            this.user = user;
            this.labelList = labels;
        }

        public String getName() {
            return name;
        }

        public String getJobId() {
            return jobId;
        }

        public String getVersion() {
            return version;
        }

        public JobState getState() {
            return state;
        }

        public long getSubmittedAt() {
            return submittedAt;
        }

        public long getTerminatedAt() {
            return terminatedAt;
        }

        public String getUser() {
            return user;
        }

        public List<Label> getLabelList() {
            return labelList;
        }

        @Override
        public String toString() {
            return "CompletedJob{" +
                    "name='" + name + '\'' +
                    ", jobId='" + jobId + '\'' +
                    ", version='" + version + '\'' +
                    ", state=" + state +
                    ", submittedAt=" + submittedAt +
                    ", terminatedAt=" + terminatedAt +
                    ", user='" + user + '\'' +
                    ", labelList=" + labelList +
                    '}';
        }

        // NOTE(review): labelList is excluded from equals/hashCode (the two are mutually
        // consistent); confirm this exclusion is intentional.
        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            CompletedJob that = (CompletedJob) o;
            return submittedAt == that.submittedAt &&
                    terminatedAt == that.terminatedAt &&
                    Objects.equals(name, that.name) &&
                    Objects.equals(jobId, that.jobId) &&
                    Objects.equals(version, that.version) &&
                    state == that.state &&
                    Objects.equals(user, that.user);
        }

        @Override
        public int hashCode() {
            return Objects.hash(name, jobId, version, state, submittedAt, terminatedAt, user);
        }
    }

    /** Fluent builder for {@link JobClusterDefinitionImpl}. */
    public static class Builder {

        List<JobClusterConfig> jobClusterConfigs = new ArrayList<>();
        JobOwner owner = null;
        SLA sla = new SLA(0, 0, null, null);
        WorkerMigrationConfig migrationConfig = WorkerMigrationConfig.DEFAULT;
        boolean isReadyForJobMaster = true;
        String name = null;
        String user = "default";
        List<Parameter> parameters = Lists.newArrayList();
        List<Label> labels = Lists.newArrayList();

        public Builder() {}

        public Builder withName(String name) {
            Preconditions.checkNotNull(name, "Cluster name cannot be null");
            Preconditions.checkArgument(!name.isEmpty(), "cluster Name cannot be empty");
            this.name = name;
            return this;
        }

        public Builder withUser(String user) {
            Preconditions.checkNotNull(user, "user cannot be null");
            Preconditions.checkArgument(!user.isEmpty(), "user cannot be empty");
            this.user = user;
            return this;
        }

        /** Appends a config to the history unless an identical one is already present. */
        public Builder withJobClusterConfig(JobClusterConfig config) {
            Preconditions.checkNotNull(config, "config cannot be null");
            if (!jobClusterConfigs.contains(config)) { // skip if this config already exists
                jobClusterConfigs.add(config);
            }
            return this;
        }

        /** Replaces the entire config history with the given list. */
        public Builder withJobClusterConfigs(List<JobClusterConfig> jars) {
            Preconditions.checkNotNull(jars, "config list cannot be null");
            this.jobClusterConfigs = jars;
            return this;
        }

        public Builder withOwner(JobOwner owner) {
            Preconditions.checkNotNull(owner, "owner cannot be null");
            this.owner = owner;
            return this;
        }

        public Builder withSla(SLA sla) {
            Preconditions.checkNotNull(sla, "sla cannot be null");
            this.sla = sla;
            return this;
        }

        public Builder withMigrationConfig(WorkerMigrationConfig config) {
            Preconditions.checkNotNull(config, "migration config cannot be null");
            this.migrationConfig = config;
            return this;
        }

        public Builder withIsReadyForJobMaster(boolean ready) {
            this.isReadyForJobMaster = ready;
            return this;
        }

        public Builder withParameters(List<Parameter> ps) {
            this.parameters = ps;
            return this;
        }

        public Builder withLabels(List<Label> labels) {
            this.labels = labels;
            return this;
        }

        public Builder withLabel(Label label) {
            Preconditions.checkNotNull(label, "label cannot be null");
            this.labels.add(label);
            return this;
        }

        /** Copies every field of an existing definition into this builder. */
        public Builder from(IJobClusterDefinition defn) {
            migrationConfig = defn.getWorkerMigrationConfig();
            name = defn.getName();
            sla = defn.getSLA();
            isReadyForJobMaster = defn.getIsReadyForJobMaster();
            owner = defn.getOwner();
            user = defn.getUser();
            parameters = defn.getParameters();
            labels = defn.getLabels();
            // avoid duplicates while retaining insertion order, so a Set cannot be used
            for (JobClusterConfig jcConfig : defn.getJobClusterConfigs()) {
                if (!jobClusterConfigs.contains(jcConfig)) {
                    jobClusterConfigs.add(jcConfig);
                }
            }
            return this;
        }

        /**
         * Takes every non-config field from {@code newDefn}, keeps the name of
         * {@code oldDefn}, and builds the config history from at most the last 3
         * configs of the old definition followed by the new definition's latest config.
         */
        public Builder mergeConfigsAndOverrideRest(IJobClusterDefinition oldDefn, IJobClusterDefinition newDefn) {
            List<JobClusterConfig> oldConfigs = oldDefn.getJobClusterConfigs();
            logger.info("Existing JobClusterConfigs {} ", oldConfigs);
            logger.info("New JobClusterConfig {} ", newDefn.getJobClusterConfig());
            if (oldConfigs != null) {
                // retain only the most recent 3 historical configs
                List<JobClusterConfig> subList = Collections.unmodifiableList(oldConfigs.subList(
                        Math.max(0, oldConfigs.size() - 3),
                        oldConfigs.size()
                ));
                this.jobClusterConfigs.addAll(subList);
            }
            this.jobClusterConfigs.add(newDefn.getJobClusterConfig());
            logger.info("Merged JobClusterConfigs {} ", this.jobClusterConfigs);
            this.sla = newDefn.getSLA();
            this.parameters = newDefn.getParameters();
            this.labels = newDefn.getLabels();
            this.user = newDefn.getUser();
            this.migrationConfig = newDefn.getWorkerMigrationConfig();
            this.owner = newDefn.getOwner();
            this.isReadyForJobMaster = newDefn.getIsReadyForJobMaster();
            this.name = oldDefn.getName();
            return this;
        }

        public JobClusterDefinitionImpl build() {
            Preconditions.checkNotNull(owner);
            Preconditions.checkNotNull(name);
            Preconditions.checkNotNull(user);
            Preconditions.checkNotNull(jobClusterConfigs);
            Preconditions.checkArgument(!jobClusterConfigs.isEmpty());
            return new JobClusterDefinitionImpl(name, jobClusterConfigs, owner, user, sla, migrationConfig, isReadyForJobMaster, parameters, labels);
        }
    }
}
| 8,007 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/domain/JobDefinition.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.domain;
import static io.mantisrx.master.jobcluster.LabelManager.SystemLabels.MANTIS_RESOURCE_CLUSTER_NAME_LABEL;
import io.mantisrx.common.Label;
import io.mantisrx.master.jobcluster.LabelManager.SystemLabels;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.command.InvalidJobException;
import io.mantisrx.runtime.descriptor.DeploymentStrategy;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.google.common.base.Preconditions;
import io.mantisrx.shaded.com.google.common.base.Strings;
import io.mantisrx.shaded.com.google.common.collect.ImmutableList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Function;
import java.util.stream.Collectors;
import lombok.ToString;
@ToString
public class JobDefinition {
private final String name;
private final String user;
private final String artifactName;
private final String version;
private final List<Parameter> parameters;
private final JobSla jobSla;
private final long subscriptionTimeoutSecs;
private final SchedulingInfo schedulingInfo;
private final DeploymentStrategy deploymentStrategy;
private final int withNumberOfStages;
private Map<String, Label> labels; // Map label->name to label instance.
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public JobDefinition(@JsonProperty("name") String name,
@JsonProperty("user") String user,
@JsonProperty("artifactName") String artifactName,
@JsonProperty("version") String version,
@JsonProperty("parameters") List<Parameter> parameters,
@JsonProperty("jobSla") JobSla jobSla,
@JsonProperty("subscriptionTimeoutSecs") long subscriptionTimeoutSecs,
@JsonProperty("schedulingInfo") SchedulingInfo schedulingInfo,
@JsonProperty("numberOfStages") int withNumberOfStages,
@JsonProperty("labels") List<Label> labels,
@JsonProperty("deploymentStrategy") DeploymentStrategy deploymentStrategy
) throws InvalidJobException {
this.name = name;
this.user = user;
this.artifactName = artifactName;
this.version = version;
if (parameters != null) {
this.parameters = parameters;
} else {
this.parameters = new LinkedList<>();
}
if (labels != null) {
this.labels = labels.stream().collect(Collectors.toMap(Label::getName, Function.identity(), (l1, l2) -> l2));
} else {
this.labels = new HashMap<>();
}
this.jobSla = jobSla;
if (subscriptionTimeoutSecs > 0) {
this.subscriptionTimeoutSecs = subscriptionTimeoutSecs;
} else {
this.subscriptionTimeoutSecs = 0;
}
this.schedulingInfo = schedulingInfo;
this.deploymentStrategy = deploymentStrategy;
this.withNumberOfStages = withNumberOfStages;
postProcess();
validate(true);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
JobDefinition that = (JobDefinition) o;
return subscriptionTimeoutSecs == that.subscriptionTimeoutSecs &&
withNumberOfStages == that.withNumberOfStages &&
Objects.equals(name, that.name) &&
Objects.equals(user, that.user) &&
Objects.equals(artifactName, that.artifactName) &&
Objects.equals(version, that.version) &&
Objects.equals(parameters, that.parameters) &&
Objects.equals(jobSla, that.jobSla) &&
Objects.equals(labels, that.labels);
}
    /**
     * Hashes the same fields used by {@link #equals(Object)}; schedulingInfo and
     * deploymentStrategy are not included, consistent with equals.
     */
    @Override
    public int hashCode() {
        return Objects.hash(name, user, artifactName, version, parameters, jobSla, subscriptionTimeoutSecs, labels, withNumberOfStages);
    }
    /**
     * Validates the SLA and the scheduling info.
     *
     * @param schedulingInfoOptional when true, a null schedulingInfo is accepted
     * @throws InvalidJobException if either validation fails
     */
    public void validate(boolean schedulingInfoOptional) throws InvalidJobException {
        validateSla();
        validateSchedulingInfo(schedulingInfoOptional);
    }
    /**
     * True when both scheduling info and a deployment strategy are present and the
     * strategy requests instance-count inheritance for any stage.
     */
    public boolean requireInheritInstanceCheck() {
        return this.schedulingInfo != null && this.deploymentStrategy != null && this.getDeploymentStrategy().requireInheritInstanceCheck();
    }
    /**
     * Per-stage variant of {@link #requireInheritInstanceCheck()}: additionally requires
     * that the given stage number exists in the scheduling info.
     */
    public boolean requireInheritInstanceCheck(int stageNum) {
        return this.schedulingInfo != null && this.getSchedulingInfo().getStages().containsKey(stageNum) &&
            this.deploymentStrategy != null && this.getDeploymentStrategy().requireInheritInstanceCheck(stageNum);
    }
private void validateSla() throws InvalidJobException {
if (jobSla == null)
throw new InvalidJobException("No Job SLA provided (likely incorrect job submit request)");
if (jobSla.getDurationType() == null)
throw new InvalidJobException("Invalid null duration type in job sla (likely incorrect job submit request");
}
    /**
     * Validates scheduling info, treating a null schedulingInfo as an error.
     *
     * @throws InvalidJobException if scheduling info is absent or invalid
     */
    public void validateSchedulingInfo() throws InvalidJobException {
        validateSchedulingInfo(false);
    }
private void postProcess() {
if (this.deploymentStrategy != null && !Strings.isNullOrEmpty(this.deploymentStrategy.getResourceClusterId())) {
this.labels.put(
SystemLabels.MANTIS_RESOURCE_CLUSTER_NAME_LABEL.label,
new Label(
SystemLabels.MANTIS_RESOURCE_CLUSTER_NAME_LABEL.label,
this.deploymentStrategy.getResourceClusterId()));
}
}
private void validateSchedulingInfo(boolean schedulingInfoOptional) throws InvalidJobException {
if (schedulingInfoOptional && schedulingInfo == null)
return;
if (schedulingInfo == null)
throw new InvalidJobException("No scheduling info provided");
if (schedulingInfo.getStages() == null)
throw new InvalidJobException("No stages defined in scheduling info");
int withNumberOfStages = schedulingInfo.getStages().size();
int startingIdx = 1;
if (schedulingInfo.forStage(0) != null) {
// jobMaster stage 0 definition exists, adjust index range
startingIdx = 0;
withNumberOfStages--;
}
for (int i = startingIdx; i <= withNumberOfStages; i++) {
StageSchedulingInfo stage = schedulingInfo.getStages().get(i);
if (stage == null)
throw new InvalidJobException("No definition for stage " + i + " in scheduling info for " + withNumberOfStages + " stage job");
if (stage.getNumberOfInstances() < 1)
throw new InvalidJobException("Number of instance for stage " + i + " must be >0, not " + stage.getNumberOfInstances());
MachineDefinition machineDefinition = stage.getMachineDefinition();
if (machineDefinition.getCpuCores() <= 0)
throw new InvalidJobException("cpuCores must be >0.0, not " + machineDefinition.getCpuCores());
if (machineDefinition.getMemoryMB() <= 0)
throw new InvalidJobException("memory must be <0.0, not " + machineDefinition.getMemoryMB());
if (machineDefinition.getDiskMB() < 0)
throw new InvalidJobException("disk must be >=0, not " + machineDefinition.getDiskMB());
if (machineDefinition.getNumPorts() < 0)
throw new InvalidJobException("numPorts must be >=0, not " + machineDefinition.getNumPorts());
}
}
    /** @return the job cluster name. */
    public String getName() {
        return name;
    }
    /** @return the submitting user. */
    public String getUser() {
        return user;
    }
    /** @return the artifact name this job runs. */
    public String getArtifactName() {
        return artifactName;
    }
    /** @return the artifact version string. */
    public String getVersion() { return version;}
    /** @return an unmodifiable view of the job parameters. */
    public List<Parameter> getParameters() {
        return Collections.unmodifiableList(parameters);
    }
    /** @return the job SLA. */
    public JobSla getJobSla() {
        return jobSla;
    }
    /** @return subscription timeout in seconds; 0 means none configured. */
    public long getSubscriptionTimeoutSecs() {
        return subscriptionTimeoutSecs;
    }
    /** @return the scheduling info; may be null (validated as optional at construction). */
    public SchedulingInfo getSchedulingInfo() {
        return schedulingInfo;
    }
    /** @return the deployment strategy; may be null. */
    public DeploymentStrategy getDeploymentStrategy() { return deploymentStrategy; }
    // NOTE: setSchedulingInfo was removed to keep this class effectively immutable;
    // derive a modified copy via Builder.from(jobDefinition).withSchedulingInfo(...).
    /** @return an immutable snapshot of the label values (order not guaranteed). */
    public List<Label> getLabels() {
        return ImmutableList.copyOf(this.labels.values());
    }
    /** @return the stage count recorded at construction. */
    public int getNumberOfStages() {
        return this.withNumberOfStages;
    }
public Optional<ClusterID> getResourceCluster() {
return getLabels()
.stream()
.filter(label -> label.getName().equals(MANTIS_RESOURCE_CLUSTER_NAME_LABEL.label))
.findFirst()
.map(l -> ClusterID.of(l.getValue()));
}
@JsonIgnore
public int getIntSystemParameter(String paramName, int defaultValue) {
return getParameters().stream()
.filter(p -> paramName.equals(p.getName()))
.map(Parameter::getValue)
.filter(Objects::nonNull)
.map(v -> {
try {
return Integer.parseInt(v);
} catch (Exception e) {
return defaultValue;
}})
.findFirst()
.orElse(defaultValue);
}
public static class Builder {
private String name;
private String user;
private List<Parameter> parameters;
private List<Label> labels;
private String artifactName = null;
private String version = null;
private JobSla jobSla = new JobSla(0, 0, JobSla.StreamSLAType.Lossy, MantisJobDurationType.Transient, null);
private long subscriptionTimeoutSecs = 0L;
private SchedulingInfo schedulingInfo;
private DeploymentStrategy deploymentStrategy;
private int withNumberOfStages = 1;
public Builder() {
}
public Builder withName(String name) {
this.name = name;
return this;
}
public Builder withArtifactName(String artifactName) {
this.artifactName = artifactName;
return this;
}
public Builder withJobSla(JobSla sla) {
this.jobSla = sla;
return this;
}
public Builder withUser(String user) {
this.user = user;
return this;
}
public Builder withSchedulingInfo(SchedulingInfo schedInfo) {
this.schedulingInfo = schedInfo;
return this;
}
public Builder withDeploymentStrategy(DeploymentStrategy strategy) {
this.deploymentStrategy = strategy;
return this;
}
public Builder withNumberOfStages(int stages) {
this.withNumberOfStages = stages;
return this;
}
public Builder withSubscriptionTimeoutSecs(long t) {
this.subscriptionTimeoutSecs = t;
return this;
}
public Builder withParameters(List<Parameter> params) {
this.parameters = params;
return this;
}
public Builder withLabels(List<Label> labels) {
this.labels = labels;
return this;
}
public Builder withVersion(String version) {
this.version = version;
return this;
}
public Builder from(final JobDefinition jobDefinition) {
this.withJobSla(jobDefinition.getJobSla());
this.withNumberOfStages(jobDefinition.getNumberOfStages());
this.withSubscriptionTimeoutSecs(jobDefinition.getSubscriptionTimeoutSecs());
this.withUser(jobDefinition.user);
this.withSchedulingInfo(jobDefinition.getSchedulingInfo());
this.withDeploymentStrategy(jobDefinition.getDeploymentStrategy());
this.withParameters(jobDefinition.getParameters());
this.withLabels(jobDefinition.getLabels());
this.withName(jobDefinition.name);
this.withArtifactName(jobDefinition.artifactName);
this.withVersion(jobDefinition.getVersion());
return this;
}
public Builder fromWithInstanceCountInheritance(
final JobDefinition jobDefinition,
boolean forceInheritance,
Function<Integer, Optional<Integer>> getExistingInstanceCountForStage) {
this.from(jobDefinition);
SchedulingInfo.Builder mergedSInfoBuilder = new SchedulingInfo.Builder().createWithInstanceInheritance(
jobDefinition.getSchedulingInfo().getStages(),
getExistingInstanceCountForStage,
jobDefinition::requireInheritInstanceCheck,
forceInheritance);
this.withSchedulingInfo(mergedSInfoBuilder.build());
return this;
}
public JobDefinition build() throws InvalidJobException {
Preconditions.checkNotNull(name, "cluster name cannot be null");
Preconditions.checkNotNull(jobSla, "job sla cannot be null");
// Preconditions.checkNotNull(schedulingInfo, "schedulingInfo cannot be null");
if (schedulingInfo != null) {
withNumberOfStages = schedulingInfo.getStages().size();
}
Preconditions.checkArgument(withNumberOfStages > 0, "Number of stages cannot be less than 0");
return new JobDefinition(
name, user, artifactName, version, parameters, jobSla,
subscriptionTimeoutSecs, schedulingInfo, withNumberOfStages, labels, deploymentStrategy);
}
}
}
| 8,008 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/store/JobNameAlreadyExistsException.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.store;
/**
 * Signals that a job cluster with the requested name already exists.
 */
public class JobNameAlreadyExistsException extends Exception {
    /** @param message description including the conflicting name. */
    public JobNameAlreadyExistsException(String message) {
        super(message);
    }
    /** @param cause the underlying failure, preserved for diagnostics. */
    public JobNameAlreadyExistsException(String message, Throwable cause) {
        super(message, cause);
    }
}
| 8,009 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/store/NamedJobDeleteException.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.store;
/**
 * Signals a failure while deleting a named job.
 */
public class NamedJobDeleteException extends Exception {
    /** @param reason description of why the delete failed. */
    public NamedJobDeleteException(String reason) {
        super(reason);
    }
    /** @param cause the underlying failure, preserved for diagnostics. */
    public NamedJobDeleteException(String reason, Throwable cause) {
        super(reason, cause);
    }
}
| 8,010 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/store/MantisJobMetadata.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.store;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.parameter.Parameter;
import java.net.URL;
import java.util.Collection;
import java.util.List;
/**
 * Read-side view of a Mantis job's persisted metadata: identity, lifecycle timestamps,
 * SLA, parameters/labels, and per-stage/per-worker metadata accessors.
 */
public interface MantisJobMetadata {
    // Sentinel startedAt value (epoch 0); implementations initialize startedAt to this
    // before a job has actually started — confirm with each implementation.
    long DEFAULT_STARTED_AT_EPOCH = 0;
    String getJobId();
    String getName();
    String getUser();
    // Lifecycle timestamps (epoch millis).
    long getSubmittedAt();
    long getStartedAt();
    URL getJarUrl();
    JobSla getSla();
    long getSubscriptionTimeoutSecs();
    MantisJobState getState();
    List<Parameter> getParameters();
    List<Label> getLabels();
    // Stage/worker accessors; the worker lookups throw for unknown indices/numbers.
    Collection<? extends MantisStageMetadata> getStageMetadata();
    int getNumStages();
    MantisStageMetadata getStageMetadata(int stageNum);
    MantisWorkerMetadata getWorkerByIndex(int stageNumber, int workerIndex) throws InvalidJobException;
    MantisWorkerMetadata getWorkerByNumber(int workerNumber) throws InvalidJobException;
    // Returns a handle whose close() releases the lock guarding this job's metadata.
    AutoCloseable obtainLock();
    int getNextWorkerNumberToUse();
    WorkerMigrationConfig getMigrationConfig();
    long getHeartbeatIntervalSecs();
    long getWorkerTimeoutSecs();
}
| 8,011 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/store/MantisWorkerMetadataWritable.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.store;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.locks.ReentrantLock;
public class MantisWorkerMetadataWritable implements MantisWorkerMetadata {
@JsonIgnore
private final WorkerId workerId;
@JsonIgnore
private final ReentrantLock lock = new ReentrantLock();
private int workerIndex;
private int workerNumber;
private String jobId;
private int stageNum;
private int numberOfPorts;
private int metricsPort;
private int consolePort;
private int debugPort = -1;
private int customPort;
private List<Integer> ports;
private volatile MantisJobState state;
private String slave;
private String slaveID;
private Optional<String> cluster = Optional.empty();
private Optional<ClusterID> resourceCluster = Optional.empty();
private long acceptedAt = 0;
private long launchedAt = 0;
private long startingAt = 0;
private long startedAt = 0;
private long completedAt = 0;
private JobCompletedReason reason;
private int resubmitOf = -1;
private int totalResubmitCount = 0;
@JsonIgnore
private volatile long lastHeartbeatAt = 0;
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public MantisWorkerMetadataWritable(@JsonProperty("workerIndex") int workerIndex,
@JsonProperty("workerNumber") int workerNumber,
@JsonProperty("jobId") String jobId,
@JsonProperty("stageNum") int stageNum,
@JsonProperty("numberOfPorts") int numberOfPorts) {
this.workerIndex = workerIndex;
this.workerNumber = workerNumber;
this.jobId = jobId;
this.workerId = new WorkerId(jobId, workerIndex, workerNumber);
this.stageNum = stageNum;
this.numberOfPorts = numberOfPorts;
this.state = MantisJobState.Accepted;
this.acceptedAt = System.currentTimeMillis();
this.ports = new ArrayList<>();
}
@Override
public int getWorkerIndex() {
return workerIndex;
}
@Override
public int getWorkerNumber() {
return workerNumber;
}
@Override
public WorkerId getWorkerId() {
return workerId;
}
@Override
public String getJobId() {
return jobId;
}
@Override
public int getStageNum() {
return stageNum;
}
@Override
public int getNumberOfPorts() {
return numberOfPorts;
}
@Override
public List<Integer> getPorts() {
return ports;
}
@Override
public void addPorts(List<Integer> ports) {
this.ports.addAll(ports);
}
@Override
public int getTotalResubmitCount() {
return totalResubmitCount;
}
@Override
public int getMetricsPort() {
return metricsPort;
}
public void setMetricsPort(int metricsPort) {
this.metricsPort = metricsPort;
}
@Override
public int getDebugPort() {
return debugPort;
}
public void setDebugPort(int debugPort) {
this.debugPort = debugPort;
}
@Override
public int getConsolePort() {
return consolePort;
}
public void setConsolePort(int port) {
this.consolePort = port;
}
@Override
public int getCustomPort() {
return customPort;
}
public void setCustomPort(int port) {
this.customPort = port;
}
@Override
public int getResubmitOf() {
return resubmitOf;
}
@JsonIgnore
public void setResubmitInfo(int resubmitOf, int totalCount) {
this.resubmitOf = resubmitOf;
this.totalResubmitCount = totalCount;
}
@JsonIgnore
public long getLastHeartbeatAt() {
return lastHeartbeatAt;
}
@JsonIgnore
public void setLastHeartbeatAt(long lastHeartbeatAt) {
this.lastHeartbeatAt = lastHeartbeatAt;
}
private void validateStateChange(MantisJobState newState) throws InvalidJobStateChangeException {
if (!state.isValidStateChgTo(newState))
throw new InvalidJobStateChangeException(jobId, state, newState);
}
/**
* Added for use by new Mantis Master to reuse old DAOs
* Does not do state transition validation
*
* @param state
* @param when
* @param reason
*/
public void setStateNoValidation(MantisJobState state, long when, JobCompletedReason reason) {
this.state = state;
switch (state) {
case Accepted:
this.acceptedAt = when;
break;
case Launched:
this.launchedAt = when;
break;
case StartInitiated:
this.startingAt = when;
break;
case Started:
this.startedAt = when;
break;
case Failed:
this.completedAt = when;
this.reason = reason == null ? JobCompletedReason.Lost : reason;
break;
case Completed:
this.completedAt = when;
this.reason = reason == null ? JobCompletedReason.Normal : reason;
break;
default:
assert false : "Unexpected job state to set";
}
}
public void setState(MantisJobState state, long when, JobCompletedReason reason) throws InvalidJobStateChangeException {
validateStateChange(state);
this.state = state;
switch (state) {
case Accepted:
this.acceptedAt = when;
break;
case Launched:
this.launchedAt = when;
break;
case StartInitiated:
this.startingAt = when;
break;
case Started:
this.startedAt = when;
break;
case Failed:
this.completedAt = when;
this.reason = reason == null ? JobCompletedReason.Lost : reason;
break;
case Completed:
this.completedAt = when;
this.reason = reason == null ? JobCompletedReason.Normal : reason;
break;
default:
assert false : "Unexpected job state to set";
}
}
@Override
public MantisJobState getState() {
return state;
}
@Override
public String getSlave() {
return slave;
}
public void setSlave(String slave) {
this.slave = slave;
}
public Optional<String> getCluster() {
return cluster;
}
public void setCluster(final Optional<String> cluster) {
this.cluster = cluster;
}
@Override
public Optional<ClusterID> getResourceCluster() {
return resourceCluster;
}
public void setResourceCluster(Optional<ClusterID> resourceCluster) {
this.resourceCluster = resourceCluster;
}
@Override
public String getSlaveID() {
return slaveID;
}
public void setSlaveID(String slaveID) {
this.slaveID = slaveID;
}
@Override
public long getAcceptedAt() {
return acceptedAt;
}
public void setAcceptedAt(long when) {
this.acceptedAt = when;
}
@Override
public long getLaunchedAt() {
return launchedAt;
}
public void setLaunchedAt(long when) {
this.launchedAt = when;
}
@Override
public long getStartingAt() {
return startingAt;
}
public void setStartingAt(long when) {
this.startingAt = when;
}
@Override
public long getStartedAt() {
return startedAt;
}
public void setStartedAt(long when) {
this.startedAt = when;
}
@Override
public long getCompletedAt() {
return completedAt;
}
public void setCompletedAt(long when) {
this.completedAt = when;
}
@Override
public JobCompletedReason getReason() {
return reason;
}
public void setReason(JobCompletedReason reason) {
this.reason = reason;
}
@Override
public String toString() {
return "Worker " + workerNumber + " state=" + state + ", acceptedAt=" + acceptedAt +
((launchedAt == 0) ? "" : ", launchedAt=" + launchedAt) +
((startingAt == 0) ? "" : ", startingAt=" + startingAt) +
((startedAt == 0) ? "" : ", startedAt=" + startedAt) +
((completedAt == 0) ? "" : ", completedAt=" + completedAt) +
", #ports=" + ports.size() + ", ports=" + ports;
}
}
| 8,012 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/store/FileBasedStore.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.store;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.flink.util.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Simple File based storage provider. Intended mainly as a sample implementation for
* {@link KeyValueStore} interface. This implementation is complete in its functionality, but, isn't
* expected to be scalable or performant for production loads.
* <P>This implementation uses <code>/tmp/MantisSpool/</code> as the spool directory. The directory is created
* if not present already. It will fail only if either a file with that name exists or if a directory with that
* name exists but isn't writable.</P>
*/
public class FileBasedStore implements KeyValueStore {
    private static final Logger logger = LoggerFactory.getLogger(FileBasedStore.class);
    // Root directory under which one sub-directory per table is kept; each partition
    // is a single file of "secondaryKey,value" lines.
    private final File rootDir;
    // Serializes writers (read-modify-write of partition files). Reads are not locked;
    // concurrent readers may observe a partially written file — presumably acceptable
    // for this sample implementation (see class javadoc); confirm for production use.
    private final ReentrantLock fileLock = new ReentrantLock();
    public FileBasedStore() {
        this(new File("/tmp/mantis_storage"));
    }
    /**
     * Creates the store rooted at the given directory, creating it if absent.
     * Fails fast (RuntimeException) if the directory cannot be created.
     */
    public FileBasedStore(File rootDir) {
        this.rootDir = rootDir;
        final Path rootDirPath = Paths.get(rootDir.getPath());
        try {
            if (Files.notExists(rootDirPath)) {
                Files.createDirectories(rootDirPath);
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    // Resolves <root>/<table>/<partition>, creating the table directory on demand.
    private Path makePath(String dir, String fileName) throws IOException {
        Files.createDirectories(Paths.get(rootDir.getPath(), dir));
        return Paths.get(rootDir.getPath(), dir, fileName);
    }
    /** Deletes the entire store directory (destroys all tables). */
    public void reset() {
        try {
            FileUtils.deleteDirectory(Paths.get(rootDir.getPath()).toFile());
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    /**
     * Reads every partition of a table. Partitions that fail to read are logged and
     * silently skipped rather than failing the whole call.
     */
    @Override
    public Map<String, Map<String, String>> getAllRows(String tableName) throws IOException {
        return getAllPartitionKeys(tableName).stream().map(partitionKey -> {
            try {
                return Pair.of(partitionKey, getAll(tableName, partitionKey));
            } catch (Exception e) {
                logger.warn("failed to read file for partitionKey {} because", partitionKey, e);
                return null;
            }})
            .filter(Objects::nonNull)
            .collect(Collectors.toMap(Pair::getLeft, Pair::getRight));
    }
    /** Lists partition keys (file names) of a table; empty if the table doesn't exist. */
    @Override
    public List<String> getAllPartitionKeys(String tableName) throws IOException {
        final Path tableRoot = Paths.get(this.rootDir.getPath(), tableName);
        if (Files.notExists(tableRoot)) {
            return Collections.emptyList();
        }
        try (Stream<Path> paths = Files.list(tableRoot)) {
            return paths
                .map(x -> x.getFileName().toString())
                .collect(Collectors.toList());
        }
    }
    /** @return the value for the secondary key, or null if absent. */
    @Override
    public String get(String tableName, String partitionKey, String secondaryKey) throws IOException {
        return getAll(tableName, partitionKey).get(secondaryKey);
    }
    /**
     * Loads one partition file into a mutable map. Each line is split at the FIRST
     * comma only, so values may contain commas — but secondary keys must not, and
     * values must not contain newlines; assumed to hold for all callers (TODO confirm).
     */
    @Override
    public Map<String, String> getAll(String tableName, String partitionKey) throws IOException {
        final Path filePath = makePath(tableName, partitionKey);
        if (Files.notExists(filePath)) {
            return new HashMap<>();
        }
        return Files.readAllLines(filePath)
            .stream()
            .map(line -> line.split(",", 2))
            .collect(Collectors.toMap(tokens -> tokens[0], tokens -> tokens[1]));
    }
    /** Read-modify-write of a single entry, serialized by fileLock (reentrant). */
    @Override
    public boolean upsert(String tableName, String partitionKey, String secondaryKey, String data) throws IOException {
        fileLock.lock();
        try {
            final Map<String, String> items = getAll(tableName, partitionKey);
            items.put(secondaryKey, data);
            upsertAll(tableName, partitionKey, items);
            return true;
        } finally {
            fileLock.unlock();
        }
    }
    /**
     * Rewrites the whole partition file from the given map.
     * NOTE(review): the {@code ttl} parameter is ignored — entries written by this
     * file-based implementation never expire; confirm callers do not rely on TTL.
     */
    @Override
    public boolean upsertAll(String tableName, String partitionKey, Map<String, String> all, Duration ttl) throws IOException {
        final Path filePath = makePath(tableName, partitionKey);
        final List<String> lines = all.entrySet().stream()
            .map(e -> e.getKey() + "," + e.getValue())
            .collect(Collectors.toList());
        fileLock.lock();
        try {
            Files.write(filePath, lines);
            return true;
        } finally {
            fileLock.unlock();
        }
    }
    /** Removes one entry via read-modify-write; returns true even if the key was absent. */
    @Override
    public boolean delete(String tableName, String partitionKey, String secondaryKey) throws IOException {
        fileLock.lock();
        try {
            final Map<String, String> items = getAll(tableName, partitionKey);
            items.remove(secondaryKey);
            upsertAll(tableName, partitionKey, items);
            return true;
        } finally {
            fileLock.unlock();
        }
    }
    /** Deletes the whole partition file; returns false if it did not exist. */
    @Override
    public boolean deleteAll(String tableName, String partitionKey) throws IOException {
        final Path filePath = makePath(tableName, partitionKey);
        fileLock.lock();
        try {
            return filePath.toFile().delete();
        } finally {
            fileLock.unlock();
        }
    }
}
| 8,013 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/store/MantisJobMetadataWritable.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.store;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.net.URL;
import java.util.Collection;
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReentrantLock;
import lombok.Getter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
//import com.google.common.collect.Lists;
public class MantisJobMetadataWritable implements MantisJobMetadata {
private static final Logger logger = LoggerFactory.getLogger(MantisJobMetadataWritable.class);
@Getter
private final String user;
@Getter
private final JobSla sla;
@Getter
private final long subscriptionTimeoutSecs;
@Getter
private final List<Label> labels;
@JsonIgnore
private final ConcurrentMap<Integer, MantisStageMetadataWritable> stageMetadataMap;
@JsonIgnore
private final ConcurrentMap<Integer, Integer> workerNumberToStageMap;
@JsonIgnore
private final ReentrantLock lock = new ReentrantLock();
@Getter
private String jobId;
@Getter
private String name;
@Getter
private long submittedAt;
@Getter
private long startedAt = DEFAULT_STARTED_AT_EPOCH;
@Getter
private URL jarUrl;
@Getter
private volatile MantisJobState state;
@Getter
private final int numStages;
@Getter
private final List<Parameter> parameters;
@Getter
private int nextWorkerNumberToUse = 1;
@Getter
private final WorkerMigrationConfig migrationConfig;
@JsonIgnore
private Object sink; // ToDo need to figure out what object we store for sink
@Getter
private final long heartbeatIntervalSecs;
@Getter
private final long workerTimeoutSecs;
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public MantisJobMetadataWritable(@JsonProperty("jobId") String jobId,
@JsonProperty("name") String name,
@JsonProperty("user") String user,
@JsonProperty("submittedAt") long submittedAt,
@JsonProperty("startedAt") long startedAt,
@JsonProperty("jarUrl") URL jarUrl,
@JsonProperty("numStages") int numStages,
@JsonProperty("sla") JobSla sla,
@JsonProperty("state") MantisJobState state,
@JsonProperty("workerTimeoutSecs") long workerTimeoutSecs,
@JsonProperty("heartbeatIntervalSecs") long heartbeatIntervalSecs,
@JsonProperty("subscriptionTimeoutSecs") long subscriptionTimeoutSecs,
@JsonProperty("parameters") List<Parameter> parameters,
@JsonProperty("nextWorkerNumberToUse") int nextWorkerNumberToUse,
@JsonProperty("migrationConfig") WorkerMigrationConfig migrationConfig,
@JsonProperty("labels") List<Label> labels) {
this.jobId = jobId;
this.name = name;
this.user = user;
this.submittedAt = submittedAt;
this.startedAt = startedAt;
this.jarUrl = jarUrl;
this.numStages = numStages;
this.sla = sla;
this.state = state == null ? MantisJobState.Accepted : state;
this.subscriptionTimeoutSecs = subscriptionTimeoutSecs;
this.heartbeatIntervalSecs = heartbeatIntervalSecs;
this.workerTimeoutSecs = workerTimeoutSecs;
this.stageMetadataMap = new ConcurrentHashMap<>();
this.workerNumberToStageMap = new ConcurrentHashMap<>();
if (parameters == null) {
this.parameters = new LinkedList<Parameter>();
} else {
this.parameters = parameters;
}
if (labels == null) {
this.labels = new LinkedList<>();
} else {
this.labels = labels;
}
this.nextWorkerNumberToUse = nextWorkerNumberToUse;
this.migrationConfig = Optional.ofNullable(migrationConfig).orElse(WorkerMigrationConfig.DEFAULT);
}
@Override
public AutoCloseable obtainLock() {
lock.lock();
return new AutoCloseable() {
@Override
public void close() throws IllegalMonitorStateException {
lock.unlock();
}
};
}
public void setNextWorkerNumberToUse(int n) {
this.nextWorkerNumberToUse = n;
}
void setJobState(MantisJobState state) throws InvalidJobStateChangeException {
if (!this.state.isValidStateChgTo(state))
throw new InvalidJobStateChangeException(jobId, this.state, state);
this.state = state;
}
@JsonIgnore
@Override
public Collection<? extends MantisStageMetadata> getStageMetadata() {
return stageMetadataMap.values();
}
@JsonIgnore
@Override
public MantisStageMetadata getStageMetadata(int stageNum) {
return stageMetadataMap.get(stageNum);
}
/**
* Add job stage if absent, returning true if it was actually added.
*
* @param msmd The stage's metadata object.
*
* @return true if actually added, false otherwise.
*/
public boolean addJobStageIfAbsent(MantisStageMetadataWritable msmd) {
return stageMetadataMap.putIfAbsent(msmd.getStageNum(), msmd) == null;
}
public boolean addWorkerMedata(int stageNum, MantisWorkerMetadata workerMetadata, MantisWorkerMetadata replacedWorker)
throws InvalidJobException {
final boolean result =
stageMetadataMap.get(stageNum)
.replaceWorkerIndex(workerMetadata, replacedWorker);
if (result) {
Integer integer = workerNumberToStageMap.put(workerMetadata.getWorkerNumber(), stageNum);
if (integer != null && integer != stageNum) {
logger.error(String.format("Unexpected to put worker number mapping from %d to stage %d for job %s, prev mapping to stage %d",
workerMetadata.getWorkerNumber(), stageNum, workerMetadata.getJobId(), integer));
}
}
return result;
}
@JsonIgnore
@Override
public MantisWorkerMetadata getWorkerByIndex(int stageNumber, int workerIndex) throws InvalidJobException {
MantisStageMetadata stage = stageMetadataMap.get(stageNumber);
if (stage == null)
throw new InvalidJobException(jobId, stageNumber, workerIndex);
return stage.getWorkerByIndex(workerIndex);
}
@JsonIgnore
@Override
public MantisWorkerMetadata getWorkerByNumber(int workerNumber) throws InvalidJobException {
Integer stageNumber = workerNumberToStageMap.get(workerNumber);
if (stageNumber == null)
throw new InvalidJobException(jobId, -1, workerNumber);
MantisStageMetadata stage = stageMetadataMap.get(stageNumber);
if (stage == null)
throw new InvalidJobException(jobId, stageNumber, workerNumber);
return stage.getWorkerByWorkerNumber(workerNumber);
}
@JsonIgnore
public int getMaxWorkerNumber() {
// Expected to be called only during initialization, no need to synchronize/lock.
// Resubmitted workers are expected to have a worker number greater than those they replace.
int max = -1;
for (int id : workerNumberToStageMap.keySet())
if (max < id) max = id;
return max;
}
    // Diagnostic dump of the job's mutable state; used for logging only as far as
    // visible here — do not rely on the exact format.
    @Override
    public String toString() {
        return "MantisJobMetadataWritable{" +
                "user='" + user + '\'' +
                ", sla=" + sla +
                ", subscriptionTimeoutSecs=" + subscriptionTimeoutSecs +
                ", labels=" + labels +
                ", stageMetadataMap=" + stageMetadataMap +
                ", workerNumberToStageMap=" + workerNumberToStageMap +
                ", jobId='" + jobId + '\'' +
                ", name='" + name + '\'' +
                ", submittedAt=" + submittedAt +
                ", startedAt=" + startedAt +
                ", jarUrl=" + jarUrl +
                ", state=" + state +
                ", numStages=" + numStages +
                ", parameters=" + parameters +
                ", nextWorkerNumberToUse=" + nextWorkerNumberToUse +
                ", migrationConfig=" + migrationConfig +
                '}';
    }
}
| 8,014 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/store/InvalidNamedJobException.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.store;
/**
 * Thrown when a named-job (job cluster) definition is invalid — for example a
 * bad SLA configuration or an unknown artifact version.
 */
public class InvalidNamedJobException extends Exception {
    public InvalidNamedJobException(String message) {
        super(message);
    }
    public InvalidNamedJobException(String message, Throwable cause) {
        super(message, cause);
    }
}
| 8,015 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/store/MantisStageMetadataWritable.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.store;
import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Collection;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Mutable, Jackson-serializable metadata for a single stage of a Mantis job:
 * machine sizing, worker count, placement constraints, scaling policy, and the
 * live worker registries keyed by worker index and by worker number.
 */
public class MantisStageMetadataWritable implements MantisStageMetadata {
    private static final Logger logger = LoggerFactory.getLogger(MantisStageMetadataWritable.class);
    // Current worker per worker index — exactly one live worker per index.
    @JsonIgnore
    private final ConcurrentMap<Integer, MantisWorkerMetadata> workerByIndexMetadataSet;
    // All known workers keyed by their unique worker number; may include workers
    // that have since been replaced at their index (pruned via removeArchiveableWorkers).
    @JsonIgnore
    private final ConcurrentMap<Integer, MantisWorkerMetadata> workerByNumberMetadataSet;
    private String jobId;
    private int stageNum;
    private int numStages;
    private MachineDefinition machineDefinition;
    private int numWorkers;
    private List<JobConstraints> hardConstraints;
    private List<JobConstraints> softConstraints;
    private StageScalingPolicy scalingPolicy;
    private boolean scalable;

    /** Jackson-deserializable constructor; the worker registries start out empty. */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public MantisStageMetadataWritable(@JsonProperty("jobId") String jobId,
                                       @JsonProperty("stageNum") int stageNum,
                                       @JsonProperty("numStages") int numStages,
                                       @JsonProperty("machineDefinition") MachineDefinition machineDefinition,
                                       @JsonProperty("numWorkers") int numWorkers,
                                       @JsonProperty("hardConstraints") List<JobConstraints> hardConstraints,
                                       @JsonProperty("softConstraints") List<JobConstraints> softConstraints,
                                       @JsonProperty("scalingPolicy") StageScalingPolicy scalingPolicy,
                                       @JsonProperty("scalable") boolean scalable) {
        this.jobId = jobId;
        this.stageNum = stageNum;
        this.numStages = numStages;
        this.machineDefinition = machineDefinition;
        this.numWorkers = numWorkers;
        this.hardConstraints = hardConstraints;
        this.softConstraints = softConstraints;
        this.scalingPolicy = scalingPolicy;
        this.scalable = scalable;
        workerByIndexMetadataSet = new ConcurrentHashMap<>();
        workerByNumberMetadataSet = new ConcurrentHashMap<>();
    }

    @Override
    public String getJobId() {
        return jobId;
    }

    @Override
    public int getStageNum() {
        return stageNum;
    }

    @Override
    public int getNumStages() {
        return numStages;
    }

    @Override
    public int getNumWorkers() {
        return numWorkers;
    }

    /** Counts the current (per-index) workers that are not in a terminal state. */
    @JsonIgnore
    public int getNumActiveWorkers() {
        // we traverse the current worker for each index
        int active = 0;
        for (MantisWorkerMetadata w : workerByIndexMetadataSet.values()) {
            if (!MantisJobState.isTerminalState(w.getState()))
                active++;
        }
        return active;
    }

    // This call is unsafe to be called by itself. Typically this is called from within a block that
    // locks the corresponding job metadata object and also does the right things for reflecting upon the change. E.g., if increasing
    // the number, then create the new workers. When decrementing, call the unsafeRemoveWorker() to remove
    // the additional workers.
    public void unsafeSetNumWorkers(int numWorkers) {
        this.numWorkers = numWorkers;
    }

    /**
     * Removes the worker from both registries; caller must hold the job lock (see
     * unsafeSetNumWorkers). Returns true only if both mappings existed and agreed
     * with each other (same index/number pair).
     */
    public boolean unsafeRemoveWorker(int index, int number) {
        final MantisWorkerMetadata removedIdx = workerByIndexMetadataSet.remove(index);
        final MantisWorkerMetadata removedNum = workerByNumberMetadataSet.remove(number);
        return removedIdx != null && removedNum != null && removedIdx.getWorkerNumber() == number &&
                removedNum.getWorkerIndex() == index;
    }

    @Override
    public List<JobConstraints> getHardConstraints() {
        return hardConstraints;
    }

    @Override
    public List<JobConstraints> getSoftConstraints() {
        return softConstraints;
    }

    @Override
    public StageScalingPolicy getScalingPolicy() {
        return scalingPolicy;
    }

    public void setScalingPolicy(StageScalingPolicy scalingPolicy) {
        this.scalingPolicy = scalingPolicy;
    }

    @Override
    public boolean getScalable() {
        return scalable;
    }

    public void setScalable(boolean scalable) {
        this.scalable = scalable;
    }

    @Override
    public MachineDefinition getMachineDefinition() {
        return machineDefinition;
    }

    /** Returns the current worker for each index. */
    @JsonIgnore
    @Override
    public Collection<MantisWorkerMetadata> getWorkerByIndexMetadataSet() {
        return workerByIndexMetadataSet.values();
    }

    /** Returns every known worker, including replaced ones not yet archived. */
    @JsonIgnore
    @Override
    public Collection<MantisWorkerMetadata> getAllWorkers() {
        return workerByNumberMetadataSet.values();
    }

    /**
     * Returns the current worker at the given worker index.
     *
     * @throws InvalidJobException if no worker occupies that index.
     */
    @JsonIgnore
    @Override
    public MantisWorkerMetadata getWorkerByIndex(int workerId) throws InvalidJobException {
        MantisWorkerMetadata mwmd = workerByIndexMetadataSet.get(workerId);
        if (mwmd == null)
            throw new InvalidJobException(jobId, -1, workerId);
        return mwmd;
    }

    /**
     * Returns the worker with the given unique worker number.
     *
     * @throws InvalidJobException if the worker number is unknown.
     */
    @JsonIgnore
    @Override
    public MantisWorkerMetadata getWorkerByWorkerNumber(int workerNumber) throws InvalidJobException {
        MantisWorkerMetadata mwmd = workerByNumberMetadataSet.get(workerNumber);
        if (mwmd == null)
            throw new InvalidJobException(jobId, -1, workerNumber);
        return mwmd;
    }

    /** Removes and returns the worker only if it is currently in an error state; null otherwise. */
    MantisWorkerMetadataWritable removeWorkerInErrorState(int workerNumber) {
        MantisWorkerMetadataWritable mwmd = (MantisWorkerMetadataWritable) workerByNumberMetadataSet.get(workerNumber);
        if (mwmd != null && MantisJobState.isErrorState(mwmd.getState())) {
            workerByNumberMetadataSet.remove(workerNumber);
            return mwmd;
        }
        return null;
    }

    /**
     * Removes and returns workers that are no longer the current occupant of their
     * index (i.e. they were replaced), making them eligible for archival.
     */
    Collection<MantisWorkerMetadataWritable> removeArchiveableWorkers() {
        Collection<MantisWorkerMetadataWritable> removedWorkers = new LinkedList<>();
        // Snapshot the key set to avoid iterating while removing from the live map.
        Set<Integer> workerNumbers = new HashSet<>(workerByNumberMetadataSet.keySet());
        for (Integer w : workerNumbers) {
            MantisWorkerMetadata mwmd = workerByNumberMetadataSet.get(w);
            final MantisWorkerMetadata wi = workerByIndexMetadataSet.get(mwmd.getWorkerIndex());
            if (wi == null || wi.getWorkerNumber() != mwmd.getWorkerNumber()) {
                workerByNumberMetadataSet.remove(w);
                removedWorkers.add((MantisWorkerMetadataWritable) mwmd);
            }
        }
        return removedWorkers;
    }

    /**
     * Installs {@code newWorker} at its worker index, optionally replacing {@code oldWorker}.
     * With no oldWorker, the index must be vacant; with an oldWorker, the current occupant
     * must be that worker (otherwise the previous occupant is restored and false returned).
     * Workers already in an error state are only registered by number, and only when no
     * replacement was requested.
     *
     * @return true if the worker was accepted, false otherwise.
     * @throws InvalidJobException if oldWorker's index differs from newWorker's.
     */
    public boolean replaceWorkerIndex(MantisWorkerMetadata newWorker, MantisWorkerMetadata oldWorker)
            throws InvalidJobException {
        int index = newWorker.getWorkerIndex();
        boolean result = true;
        if (!MantisJobState.isErrorState(newWorker.getState())) {
            if (oldWorker == null) {
                if (workerByIndexMetadataSet.putIfAbsent(index, newWorker) != null) {
                    result = false;
                }
            } else {
                if (oldWorker.getWorkerIndex() != index) {
                    throw new InvalidJobException(newWorker.getJobId(), stageNum, oldWorker.getWorkerIndex());
                }
                MantisWorkerMetadata mwmd = workerByIndexMetadataSet.put(index, newWorker);
                // Fix: the previous mapping may be absent (mwmd == null) — the old code NPE'd
                // here. With no previous occupant there is nothing to restore, so the new
                // worker is accepted.
                if (mwmd != null && mwmd.getWorkerNumber() != oldWorker.getWorkerNumber()) {
                    workerByIndexMetadataSet.put(index, mwmd);
                    result = false;
                    logger.info("Did not replace worker " + oldWorker.getWorkerNumber() + " with " +
                            newWorker.getWorkerNumber() + " for index " + newWorker.getWorkerIndex() + " of job " +
                            jobId + ", different worker " + mwmd.getWorkerNumber() + " exists already");
                }
            }
        } else if (oldWorker != null)
            result = false;
        if (result)
            workerByNumberMetadataSet.put(newWorker.getWorkerNumber(), newWorker);
        return result;
    }
}
| 8,016 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/store/InvalidJobException.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.store;
/**
 * Thrown when an operation references a job — or a stage/worker within it —
 * that does not exist or is inconsistent. The message encodes the coordinates
 * as "jobId[-stage-N][-worker-M]".
 */
public class InvalidJobException extends Exception {
    public InvalidJobException(String id) {
        super(id);
    }
    public InvalidJobException(String id, Throwable cause) {
        super(id, cause);
    }
    public InvalidJobException(String jobId, int stageNum, int workerId) {
        super(describe(jobId, stageNum, workerId));
    }
    public InvalidJobException(String jobId, int stageNum, int workerId, Throwable cause) {
        super(describe(jobId, stageNum, workerId), cause);
    }
    // Builds "jobId[-stage-N][-worker-M]"; negative coordinates are omitted.
    private static String describe(String jobId, int stageNum, int workerId) {
        StringBuilder sb = new StringBuilder(jobId);
        if (stageNum >= 0) {
            sb.append("-stage-").append(stageNum);
        }
        if (workerId >= 0) {
            sb.append("-worker-").append(workerId);
        }
        return sb.toString();
    }
}
| 8,017 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/store/KeyValueStore.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.store;
import io.mantisrx.shaded.com.google.common.collect.ImmutableMap;
import java.io.IOException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
/**
 * An abstraction for storage api that behaves like a key-value storage
 * like apache-cassandra: each row lives in a named table and is addressed
 * by a (partitionKey, secondaryKey) primary key.
 * See {@link FileBasedStore}
 * for implementation using files.
 * TODO(hmittal): Add an implementation using SQL, apache-cassandra
 */
public interface KeyValueStore {
    KeyValueStore NO_OP = new NoopStore();

    /** Returns a shared store that persists nothing and reads nothing. */
    static KeyValueStore noop() {
        return NO_OP;
    }

    /** Returns a fresh in-memory store, intended for tests and local use. */
    static KeyValueStore inMemory() {
        return new InMemoryStore();
    }

    /**
     * Gets all rows from the table
     *
     * @param tableName the tableName/table to read from
     * @return map partition key to map of secondary keys to actual data
     */
    default Map<String, Map<String, String>> getAllRows(String tableName) throws IOException {
        Map<String, Map<String, String>> results = new HashMap<>();
        for (String pKey : getAllPartitionKeys(tableName)) {
            results.computeIfAbsent(pKey, (k) -> new HashMap<>());
            results.get(pKey).putAll(getAll(tableName, pKey));
        }
        return results;
    }

    /**
     * Gets all partition keys from the table.
     * This could be beneficial to call instead of getAllRows
     * if the data volume in the table is large and you want
     * to process rows iteratively.
     *
     * It iterates on partitionKey instead of primaryKey to
     * prevent keys from the same partition coming out of order.
     *
     * @param tableName the table to read from
     * @return list of all partition keys
     */
    List<String> getAllPartitionKeys(String tableName) throws IOException;

    /**
     * Gets the row corresponding to primary key (partitionKey, secondaryKey)
     *
     * @param tableName the tableName/table to read from
     * @param partitionKey partitionKey for the record
     * @param secondaryKey secondaryKey for the record
     * @return data, or null when no such row exists
     */
    default String get(String tableName, String partitionKey, String secondaryKey) throws IOException {
        return getAll(tableName, partitionKey).get(secondaryKey);
    }

    /**
     * Gets all rows corresponding to partition key
     *
     * @param tableName the tableName/table to read from
     * @param partitionKey partitionKey for the record
     * @return all records corresponding to partitionKey as a map of secondaryKey -> data
     */
    Map<String, String> getAll(String tableName, String partitionKey) throws IOException;

    /**
     * Adds a row corresponding to primary key (partitionKey, secondaryKey)
     *
     * @param tableName the tableName/table to read from
     * @param partitionKey partitionKey for the record
     * @param secondaryKey secondaryKey for the record
     * @param data the actual data
     * @return boolean if the data was saved
     */
    default boolean upsert(String tableName, String partitionKey, String secondaryKey, String data) throws IOException {
        // Collections.singletonMap keeps this stdlib-only (no shaded-Guava dependency).
        return upsertAll(tableName, partitionKey, Collections.singletonMap(secondaryKey, data));
    }

    /**
     * Adds a row corresponding to primary key (partitionKey, secondaryKey)
     *
     * @param tableName the tableName/table to read from
     * @param partitionKey partitionKey for the record
     * @param secondaryKey secondaryKey for the record
     * @param data the actual data
     * @param ttl ttl for the record in millis
     * @return boolean if the data was saved
     */
    default boolean upsert(String tableName, String partitionKey, String secondaryKey, String data, Duration ttl) throws IOException {
        return upsertAll(tableName, partitionKey, Collections.singletonMap(secondaryKey, data), ttl);
    }

    /**
     * Adds all rows corresponding to the partition key; existing rows under other
     * secondary keys in the same partition are left untouched.
     * The rows are passed as a map of secondaryKey -> data
     *
     * @param tableName the tableName/table to read from
     * @param partitionKey partitionKey for the record
     * @param all map of rows
     * @return boolean if the data was saved
     */
    default boolean upsertAll(String tableName, String partitionKey, Map<String, String> all) throws IOException {
        return upsertAll(tableName, partitionKey, all, Duration.ZERO);
    }

    /**
     * Adds all rows corresponding to the partition key; existing rows under other
     * secondary keys in the same partition are left untouched.
     * The rows are passed as a map of secondaryKey -> data
     *
     * @param tableName the tableName/table to read from
     * @param partitionKey partitionKey for the record
     * @param all map of rows
     * @param ttl ttl for the record in millis (use null or Duration.ZERO for no expiry)
     * @return boolean if the data was saved
     */
    boolean upsertAll(String tableName, String partitionKey, Map<String, String> all, Duration ttl) throws IOException;

    /**
     * Deletes a row corresponding to the primary key (partitionKey, secondaryKey)
     *
     * @param tableName the tableName/table to read from
     * @param partitionKey partitionKey for the record
     * @param secondaryKey secondaryKey for the record
     * @return boolean if row was deleted
     */
    boolean delete(String tableName, String partitionKey, String secondaryKey) throws IOException;

    /**
     * Deletes all rows corresponding to a partition key
     *
     * @param tableName the tableName/table to read from
     * @param partitionKey partitionKey for the record
     * @return boolean if the rows were deleted
     */
    boolean deleteAll(String tableName, String partitionKey) throws IOException;

    /**
     * Helpful method to determine if a row exists in the table
     *
     * @param tableName the tableName/table to read from
     * @param partitionKey partitionKey for the record
     * @param secondaryKey secondaryKey for the record
     * @return boolean if row exists
     */
    default boolean isRowExists(String tableName, String partitionKey, String secondaryKey) throws IOException {
        Map<String, String> items = getAll(tableName, partitionKey);
        return items != null && items.containsKey(secondaryKey);
    }

    /**
     * Allows searching for all rows that share the prefix (in secondary keys) for partitionKey
     *
     * @param tableName the tableName/table to read from
     * @param partitionKey partitionKey for the record
     * @param prefix secondaryKey prefix; null or blank values are default-ed to empty string
     * @return map of secondaryKey -> data for rows whose secondary key starts with the prefix
     */
    default Map<String, String> getAllWithPrefix(String tableName, String partitionKey, String prefix) throws IOException {
        // Plain-Java equivalent of StringUtils.defaultIfBlank / StringUtils.startsWith.
        final String pr = (prefix == null || prefix.trim().isEmpty()) ? "" : prefix;
        return getAll(tableName, partitionKey).entrySet()
                .stream().filter(x -> x.getKey() != null && x.getKey().startsWith(pr))
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    }

    /**
     * A store that drops writes and answers reads with null/false/empty.
     * NOTE(review): reads return null rather than empty collections; callers of
     * the noop store appear to tolerate that — confirm before tightening.
     */
    class NoopStore implements KeyValueStore {
        @Override
        public Map<String, Map<String, String>> getAllRows(String tableName) {
            return null;
        }

        @Override
        public List<String> getAllPartitionKeys(String tableName) {
            return null;
        }

        @Override
        public String get(String tableName, String partitionKey, String secondaryKey) {
            return null;
        }

        @Override
        public Map<String, String> getAll(String tableName, String partitionKey) {
            return null;
        }

        @Override
        public boolean upsertAll(String tableName, String partitionKey, Map<String, String> all, Duration ttl) {
            return false;
        }

        @Override
        public boolean delete(String tableName, String partitionKey, String secondaryKey) {
            return false;
        }

        @Override
        public boolean deleteAll(String tableName, String partitionKey) {
            return false;
        }
    }

    /** A thread-safe in-memory store; TTLs are accepted but ignored. */
    class InMemoryStore implements KeyValueStore {
        // table -> partitionKey -> secondaryKey -> data
        private final Map<String, Map<String, Map<String, String>>> store = new ConcurrentHashMap<>();

        @Override
        public List<String> getAllPartitionKeys(String tableName) {
            if (store.get(tableName) == null) {
                return Collections.emptyList();
            } else {
                return new ArrayList<>(store.get(tableName).keySet());
            }
        }

        @Override
        public Map<String, String> getAll(String tableName, String partitionKey)
                throws IOException {
            if (store.get(tableName) == null) {
                return Collections.emptyMap();
            } else if (store.get(tableName).get(partitionKey) == null) {
                return Collections.emptyMap();
            } else {
                return store.get(tableName).get(partitionKey);
            }
        }

        @Override
        public boolean upsertAll(String tableName, String partitionKey, Map<String, String> all,
                                 Duration ttl) throws IOException {
            // Fix: merge the new rows into the partition instead of replacing the whole
            // partition map — the old code made a single-row upsert() silently drop every
            // other secondary key in the same partition.
            store.computeIfAbsent(tableName, t -> new ConcurrentHashMap<>())
                    .computeIfAbsent(partitionKey, p -> new ConcurrentHashMap<>())
                    .putAll(all);
            return true;
        }

        @Override
        public boolean delete(String tableName, String partitionKey, String secondaryKey)
                throws IOException {
            if (store.containsKey(tableName) && // table exists
                    store.get(tableName).containsKey(partitionKey) && // partitionKey exists
                    store.get(tableName).get(partitionKey).containsKey(secondaryKey)) { // secondaryKey exists
                store.get(tableName).get(partitionKey).remove(secondaryKey);
                return true;
            }
            return false;
        }

        @Override
        public boolean deleteAll(String tableName, String partitionKey) throws IOException {
            if (store.containsKey(tableName) && // table exists
                    store.get(tableName).containsKey(partitionKey)) { // partitionKey exists
                store.get(tableName).remove(partitionKey);
                return true;
            }
            return false;
        }
    }
}
| 8,018 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/store/InvalidJobStateChangeException.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.store;
import io.mantisrx.runtime.MantisJobState;
/**
 * Thrown when a job is asked to move to a state that is not a legal
 * transition from its current state.
 */
public class InvalidJobStateChangeException extends Exception {
    public InvalidJobStateChangeException(String jobId, MantisJobState state) {
        super(unexpectedState(jobId, state));
    }
    public InvalidJobStateChangeException(String jobId, MantisJobState state, Throwable t) {
        super(unexpectedState(jobId, state), t);
    }
    public InvalidJobStateChangeException(String jobId, MantisJobState fromState, MantisJobState toState) {
        super(invalidTransition(jobId, fromState, toState));
    }
    public InvalidJobStateChangeException(String jobId, MantisJobState fromState, MantisJobState toState, Throwable cause) {
        super(invalidTransition(jobId, fromState, toState), cause);
    }
    // Message builders keep the wording of the four constructors in one place.
    private static String unexpectedState(String jobId, MantisJobState state) {
        return "Unexpected state " + state + " for job " + jobId;
    }
    private static String invalidTransition(String jobId, MantisJobState from, MantisJobState to) {
        return "Invalid state transition of job " + jobId + " from state " + from + " to " + to;
    }
}
| 8,019 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/store/NamedJob.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.store;
import com.netflix.fenzo.triggers.CronTrigger;
import com.netflix.fenzo.triggers.TriggerOperator;
import com.netflix.fenzo.triggers.exceptions.SchedulerException;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.NamedJobDefinition;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.server.master.MantisJobOperations;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.net.URL;
import java.util.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.functions.Action1;
public class NamedJob {
private static final Logger logger = LoggerFactory.getLogger(NamedJob.class);
private static final int MaxValueForSlaMin = 5;
private static final int MaxValueForSlaMax = 100;
private final String name;
private final List<Jar> jars = new ArrayList<>();
private JobOwner owner;
private volatile SLA sla;
private List<Parameter> parameters;
private boolean isReadyForJobMaster = false;
private WorkerMigrationConfig migrationConfig;
private volatile long lastJobCount = 0;
private volatile boolean disabled = false;
private volatile boolean cronActive = false;
private volatile boolean isActive = true;
private MantisJobOperations jobOps;
private List<Label> labels;
    /**
     * Jackson-deserializable constructor for a named job (job cluster) definition.
     * Null collaborators are defaulted: SLA to a no-op (0,0) SLA, migrationConfig to
     * WorkerMigrationConfig.DEFAULT, and labels to an empty list. An SLA that fails
     * validation disables the job rather than failing construction.
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public NamedJob(@JsonProperty("jobOps") MantisJobOperations jobOps, @JsonProperty("name") String name,
                    @JsonProperty("jars") List<Jar> jars, @JsonProperty("sla") SLA sla,
                    @JsonProperty("parameters") List<Parameter> parameters,
                    @JsonProperty("owner") JobOwner owner, @JsonProperty("lastJobCount") long lastJobCount,
                    @JsonProperty("disabled") boolean disabled,
                    @JsonProperty("isReadyForJobMaster") boolean isReadyForJobMaster,
                    @JsonProperty("migrationConfig") WorkerMigrationConfig migrationConfig,
                    @JsonProperty("labels") List<Label> labels) {
        this.jobOps = jobOps;
        this.name = name;
        // Fall back to a no-op SLA when none was persisted.
        if (sla == null)
            sla = new SLA(0, 0, null, null);
        this.disabled = disabled;
        this.isReadyForJobMaster = isReadyForJobMaster;
        this.migrationConfig = Optional.ofNullable(migrationConfig).orElse(WorkerMigrationConfig.DEFAULT);
        this.sla = sla;
        try {
            this.sla.validate();
        } catch (InvalidNamedJobException e) {
            // Note: this may override the disabled flag passed in above.
            logger.warn(name + ": disabling due to unexpected error validating sla: " + e.getMessage());
            this.disabled = true;
        }
        if (labels != null) {
            this.labels = labels;
        } else {
            this.labels = new LinkedList<>();
        }
        this.parameters = parameters;
        this.owner = owner;
        this.lastJobCount = lastJobCount;
        if (jars != null) {
            this.jars.addAll(jars);
        }
    }
    /** Builds a job id of the form "&lt;name&gt;-&lt;number&gt;". */
    public static String getJobId(String name, long number) {
        return name + "-" + number;
    }
    // Extracts the name portion of a "name-number" job id.
    // NOTE(review): a jobId without any '-' would make this throw
    // StringIndexOutOfBoundsException; callers appear to always pass ids built by
    // getJobId() — confirm.
    static String getJobName(String jobId) {
        return jobId.substring(0, jobId.lastIndexOf('-'));
    }
    // Extracts the numeric suffix of a "name-number" job id.
    private static long getJobIdNumber(String jobId) {
        return Long.parseLong(jobId.substring(jobId.lastIndexOf('-') + 1));
    }
    // Diagnostic dump for logging; not parsed anywhere visible here.
    @Override
    public String toString() {
        return "NamedJob [name=" + name + ", jars=" + jars + ", owner=" + owner + ", sla=" + sla + ", parameters="
                + parameters + ", isReadyForJobMaster=" + isReadyForJobMaster + ", migrationConfig=" + migrationConfig
                + ", lastJobCount=" + lastJobCount + ", disabled=" + disabled + ", isActive=" + isActive + ", labels="
                + labels + "]";
    }
    // Re-attaches the (JsonIgnore-d at serialization time) job operations collaborator
    // after deserialization.
    /* package */ void setJobOps(MantisJobOperations jobOps) {
        this.jobOps = jobOps;
    }
    public String getName() {
        return name;
    }
    /** Returns an unmodifiable view of the uploaded jars, oldest first. */
    public List<Jar> getJars() {
        return Collections.unmodifiableList(jars);
    }
    public SLA getSla() {
        return sla;
    }
    public List<Parameter> getParameters() {
        return parameters;
    }
    void setParameters(List<Parameter> parameters) {
        this.parameters = parameters;
    }
    public List<Label> getLabels() {
        return this.labels;
    }
    void setLabels(List<Label> labels) {
        this.labels = labels;
    }
    public JobOwner getOwner() {
        return owner;
    }
    void setOwner(JobOwner owner) {
        this.owner = owner;
    }
    // Highest job number handed out so far for this named job.
    public long getLastJobCount() {
        return lastJobCount;
    }
    // Increments and returns the job counter used to mint the next job id.
    // NOTE(review): ++ on a volatile is not atomic; this assumes submissions for a
    // given named job are serialized elsewhere — confirm.
    @JsonIgnore
    public long getNextJobNumber() {
        return ++lastJobCount;
    }
    public boolean getDisabled() {
        return disabled;
    }
    public void setDisabled(boolean disabled) {
        this.disabled = disabled;
        // enforceSla(Optional.empty());
    }
    public boolean getIsReadyForJobMaster() {
        return isReadyForJobMaster;
    }
    public void setIsReadyForJobMaster(boolean b) {
        isReadyForJobMaster = b;
    }
    public WorkerMigrationConfig getMigrationConfig() {
        return migrationConfig;
    }
    public void setMigrationConfig(final WorkerMigrationConfig migrationConfig) {
        this.migrationConfig = migrationConfig;
    }
    @JsonIgnore
    public boolean getIsActive() {
        return isActive;
    }
    // Marks this named job deleted. Declares NamedJobDeleteException for API
    // compatibility; nothing in this method actually throws it.
    @JsonIgnore
    public void setInactive() throws NamedJobDeleteException {
        isActive = false;
    }
    public boolean getCronActive() {
        return cronActive;
    }
/**
* Get the Jar for the job that matches the <code>version</code> argument.
*
* @param version The version to match.
*
* @return Latest jar uploaded if <code>version</code> is <code>null</code> or empty, or jar whose version
* matches the argument, or null if no such version exists.
*/
@JsonIgnore
Jar getJar(String version) {
if (version == null || version.isEmpty())
return jars.get(jars.size() - 1);
for (Jar j : jars)
if (version.equals(j.version))
return j;
return null;
}
public static class CompletedJob {
private final String name;
private final String jobId;
private final String version;
private final MantisJobState state;
private final long submittedAt;
private final long terminatedAt;
private final String user;
private final List<Label> labels;
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public CompletedJob(
@JsonProperty("name") String name,
@JsonProperty("jobId") String jobId,
@JsonProperty("version") String version,
@JsonProperty("state") MantisJobState state,
@JsonProperty("submittedAt") long submittedAt,
@JsonProperty("terminatedAt") long terminatedAt,
@JsonProperty("user") String user,
@JsonProperty("labels") List<Label> labels
) {
this.name = name;
this.jobId = jobId;
this.version = version;
this.state = state;
this.submittedAt = submittedAt;
this.terminatedAt = terminatedAt;
this.user = user;
if (labels != null) {
this.labels = labels;
} else {
this.labels = new ArrayList<>();
}
}
public String getName() {
return name;
}
public String getJobId() {
return jobId;
}
public String getVersion() {
return version;
}
public MantisJobState getState() {
return state;
}
public long getSubmittedAt() {
return submittedAt;
}
public long getTerminatedAt() {
return terminatedAt;
}
public String getUser() {
return user;
}
public List<Label> getLabels() { return labels; }
@Override
public String toString() {
return "CompletedJob{" +
"name='" + name + '\'' +
", jobId='" + jobId + '\'' +
", version='" + version + '\'' +
", state=" + state +
", submittedAt=" + submittedAt +
", terminatedAt=" + terminatedAt +
", user='" + user + '\'' +
", labels=" + labels +
'}';
}
}
public static class Jar {
private final URL url;
private final String version;
private final long uploadedAt;
private final SchedulingInfo schedulingInfo;
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public Jar(@JsonProperty("url") URL url,
@JsonProperty("uploadedAt") long uploadedAt,
@JsonProperty("version") String version, @JsonProperty("schedulingInfo") SchedulingInfo schedulingInfo) {
this.url = url;
this.uploadedAt = uploadedAt;
this.version = (version == null || version.isEmpty()) ?
"" + System.currentTimeMillis() :
version;
this.schedulingInfo = schedulingInfo;
}
public URL getUrl() {
return url;
}
public long getUploadedAt() {
return uploadedAt;
}
public String getVersion() {
return version;
}
public SchedulingInfo getSchedulingInfo() {
return schedulingInfo;
}
}
    /**
     * Service-level agreement for a named job: either a [min, max] bound on concurrently
     * running jobs, or a cron schedule (in which case min/max are pinned to 0/1 and a
     * {@link NamedJobDefinition.CronPolicy} governs behavior on trigger).
     */
    public static class SLA {
        @JsonIgnore
        private static final TriggerOperator triggerOperator;
        // Eagerly creates/initializes the shared trigger operator; failure is treated as fatal.
        // NOTE(review): cron handling below is disabled for Master V2 (see initCron/destroyCron),
        // so this static initialization may be vestigial — confirm before removing.
        static {
            triggerOperator = new TriggerOperator(1);
            try {
                triggerOperator.initialize();
            } catch (SchedulerException e) {
                logger.error("Unexpected: " + e.getMessage(), e);
                throw new RuntimeException(e);
            }
        }
        private final int min;
        private final int max;
        private final String cronSpec;
        private final NamedJobDefinition.CronPolicy cronPolicy;
        // True when this SLA is cron-driven rather than min/max-driven; not serialized.
        @JsonIgnore
        private final boolean hasCronSpec;
        @JsonIgnore
        private final NamedJobDefinition.CronPolicy defaultPolicy = NamedJobDefinition.CronPolicy.KEEP_EXISTING;
        @JsonIgnore
        private CronTrigger<NamedJob> scheduledTrigger;
        @JsonIgnore
        private String triggerGroup = null;
        @JsonIgnore
        private String triggerId = null;
        /**
         * Creates an SLA. A non-empty cron spec takes precedence: min/max arguments are
         * ignored (pinned to 0/1) and a null cron policy defaults to KEEP_EXISTING.
         * Otherwise min/max are used verbatim and the cron fields are left null.
         */
        @JsonCreator
        @JsonIgnoreProperties(ignoreUnknown = true)
        public SLA(
            @JsonProperty("min") int min,
            @JsonProperty("max") int max,
            @JsonProperty("cronSpec") String cronSpec,
            @JsonProperty("cronPolicy") NamedJobDefinition.CronPolicy cronPolicy
        ) {
            if (cronSpec != null && !cronSpec.isEmpty()) {
                this.cronSpec = cronSpec;
                hasCronSpec = true;
                this.max = 1;
                this.min = 0;
                this.cronPolicy = cronPolicy == null ? defaultPolicy : cronPolicy;
            } else {
                hasCronSpec = false;
                this.min = min;
                this.max = max;
                this.cronSpec = null;
                this.cronPolicy = null;
            }
        }
        /** @return minimum concurrent jobs (0 when cron-driven). */
        public int getMin() {
            return min;
        }
        /** @return maximum concurrent jobs (1 when cron-driven). */
        public int getMax() {
            return max;
        }
        /** @return cron expression, or null when this SLA is min/max-driven. */
        public String getCronSpec() {
            return cronSpec;
        }
        /** @return cron policy, or null when this SLA is min/max-driven. */
        public NamedJobDefinition.CronPolicy getCronPolicy() {
            return cronPolicy;
        }
        /**
         * Checks min/max against each other and the global limits.
         *
         * @throws InvalidNamedJobException if max &lt; min or either exceeds its configured cap
         */
        private void validate() throws InvalidNamedJobException {
            if (max < min)
                throw new InvalidNamedJobException("Cannot have max=" + max + " < min=" + min);
            if (min > MaxValueForSlaMin)
                throw new InvalidNamedJobException("Specified min sla value " + min + " cannot be >" + MaxValueForSlaMin);
            if (max > MaxValueForSlaMax)
                throw new InvalidNamedJobException("Max sla value " + max + " cannot be >" + MaxValueForSlaMax);
        }
        // caller must lock to avoid concurrent access with destroyCron()
        private void initCron(NamedJob job) throws SchedulerException {
            // DISABLED AS Master V2 does not use this class for cron
        }
        // caller must lock to avoid concurrent access with initCron()
        private void destroyCron() {
            // DISABLED AS Master V2 does not use this class for cron
        }
    }
// Keep this public since Quartz needs to call it when triggering cron.
public static class CronTriggerAction implements Action1<NamedJob> {
@Override
public void call(NamedJob job) {
logger.info("Cron fired for " + job.getName());
}
}
}
| 8,020 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/store/JobAlreadyExistsException.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.store;
/**
 * Thrown when an attempt is made to register a job that already exists.
 * The exception message carries the offending job id.
 */
public class JobAlreadyExistsException extends Exception {
    // Serializable exceptions should pin their serial form explicitly.
    private static final long serialVersionUID = 1L;

    /** @param jobId id of the job that already exists; becomes the exception message. */
    public JobAlreadyExistsException(String jobId) {
        super(jobId);
    }

    /**
     * @param jobId id of the job that already exists; becomes the exception message
     * @param cause underlying cause, preserved for diagnostics
     */
    public JobAlreadyExistsException(String jobId, Throwable cause) {
        super(jobId, cause);
    }
}
| 8,021 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/store/MantisStageMetadata.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.store;
import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import java.util.Collection;
import java.util.List;
/**
 * Metadata view of one stage of a Mantis job: machine sizing, worker count,
 * scheduling constraints, scaling policy, and per-worker metadata accessors.
 */
public interface MantisStageMetadata {
    /** @return id of the job this stage belongs to. */
    String getJobId();
    /** @return this stage's number within the job. */
    int getStageNum();
    /** @return total number of stages in the job. */
    int getNumStages();
    /** @return resource sizing for each worker of this stage. */
    MachineDefinition getMachineDefinition();
    /** @return current number of workers configured for this stage. */
    int getNumWorkers();
    /** @return constraints that must be satisfied when placing workers. */
    List<JobConstraints> getHardConstraints();
    /** @return constraints that are honored on a best-effort basis. */
    List<JobConstraints> getSoftConstraints();
    /** @return autoscaling policy for this stage, if any. */
    StageScalingPolicy getScalingPolicy();
    /** @return whether this stage may be scaled. */
    boolean getScalable();
    /** @return worker metadata keyed by worker index (one entry per active index — confirm with implementation). */
    Collection<MantisWorkerMetadata> getWorkerByIndexMetadataSet();
    /** @return metadata for all workers of this stage. */
    Collection<MantisWorkerMetadata> getAllWorkers();
    /** @throws InvalidJobException if no worker exists at the given index. */
    MantisWorkerMetadata getWorkerByIndex(int workerIndex) throws InvalidJobException;
    /** @throws InvalidJobException if no worker exists with the given worker number. */
    MantisWorkerMetadata getWorkerByWorkerNumber(int workerNumber) throws InvalidJobException;
}
| 8,022 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/server/master/store/MantisWorkerMetadata.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.store;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import java.util.List;
import java.util.Optional;
/**
 * Metadata view of a single Mantis worker (one task of one stage of a job).
 * Modification operations on implementations do not perform locking.
 * NOTE(review): the original doc referenced an {@code obtainLock()} method returning an
 * {@link AutoCloseable}; it is not declared on this interface, so it presumably lives on
 * the implementation — confirm before relying on it.
 */
public interface MantisWorkerMetadata {
    /** @return zero-based index of this worker within its stage. */
    int getWorkerIndex();
    /** @return worker number (distinct from the index; see {@link #getResubmitOf()}). */
    int getWorkerNumber();
    /** @return composite id identifying this worker. */
    WorkerId getWorkerId();
    /** @return id of the job this worker belongs to. */
    String getJobId();
    /** @return stage number this worker runs in. */
    int getStageNum();
    int getMetricsPort();
    int getDebugPort();
    int getConsolePort();
    int getCustomPort();
    // cluster on which the worker was launched
    Optional<String> getCluster();
    /** @return resource cluster the worker was placed on, if any. */
    Optional<ClusterID> getResourceCluster();
    /**
     * Get number of ports for this worker, including the metrics port
     *
     * @return The number of ports
     */
    int getNumberOfPorts();
    /** @return ports assigned to this worker. */
    List<Integer> getPorts();
    /** Records additional ports assigned to this worker. */
    void addPorts(List<Integer> ports);
    /** @return how many times this worker (by index) has been resubmitted in total. */
    int getTotalResubmitCount();
    /**
     * Get the worker number (not index) of which this is a resubmission of.
     *
     * @return worker number of the predecessor worker
     */
    int getResubmitOf();
    /** @return current lifecycle state of the worker. */
    MantisJobState getState();
    String getSlave();
    String getSlaveID();
    // Lifecycle timestamps for each state transition (units/epoch not specified here — confirm with implementation).
    long getAcceptedAt();
    long getLaunchedAt();
    long getStartingAt();
    long getStartedAt();
    long getCompletedAt();
    /** @return reason the worker completed, when in a terminal state. */
    JobCompletedReason getReason();
}
| 8,023 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/StringConstants.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master;
/**
 * String constants shared across master components.
 */
public class StringConstants {
    // User recorded for actions performed automatically by the Mantis master itself.
    public static final String MANTIS_MASTER_USER = "MantisMaster";

    private StringConstants() {
        // Constants holder; not instantiable.
    }
}
| 8,024 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/JobClustersManagerService.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master;
import static akka.pattern.PatternsCS.ask;
import akka.actor.ActorRef;
import akka.util.Timeout;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.server.core.BaseService;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.scheduler.MantisSchedulerFactory;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * BaseService wrapper that drives one-time initialization of the JobClustersManagerActor
 * once this master instance enters active (leader) mode. Initialization failure is fatal:
 * the process exits with code 3 so the master can be restarted.
 */
public class JobClustersManagerService extends BaseService {
    private static final Logger logger = LoggerFactory.getLogger(JobClustersManagerService.class);
    private final ActorRef jobClustersManagerActor;
    private final MantisSchedulerFactory schedulerFactory;
    // When true, previously persisted job clusters/jobs are reloaded from the store during init.
    private final boolean loadJobsFromStore;
    /**
     * @param jobClustersManagerActor actor to initialize once this master becomes active
     * @param schedulerFactory        scheduler factory handed to the actor in the init message
     * @param loadJobsFromStore       whether the actor should reload persisted state
     */
    public JobClustersManagerService(final ActorRef jobClustersManagerActor,
                                     final MantisSchedulerFactory schedulerFactory,
                                     final boolean loadJobsFromStore) {
        super(true);
        this.jobClustersManagerActor = jobClustersManagerActor;
        this.schedulerFactory = schedulerFactory;
        this.loadJobsFromStore = loadJobsFromStore;
    }
    /**
     * Sends the initialize message to the actor and blocks (via latch) until it responds.
     * A failed/timeout response, any exception, or interruption all terminate the process.
     */
    @Override
    public void start() {
        super.awaitActiveModeAndStart(() -> {
            // initialize job clusters manager
            final CountDownLatch latch = new CountDownLatch(1);
            final long startTime = System.currentTimeMillis();
            try {
                long masterInitTimeoutSecs = ConfigurationProvider.getConfig().getMasterInitTimeoutSecs();
                CompletionStage<JobClusterManagerProto.JobClustersManagerInitializeResponse> initResponse =
                    ask(jobClustersManagerActor,
                        new JobClusterManagerProto.JobClustersManagerInitialize(schedulerFactory, loadJobsFromStore),
                        Timeout.apply(masterInitTimeoutSecs, TimeUnit.SECONDS))
                        .thenApply(JobClusterManagerProto.JobClustersManagerInitializeResponse.class::cast);
                initResponse.whenComplete((resp, t) -> {
                    logger.info("JobClustersManagerActor init response {}", resp);
                    // t != null covers ask timeout/failure; short-circuit keeps the resp deref safe.
                    if (t != null || !resp.responseCode.equals(BaseResponse.ResponseCode.SUCCESS)) {
                        logger.error("failed to initialize JobClustersManagerActor, committing suicide...", t);
                        System.exit(3);
                    }
                    latch.countDown();
                });
            } catch (Exception e) {
                logger.error("caught exception when initializing JobClustersManagerService, committing suicide...", e);
                System.exit(3);
            }
            try {
                latch.await();
            } catch (InterruptedException e) {
                // Interrupt status is not restored here; moot, since the process exits immediately.
                logger.error("interrupted waiting for latch countdown during JobClustersManagerInitialize, committing suicide..", e);
                System.exit(3);
            }
            logger.info("JobClustersManager initialize took {} sec",
                TimeUnit.SECONDS.convert(System.currentTimeMillis() - startTime, TimeUnit.MILLISECONDS));
        });
    }
}
| 8,025 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/JobListHelperActor.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master;
import static akka.pattern.PatternsCS.ask;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SERVER_ERROR;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS;
import akka.actor.AbstractActor;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.util.Timeout;
import io.mantisrx.master.api.akka.route.proto.JobClusterProtoAdapter;
import io.mantisrx.master.jobcluster.MantisJobClusterMetadataView;
import io.mantisrx.master.jobcluster.job.MantisJobMetadataView;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.schedulers.Schedulers;
import scala.concurrent.duration.Duration;
/**
* Helper Actor used by JobClustersManager for listing jobs and clusters.
* By offloading the scatter-gather to a separate Actor the JCM is free to move on to processing other messages.
*
* This Actor is stateless and can be part of a pool of actors if performance becomes a bottle neck
*/
public class JobListHelperActor extends AbstractActor {
    private final Logger logger = LoggerFactory.getLogger(JobListHelperActor.class);
    /** @return Props for creating this stateless helper actor. */
    public static Props props() {
        return Props.create(JobListHelperActor.class);
    }
    public JobListHelperActor() {
    }
    @Override
    public Receive createReceive() {
        return receiveBuilder()
            .match(ListJobClusterRequestWrapper.class, this::onJobClustersList)
            .match(ListJobRequestWrapper.class, this::onJobList)
            .match(ListJobIdRequestWrapper.class, this::onJobIdList)
            .matchAny(x -> logger.warn("Unexpected message {}", x))
            .build();
    }
    /**
     * Scatter-gathers ListJobsRequest across all job cluster actors whose name matches the
     * request's regex criteria, sorts the combined jobs by submission time, and replies to
     * the original requester. Per-cluster failures are logged and skipped (partial results).
     */
    private void onJobList(ListJobRequestWrapper request) {
        ActorRef sender = getSender();
        // Per-cluster ask timeout; a slow cluster actor is dropped from the result set.
        Timeout t = new Timeout(Duration.create(500, TimeUnit.MILLISECONDS));
        List<MantisJobMetadataView> resultList = Lists.newArrayList();
        getJobClustersMatchingRegex(request.jobClusterInfoMap.values(), request.listJobsRequest.getCriteria())
            .flatMap((jobClusterInfo) -> {
                CompletionStage<JobClusterManagerProto.ListJobsResponse> respCS = ask(jobClusterInfo.jobClusterActor, request.listJobsRequest, t)
                    .thenApply(JobClusterManagerProto.ListJobsResponse.class::cast);
                return Observable.from(respCS.toCompletableFuture(), Schedulers.io())
                    .onErrorResumeNext(ex -> {
                        // Swallow per-cluster failures so one bad cluster doesn't fail the whole listing.
                        logger.warn("caught exception {}", ex.getMessage(), ex);
                        return Observable.empty();
                    });
            })
            .filter(Objects::nonNull)
            .flatMapIterable((listJobsResp) -> listJobsResp.getJobList())
            .toSortedList((o1, o2) -> Long.compare(o1.getJobMetadata().getSubmittedAt(),
                o2.getJobMetadata().getSubmittedAt()))
            .subscribeOn(Schedulers.computation())
            .subscribe( resultList::addAll,
                (e) -> {
                    request.sender.tell(new JobClusterManagerProto.ListJobsResponse(request.listJobsRequest.requestId, SERVER_ERROR, e.getMessage(), resultList), sender);
                },() -> {
                    // todo limit is applied at cluster level as well if(request.listJobsRequest.getCriteria().getLimit().isPresent()) {
                    //                            int limit = request.listJobsRequest.getCriteria().getLimit().get();
                    //                            request.sender.tell(new JobClusterManagerProto.ListJobsResponse(request.listJobsRequest.requestId, SUCCESS, "", resultList.subList(0, Math.min(resultList.size(), limit))), sender);
                    //                        }
                    request.sender.tell(new JobClusterManagerProto.ListJobsResponse(request.listJobsRequest.requestId, SUCCESS, "", resultList), sender);
                })
        ;
    }
    /**
     * Same scatter-gather as {@link #onJobList} but collects only job ids. Per-cluster
     * failures are skipped; the combined (unsorted) id list is sent to the requester.
     */
    private void onJobIdList(ListJobIdRequestWrapper request) {
        if(logger.isTraceEnabled()) { logger.trace("In onJobIdList {}", request); }
        ActorRef sender = getSender();
        Timeout t = new Timeout(Duration.create(500, TimeUnit.MILLISECONDS));
        List<JobClusterProtoAdapter.JobIdInfo> resultList = Lists.newArrayList();
        getJobClustersMatchingRegex(request.jobClusterInfoMap.values(),request.listJobIdsRequest.getCriteria())
            .flatMap((jobClusterInfo) -> {
                CompletionStage<JobClusterManagerProto.ListJobIdsResponse> respCS = ask(jobClusterInfo.jobClusterActor, request.listJobIdsRequest, t)
                    .thenApply(JobClusterManagerProto.ListJobIdsResponse.class::cast);
                return Observable.from(respCS.toCompletableFuture(), Schedulers.io())
                    .onErrorResumeNext(ex -> {
                        logger.warn("caught exception {}", ex.getMessage(), ex);
                        return Observable.empty();
                    });
            })
            .filter(Objects::nonNull)
            .map(JobClusterManagerProto.ListJobIdsResponse::getJobIds)
            .subscribeOn(Schedulers.computation())
            .subscribe(
                resultList::addAll
                ,(error) -> {
                    logger.warn("Exception in JobListHelperActor:onJobIdList", error);
                    request.sender.tell(new JobClusterManagerProto.ListJobIdsResponse(request.listJobIdsRequest.requestId, SERVER_ERROR, error.getMessage(), resultList), sender);
                },() -> {
                    // NOTE(review): this trace line is duplicated a few lines below; likely a copy/paste leftover.
                    if(logger.isTraceEnabled()) { logger.trace("Exit onJobIdList {}", resultList); }
                    //                        if(request.listJobIdsRequest.getCriteria().getLimit().isPresent()) {
                    //                            int limit = request.listJobIdsRequest.getCriteria().getLimit().get();
                    //                            request.sender.tell(new JobClusterManagerProto.ListJobIdsResponse(request.listJobIdsRequest.requestId, SUCCESS, "", resultList.subList(0, Math.min(resultList.size(), limit))), sender);
                    //                        }
                    if(logger.isTraceEnabled()) { logger.trace("Exit onJobIdList {}", resultList); }
                    request.sender.tell(new JobClusterManagerProto.ListJobIdsResponse(request.listJobIdsRequest.requestId, SUCCESS, "", resultList), sender);
                });
    }
    /**
     * Asks every known job cluster actor for its cluster metadata and replies with the
     * combined list. Clusters that fail or time out are logged and omitted.
     */
    private void onJobClustersList(ListJobClusterRequestWrapper request) {
        if(logger.isTraceEnabled()) { logger.trace("In onJobClustersListRequest {}", request); }
        ActorRef callerActor = getSender();
        Timeout timeout = new Timeout(Duration.create(500, TimeUnit.MILLISECONDS));
        List<MantisJobClusterMetadataView> clusterList = Lists.newArrayList();
        Observable.from(request.jobClusterInfoMap.values())
            .flatMap((jInfo) -> {
                CompletionStage<JobClusterManagerProto.GetJobClusterResponse> respCS = ask(jInfo.jobClusterActor, new JobClusterManagerProto.GetJobClusterRequest(jInfo.clusterName), timeout)
                    .thenApply(JobClusterManagerProto.GetJobClusterResponse.class::cast);
                return Observable.from(respCS.toCompletableFuture(), Schedulers.io())
                    .onErrorResumeNext(ex -> {
                        logger.warn("caught exception {}", ex.getMessage(), ex);
                        return Observable.empty();
                    });
            })
            .filter((resp) -> resp !=null && resp.getJobCluster().isPresent())
            .map((resp) -> resp.getJobCluster().get())
            //.collect((Func0<ArrayList<MantisJobClusterMetadataView>>) ArrayList::new,ArrayList::add)
            .doOnError(this::logError)
            .subscribeOn(Schedulers.computation())
            //.toBlocking()
            .subscribe(
                clusterList::add
                ,(err) -> {
                    logger.warn("Exception in onJobClusterList ", err);
                    if(logger.isTraceEnabled()) { logger.trace("Exit onJobClustersListRequest {}", err); }
                    request.sender.tell(new JobClusterManagerProto.ListJobClustersResponse(request.listJobClustersRequest.requestId, SERVER_ERROR, err.getMessage(), clusterList), callerActor);
                },() -> {
                    if(logger.isTraceEnabled()) { logger.trace("Exit onJobClustersListRequest {}", clusterList); }
                    request.sender.tell(new JobClusterManagerProto.ListJobClustersResponse(request.listJobClustersRequest.requestId, SUCCESS, "", clusterList), callerActor);
                })
        ;
    }
    private void logError(Throwable e) {
        logger.error("Exception occurred retrieving job cluster list {}", e.getMessage());
    }
    /**
     * Filters the given clusters by the request's optional case-insensitive regex
     * (substring match via find()). An invalid regex is logged and matches everything.
     */
    private Observable<JobClustersManagerActor.JobClusterInfo> getJobClustersMatchingRegex(Collection<JobClustersManagerActor.JobClusterInfo> jobClusterList, JobClusterManagerProto.ListJobCriteria criteria) {
        return Observable.from(jobClusterList)
            .filter((jcInfo) -> {
                if(criteria.getMatchingRegex().isPresent()) {
                    try {
                        return Pattern.compile(criteria.getMatchingRegex().get(), Pattern.CASE_INSENSITIVE)
                            .matcher(jcInfo.clusterName).find();
                    } catch(Exception e) {
                        logger.warn("Invalid regex {}", e.getMessage());
                        return true;
                    }
                } else {
                    return true;
                }
            });
    }
    /** Envelope carrying a ListJobClustersRequest plus the original sender and cluster map. */
    static class ListJobClusterRequestWrapper {
        private final JobClusterManagerProto.ListJobClustersRequest listJobClustersRequest;
        private final ActorRef sender;
        private final Map<String, JobClustersManagerActor.JobClusterInfo> jobClusterInfoMap;
        public ListJobClusterRequestWrapper(final JobClusterManagerProto.ListJobClustersRequest request, final ActorRef sender, final Map<String, JobClustersManagerActor.JobClusterInfo> jobClusterInfoMap) {
            this.jobClusterInfoMap = jobClusterInfoMap;
            this.sender = sender;
            this.listJobClustersRequest = request;
        }
        public JobClusterManagerProto.ListJobClustersRequest getListJobClustersRequest() {
            return listJobClustersRequest;
        }
        public ActorRef getSender() {
            return sender;
        }
        public Map<String, JobClustersManagerActor.JobClusterInfo> getJobClusterInfoMap() {
            return jobClusterInfoMap;
        }
    }
    /** Envelope carrying a ListJobsRequest plus the original sender and cluster map. */
    static class ListJobRequestWrapper {
        private final JobClusterManagerProto.ListJobsRequest listJobsRequest;
        private final ActorRef sender;
        private final Map<String, JobClustersManagerActor.JobClusterInfo> jobClusterInfoMap;
        public ListJobRequestWrapper(JobClusterManagerProto.ListJobsRequest listJobsRequest, ActorRef sender, Map<String, JobClustersManagerActor.JobClusterInfo> jobClusterInfoMap) {
            this.listJobsRequest = listJobsRequest;
            this.sender = sender;
            this.jobClusterInfoMap = jobClusterInfoMap;
        }
    }
    /** Envelope carrying a ListJobIdsRequest plus the original sender and cluster map. */
    static class ListJobIdRequestWrapper {
        private final JobClusterManagerProto.ListJobIdsRequest listJobIdsRequest;
        private final ActorRef sender;
        private final Map<String, JobClustersManagerActor.JobClusterInfo> jobClusterInfoMap;
        public ListJobIdRequestWrapper(JobClusterManagerProto.ListJobIdsRequest listJobIdsRequest, ActorRef sender, Map<String, JobClustersManagerActor.JobClusterInfo> jobClusterInfoMap) {
            this.listJobIdsRequest = listJobIdsRequest;
            this.sender = sender;
            this.jobClusterInfoMap = jobClusterInfoMap;
        }
    }
}
| 8,026 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/JobClustersManagerActor.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master;
import static akka.pattern.PatternsCS.ask;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR_CONFLICT;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR_NOT_FOUND;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SERVER_ERROR;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS_CREATED;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.CreateJobClusterRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.CreateJobClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DeleteJobClusterRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DeleteJobClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DisableJobClusterRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DisableJobClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.EnableJobClusterRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.EnableJobClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobClusterRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobSchedInfoRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobSchedInfoResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLastSubmittedJobIdStreamResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLatestJobDiscoveryInfoRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLatestJobDiscoveryInfoResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.JobClustersManagerInitialize;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.JobClustersManagerInitializeResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.KillJobRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.KillJobResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListCompletedJobsInClusterRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListCompletedJobsInClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobClustersRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobClustersResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobIdsRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobIdsResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobsRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobsResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListWorkersRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListWorkersResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ReconcileJobCluster;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.SubmitJobRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.SubmitJobResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterArtifactResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterLabelsResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterSLAResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyResponse;
import static java.util.Optional.empty;
import static java.util.Optional.ofNullable;
import akka.actor.AbstractActorWithTimers;
import akka.actor.ActorPaths;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.actor.SupervisorStrategy;
import akka.actor.Terminated;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.metrics.spectator.GaugeCallback;
import io.mantisrx.common.metrics.spectator.MetricGroupId;
import io.mantisrx.master.akka.MantisActorSupervisorStrategy;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.jobcluster.IJobClusterMetadata;
import io.mantisrx.master.jobcluster.JobClusterActor;
import io.mantisrx.master.jobcluster.job.CostsCalculator;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.master.jobcluster.job.JobHelper;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterArtifactRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterLabelsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterSLARequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateSchedulingInfoResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterProto;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.domain.IJobClusterDefinition;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl.CompletedJob;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.scheduler.MantisSchedulerFactory;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.stream.Collectors;
import lombok.Value;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.schedulers.Schedulers;
/*
Supervisor Actor responsible for creating/deletion/listing of all Job Clusters in the system
*/
public class JobClustersManagerActor extends AbstractActorWithTimers implements IJobClustersManager {
// Timer key for the periodic job-cluster state reconciliation tick (see initialize()).
private static final String CHECK_CLUSTERS_TIMER_KEY = "CHECK_CLUSTER_TIMER";
// A cluster stuck in INITIALIZING/DELETING longer than this is re-driven by the reconciler.
public static final int STATE_TRANSITION_TIMEOUT_MSECS = 5000;
private final Logger logger = LoggerFactory.getLogger(JobClustersManagerActor.class);
// Period (seconds) of the reconciliation timer started during initialize().
private final long checkAgainInSecs = 30;
// Bootstrap metrics: per-cluster init outcomes.
private final Counter numJobClusterInitFailures;
private final Counter numJobClusterInitSuccesses;
// Behavior installed once initialization completes successfully.
private Receive initializedBehavior;
/**
 * Static factory for this actor's {@link Props}, configured with the metered mailbox
 * so mailbox depth/latency metrics are recorded.
 */
public static Props props(final MantisJobStore jobStore, final LifecycleEventPublisher eventPublisher, final CostsCalculator costsCalculator) {
    final Props baseProps = Props.create(JobClustersManagerActor.class, jobStore, eventPublisher, costsCalculator);
    return baseProps.withMailbox("akka.actor.metered-mailbox");
}
private final MantisJobStore jobStore;
private final LifecycleEventPublisher eventPublisher;
private final CostsCalculator costsCalculator;
// Supplied via the JobClustersManagerInitialize message; null until initialize() runs.
private MantisSchedulerFactory mantisSchedulerFactory = null;
// Registry of all job cluster actors and their lifecycle state.
JobClusterInfoManager jobClusterInfoManager;
// Child actor that serves list-clusters/list-jobs queries off this actor's hot path.
private ActorRef jobListHelperActor;
/**
 * Creates the supervisor actor and registers its init success/failure counters.
 *
 * @param store           persistence layer for job cluster / job metadata
 * @param eventPublisher  publisher of lifecycle events, handed to each cluster actor
 * @param costsCalculator used by cluster actors to compute job costs
 */
public JobClustersManagerActor(final MantisJobStore store, final LifecycleEventPublisher eventPublisher, final CostsCalculator costsCalculator) {
this.jobStore = store;
this.eventPublisher = eventPublisher;
this.costsCalculator = costsCalculator;
MetricGroupId metricGroupId = getMetricGroupId();
Metrics m = new Metrics.Builder()
.id(metricGroupId)
.addCounter("numJobClusterInitFailures")
.addCounter("numJobClusterInitSuccesses")
.build();
// registerAndGet may return a previously registered instance; use its counters.
m = MetricsRegistry.getInstance().registerAndGet(m);
this.numJobClusterInitFailures = m.getCounter("numJobClusterInitFailures");
this.numJobClusterInitSuccesses = m.getCounter("numJobClusterInitSuccesses");
initializedBehavior = getInitializedBehavior();
}
// Metric group under which this actor's counters are registered (package-visible for tests).
MetricGroupId getMetricGroupId() {
return new MetricGroupId("JobClustersManagerActor");
}
/**
* JobClusterManager Actor behaviors 27 total
* - Init
* // CLUSTER RELATED
* - CreateJC
* - InitalizeJCResponse
* - DeleteJC
* - DeleteJCResponse
* - UpdateJC
* - UpdateLabel
* - UpdateSLA
* - UpdateArtifact
* - UpdateMigrationStrat
* - ENABLE JC
* - DISABLE JC
* - GET CLUSTER
* - LIST completed jobs
* - GET LAST SUBMITTED JOB
* - LIST archived workers
*
* - LIST JCs
* - LIST JOBS
* - LIST JOB IDS
* - LIST WORKERS -> (pass thru to each Job Actor)
* *
* // pass thru to JOB
* - SUBMIT JOB -> (INIT JOB on Job Actor)
* - KILL JOB -> (pass thru Job Actor)
* - GET JOB -> (pass thru Job Actor)
* - GET JOB SCHED INFO -> (pass thru Job Actor)
* - SCALE JOB -> (pass thru Job Actor)
* - RESUBMIT WORKER -> (pass thru Job Actor)
*
* - WORKER EVENT -> (pass thru Job Actor)
* @return
*/
/**
 * Behavior for the "initialized" state: routes cluster CRUD, listing, job-level
 * pass-through messages and worker events to their handlers. A late
 * JobClustersManagerInitialize gets a CLIENT_ERROR reply; anything unrecognized is
 * logged and dropped.
 */
private Receive getInitializedBehavior() {
String state = "initialized";
return receiveBuilder()
.match(ReconcileJobCluster.class, this::onReconcileJobClusters)
// Specific Job Cluster related messages
.match(CreateJobClusterRequest.class, this::onJobClusterCreate)
.match(JobClusterProto.InitializeJobClusterResponse.class, this::onJobClusterInitializeResponse)
.match(DeleteJobClusterRequest.class, this::onJobClusterDelete)
.match(JobClusterProto.DeleteJobClusterResponse.class, this::onJobClusterDeleteResponse)
.match(UpdateJobClusterRequest.class, this::onJobClusterUpdate)
.match(UpdateJobClusterSLARequest.class, this::onJobClusterUpdateSLA)
.match(UpdateJobClusterArtifactRequest.class, this::onJobClusterUpdateArtifact)
.match(UpdateSchedulingInfo.class, this::onJobClusterUpdateSchedulingInfo)
.match(UpdateJobClusterLabelsRequest.class, this::onJobClusterUpdateLabels)
.match(UpdateJobClusterWorkerMigrationStrategyRequest.class, this::onJobClusterUpdateWorkerMigrationConfig)
.match(EnableJobClusterRequest.class, this::onJobClusterEnable)
.match(DisableJobClusterRequest.class, this::onJobClusterDisable)
.match(GetJobClusterRequest.class, this::onJobClusterGet)
.match(ListCompletedJobsInClusterRequest.class, this::onJobListCompleted)
.match(GetLastSubmittedJobIdStreamRequest.class, this::onGetLastSubmittedJobIdSubject)
.match(ListArchivedWorkersRequest.class, this::onListArchivedWorkers)
// List Job Cluster related messages
.match(ListJobClustersRequest.class, this::onJobClustersList)
// List Jobs related messages
.match(ListJobsRequest.class, this::onJobList)
.match(ListJobIdsRequest.class, this::onJobIdList)
.match(ListWorkersRequest.class, this::onListActiveWorkers)
//delegate to job
.match(SubmitJobRequest.class, this::onJobSubmit)
.match(KillJobRequest.class, this::onJobKillRequest)
// .match(JobClusterProto.KillJobResponse.class, this::onJobKillResponse)
.match(GetJobDetailsRequest.class, this::onGetJobDetailsRequest)
.match(GetJobSchedInfoRequest.class, this::onGetJobStatusSubject)
.match(GetLatestJobDiscoveryInfoRequest.class, this::onGetLatestJobDiscoveryInfo)
.match(ScaleStageRequest.class, this::onScaleStage)
.match(ResubmitWorkerRequest.class, this::onResubmitWorker)
//delegate to worker
.match(WorkerEvent.class, this::onWorkerEvent)
.match(Terminated.class, this::onTerminated)
// Unexpected
.match(JobClustersManagerInitialize.class, (x) -> getSender().tell(new JobClustersManagerInitializeResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state) ), getSelf()))
.matchAny(x -> logger.warn("unexpected message {} received by Job Cluster Manager actor. In initialized state ", x))
.build();
}
/** Builds the standard "unexpected message in state X" reply/log text. */
private String genUnexpectedMsg(String event, String state) {
    final String template = "Unexpected message %s received by JobClustersManager actor in %s State";
    return String.format(template, event, state);
}
/**
 * Behavior for the "initializing" state (the actor's initial behavior): only the
 * JobClustersManagerInitialize message is processed. Every other request type gets an
 * immediate CLIENT_ERROR reply (or a warning log for fire-and-forget messages) so
 * callers are not left hanging while bootstrap is in progress.
 */
private Receive getInitializingBehavior() {
String state = "initializing";
return receiveBuilder()
// EXPECTED MESSAGES BEGIN
.match(JobClustersManagerInitialize.class, this::initialize)
// EXPECTED MESSAGES END
// UNEXPECTED MESSAGES BEGIN
.match(ReconcileJobCluster.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), state)))
.match(CreateJobClusterRequest.class, (x) -> getSender().tell(new CreateJobClusterResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), x.getJobClusterDefinition().getName()), getSelf()))
.match(JobClusterProto.InitializeJobClusterResponse.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), state)))
.match(DeleteJobClusterRequest.class, (x) -> getSender().tell(new DeleteJobClusterResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state)), getSelf()))
.match(JobClusterProto.DeleteJobClusterResponse.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), state)))
.match(UpdateJobClusterRequest.class, (x) -> getSender().tell(new UpdateJobClusterResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state)), getSelf()))
.match(UpdateJobClusterSLARequest.class, (x) -> getSender().tell(new UpdateJobClusterSLAResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state)), getSelf()))
.match(UpdateJobClusterArtifactRequest.class, (x) -> getSender().tell(new UpdateJobClusterArtifactResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state)), getSelf()))
.match(UpdateSchedulingInfo.class, (x) -> getSender().tell(new UpdateSchedulingInfoResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state)), getSelf()))
.match(UpdateJobClusterLabelsRequest.class, (x) -> getSender().tell(new UpdateJobClusterLabelsResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state)), getSelf()))
.match(UpdateJobClusterWorkerMigrationStrategyRequest.class, (x) -> getSender().tell(new UpdateJobClusterWorkerMigrationStrategyResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state)), getSelf()))
.match(EnableJobClusterRequest.class, (x) -> getSender().tell(new EnableJobClusterResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state)), getSelf()))
.match(DisableJobClusterRequest.class, (x) -> getSender().tell(new DisableJobClusterResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state)), getSelf()))
.match(GetJobClusterRequest.class, (x) -> getSender().tell(new GetJobClusterResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), empty()), getSelf()))
.match(ListCompletedJobsInClusterRequest.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), state)))
.match(GetLastSubmittedJobIdStreamRequest.class, (x) -> getSender().tell(new GetLastSubmittedJobIdStreamResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), empty()), getSelf()))
.match(ListArchivedWorkersRequest.class, (x) -> getSender().tell(new ListArchivedWorkersResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), Lists.newArrayList()), getSelf()))
.match(ListJobClustersRequest.class, (x) -> getSender().tell(new ListJobClustersResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), Lists.newArrayList()), getSelf()))
.match(ListJobsRequest.class, (x) -> getSender().tell(new ListJobsResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), Lists.newArrayList()), getSelf()))
.match(ListJobIdsRequest.class, (x) -> getSender().tell(new ListJobIdsResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), Lists.newArrayList()), getSelf()))
.match(ListWorkersRequest.class, (x) -> getSender().tell(new ListWorkersResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), Lists.newArrayList()), getSelf()))
.match(SubmitJobRequest.class, (x) -> getSender().tell(new SubmitJobResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), empty()), getSelf()))
.match(KillJobRequest.class, (x) -> getSender().tell(new KillJobResponse(x.requestId, CLIENT_ERROR, JobState.Noop, genUnexpectedMsg(x.toString(), state), x.getJobId(), x.getUser()), getSelf()))
.match(JobClusterProto.KillJobResponse.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), state)))
.match(GetJobDetailsRequest.class, (x) -> getSender().tell(new GetJobDetailsResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), empty()), getSelf()))
.match(GetJobSchedInfoRequest.class, (x) -> getSender().tell(new GetJobSchedInfoResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), empty()), getSelf()))
.match(GetLatestJobDiscoveryInfoRequest.class, (x) -> getSender().tell(new GetLatestJobDiscoveryInfoResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), empty()), getSelf()))
.match(ScaleStageRequest.class, (x) -> getSender().tell(new ScaleStageResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), 0), getSelf()))
.match(ResubmitWorkerRequest.class, (x) -> getSender().tell(new ResubmitWorkerResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state)), getSelf()))
.match(WorkerEvent.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), state)))
// everything else
.matchAny(x -> logger.warn("unexpected message {} received by Job Cluster Manager actor. It needs to be initialized first ", x))
// UNEXPECTED MESSAGES BEGIN
.build();
}
/**
 * Bootstraps the manager from the JobClustersManagerInitialize message: creates the
 * job-list helper actor, wires the scheduler factory, and — unless loading from store
 * is disabled — reloads every job cluster and its active jobs from persistence,
 * creating and initializing one JobClusterActor per cluster before switching to the
 * initialized behavior. The original sender is told the overall outcome.
 *
 * NOTE(review): the per-cluster init is driven through a blocking Observable, so this
 * actor is unresponsive until bootstrap finishes — presumably intentional during
 * master startup; confirm before reusing this path elsewhere.
 *
 * @param initMsg carries the scheduler factory and the loadJobsFromStore flag
 */
private void initialize(JobClustersManagerInitialize initMsg) {
ActorRef sender = getSender();
try {
logger.info("In JobClustersManagerActor:initialize");
this.jobListHelperActor = getContext().actorOf(JobListHelperActor.props(), "JobListHelperActor");
getContext().watch(jobListHelperActor);
mantisSchedulerFactory = initMsg.getScheduler();
Map<String, IJobClusterMetadata> jobClusterMap = new HashMap<>();
this.jobClusterInfoManager = new JobClusterInfoManager(jobStore, mantisSchedulerFactory, eventPublisher, costsCalculator);
if (!initMsg.isLoadJobsFromStore()) {
// Fresh start: nothing to reload, become initialized immediately.
getContext().become(initializedBehavior);
sender.tell(new JobClustersManagerInitializeResponse(initMsg.requestId, SUCCESS, "JobClustersManager successfully inited"), getSelf());
} else {
List<IJobClusterMetadata> jobClusters = jobStore.loadAllJobClusters();
logger.info("Read {} job clusters from storage", jobClusters.size());
List<IMantisJobMetadata> activeJobs = jobStore.loadAllActiveJobs();
logger.info("Read {} jobs from storage", activeJobs.size());
// index clusters by name
for (IJobClusterMetadata jobClusterMeta : jobClusters) {
String clusterName = jobClusterMeta.getJobClusterDefinition().getName();
jobClusterMap.put(clusterName, jobClusterMeta);
}
Map<String, List<IMantisJobMetadata>> clusterToJobMap = new HashMap<>();
// group jobs by cluster
for (IMantisJobMetadata jobMeta : activeJobs) {
String clusterName = jobMeta.getClusterName();
clusterToJobMap.computeIfAbsent(clusterName, k -> new ArrayList<>()).add(jobMeta);
}
// Leave 60s of headroom under the master init timeout so a slow cluster init fails
// here instead of tripping the master-level timeout first (fall back to the full
// timeout if it is 60s or less).
long masterInitTimeoutSecs = ConfigurationProvider.getConfig().getMasterInitTimeoutSecs();
long timeout = ((masterInitTimeoutSecs - 60)) > 0 ? (masterInitTimeoutSecs - 60) : masterInitTimeoutSecs;
Observable.from(jobClusterMap.values())
.filter((jobClusterMeta) -> jobClusterMeta != null && jobClusterMeta.getJobClusterDefinition() != null)
.flatMap((jobClusterMeta) -> {
Duration t = Duration.ofSeconds(timeout);
Optional<JobClusterInfo> jobClusterInfoO = jobClusterInfoManager.createClusterActorAndRegister(jobClusterMeta.getJobClusterDefinition());
if (!jobClusterInfoO.isPresent()) {
logger.info("skipping job cluster {} on bootstrap as actor creating failed", jobClusterMeta.getJobClusterDefinition().getName());
return Observable.empty();
}
JobClusterInfo jobClusterInfo = jobClusterInfoO.get();
List<IMantisJobMetadata> jobList = Lists.newArrayList();
List<IMantisJobMetadata> jList = clusterToJobMap.get(jobClusterMeta.getJobClusterDefinition().getName());
if (jList != null) {
jobList.addAll(jList);
}
List<CompletedJob> completedJobsList = Lists.newArrayList();
JobClusterProto.InitializeJobClusterRequest req = new JobClusterProto.InitializeJobClusterRequest((JobClusterDefinitionImpl) jobClusterMeta.getJobClusterDefinition(),
jobClusterMeta.isDisabled(), jobClusterMeta.getLastJobCount(), jobList, completedJobsList, "system", getSelf(), false);
return jobClusterInfoManager.initializeCluster(jobClusterInfo, req, t);
})
.filter(Objects::nonNull)
// Block until every cluster actor has answered (or errored).
.toBlocking()
.subscribe((clusterInit) -> {
logger.info("JobCluster {} inited with code {}", clusterInit.jobClusterName, clusterInit.responseCode);
numJobClusterInitSuccesses.increment();
}, (error) -> {
logger.warn("Exception initializing clusters {}", error.getMessage(), error);
logger.error("JobClusterManagerActor had errors during initialization NOT transitioning to initialized behavior");
// getContext().become(initializedBehavior);
sender.tell(new JobClustersManagerInitializeResponse(initMsg.requestId, SERVER_ERROR, "JobClustersManager inited with errors"), getSelf());
}, () -> {
logger.info("JobClusterManagerActor transitioning to initialized behavior");
getContext().become(initializedBehavior);
sender.tell(new JobClustersManagerInitializeResponse(initMsg.requestId, SUCCESS, "JobClustersManager successfully inited"), getSelf());
});
// Start the reconciliation tick that re-drives clusters stuck in transition states.
getTimers().startPeriodicTimer(CHECK_CLUSTERS_TIMER_KEY, new ReconcileJobCluster(), Duration.ofSeconds(checkAgainInSecs));
// kick off loading of archived jobs
logger.info("Kicking off archived job load asynchronously");
jobStore.loadAllArchivedJobsAsync();
}
} catch(Exception e) {
logger.error("caught exception", e);
sender.tell(new JobClustersManagerInitializeResponse(initMsg.requestId, SERVER_ERROR, e.getMessage()), getSelf());
}
logger.info("JobClustersManagerActor:initialize ends");
}
@Override
/**
 * Periodic timer handler: finds clusters stuck in INITIALIZING or DELETING longer than
 * STATE_TRANSITION_TIMEOUT_MSECS and re-drives them — retrying the init request for
 * the former, deregistering the latter.
 */
public void onReconcileJobClusters(ReconcileJobCluster p) {
Set<JobClusterInfo> jobClusterInfos = this.jobClusterInfoManager.getAllJobClusterInfo().values().stream()
.filter((jci) -> ((jci.currentState == JobClusterInfo.JobClusterState.INITIALIZING || jci.currentState == JobClusterInfo.JobClusterState.DELETING)
&& (p.timeOfEnforcement.toEpochMilli() - jci.stateUpdateTime) > STATE_TRANSITION_TIMEOUT_MSECS))
.collect(Collectors.toSet());
if(jobClusterInfos.size() > 0) {
logger.warn("{} JobClusters stuck in initializing/deleting state ", jobClusterInfos.size());
jobClusterInfos.stream().forEach((jci) -> {
if(jci.currentState.equals(JobClusterInfo.JobClusterState.INITIALIZING)) {
// retry init request; bump the timestamp so the next tick doesn't retry again immediately
logger.warn("Retrying init on JobCluster {} stuck in {} state since {}", jci.clusterName, jci.currentState, jci.stateUpdateTime);
jci.stateUpdateTime = p.timeOfEnforcement.toEpochMilli();
jci.jobClusterActor.tell(jci.initRequest, getSelf());
} else { // in pending delete state
logger.warn("Deregistering JobCluster {} stuck in {} state since {}", jci.clusterName, jci.currentState, jci.stateUpdateTime);
jobClusterInfoManager.deregisterJobCluster(jci.clusterName);
}
});
}
}
@Override
/**
 * Creates a new job cluster: registers a cluster actor and kicks off its async
 * initialization. Replies CLIENT_ERROR_CONFLICT if the name is taken, CLIENT_ERROR if
 * the name is invalid (actor creation refused), SERVER_ERROR on unexpected failure.
 * On the happy path the create response is sent later by the cluster init flow.
 */
public void onJobClusterCreate(final CreateJobClusterRequest request) {
final String name = request.getJobClusterDefinition().getName();
if (!jobClusterInfoManager.isClusterExists(name)) {
try {
Optional<JobClusterInfo> jobClusterInfoO = jobClusterInfoManager.createClusterActorAndRegister(request.getJobClusterDefinition());
if (jobClusterInfoO.isPresent()) {
// The original sender is carried in the init request so the eventual response reaches it.
jobClusterInfoManager.initializeClusterAsync(jobClusterInfoO.get(), new JobClusterProto.InitializeJobClusterRequest(request.getJobClusterDefinition(), request.getUser(), getSender()));
} else {
// empty() from createClusterActorAndRegister means the name failed actor-path validation
getSender().tell(new CreateJobClusterResponse(
request.requestId, CLIENT_ERROR,
"Job Cluster " + request.getJobClusterDefinition().getName() + " could not be created due to invalid name",
request.getJobClusterDefinition().getName()), getSelf());
}
} catch (Exception e) {
getSender().tell(new CreateJobClusterResponse(
request.requestId, SERVER_ERROR,
"Job Cluster " + request.getJobClusterDefinition().getName() + " could not be created due to " + e.getMessage(),
request.getJobClusterDefinition().getName()), getSelf());
}
} else {
getSender().tell(new CreateJobClusterResponse(
request.requestId, CLIENT_ERROR_CONFLICT,
"Job Cluster " + request.getJobClusterDefinition().getName() + " already exists",
request.getJobClusterDefinition().getName()), getSelf());
}
}
@Override
// Bookkeeping callback when a cluster actor finishes (or fails) its init handshake.
public void onJobClusterInitializeResponse(final JobClusterProto.InitializeJobClusterResponse createResp) {
logger.info("Got JobClusterInitializeResponse {}", createResp);
jobClusterInfoManager.processInitializeResponse(createResp);
}
@Override
// Delete is a multi-step flow; the manager delegates orchestration to JobClusterInfoManager.
public void onJobClusterDelete(final DeleteJobClusterRequest request) {
jobClusterInfoManager.processDeleteRequest(request);
}
@Override
// Completion callback of the delete flow started in onJobClusterDelete.
public void onJobClusterDeleteResponse(final JobClusterProto.DeleteJobClusterResponse resp) {
jobClusterInfoManager.processDeleteResponse(resp);
}
@Override
public void onJobClusterUpdate(final UpdateJobClusterRequest request) {
    // Route the update to the owning cluster actor; reply not-found otherwise.
    final ActorRef requester = getSender();
    final Optional<JobClusterInfo> clusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getJobClusterDefinition().getName());
    if (!clusterInfo.isPresent()) {
        requester.tell(new UpdateJobClusterResponse(request.requestId, CLIENT_ERROR_NOT_FOUND, "JobCluster " + request.getJobClusterDefinition().getName() + " doesn't exist"), getSelf());
        return;
    }
    clusterInfo.get().jobClusterActor.forward(request, getContext());
}
@Override
public void onJobClustersList(final ListJobClustersRequest request) {
    // Listing can be expensive; hand it to the helper actor with a snapshot of all clusters.
    if(logger.isDebugEnabled()) { logger.info("In onJobClustersListRequest {}", request); }
    final ActorRef requester = getSender();
    final Map<String, JobClusterInfo> allClusters = jobClusterInfoManager.getAllJobClusterInfo();
    jobListHelperActor.tell(new JobListHelperActor.ListJobClusterRequestWrapper(request, requester, allClusters), getSelf());
}
@Override
public void onJobClusterGet(GetJobClusterRequest r) {
    // Forward the lookup to the cluster actor; unknown names get a not-found reply.
    final ActorRef requester = getSender();
    final Optional<JobClusterInfo> clusterInfo = jobClusterInfoManager.getJobClusterInfo(r.getJobClusterName());
    if (!clusterInfo.isPresent()) {
        requester.tell(new GetJobClusterResponse(r.requestId, CLIENT_ERROR_NOT_FOUND, "No such Job cluster " + r.getJobClusterName(), empty()), getSelf());
        return;
    }
    clusterInfo.get().jobClusterActor.forward(r, getContext());
}
@Override
public void onGetLastSubmittedJobIdSubject(GetLastSubmittedJobIdStreamRequest r) {
    // The last-submitted-jobId stream lives in the cluster actor; forward or reply not-found.
    final ActorRef requester = getSender();
    final Optional<JobClusterInfo> clusterInfo = jobClusterInfoManager.getJobClusterInfo(r.getClusterName());
    if (!clusterInfo.isPresent()) {
        requester.tell(new GetLastSubmittedJobIdStreamResponse(r.requestId, CLIENT_ERROR_NOT_FOUND, "No such Job cluster " + r.getClusterName(), empty()), getSelf());
        return;
    }
    clusterInfo.get().jobClusterActor.forward(r, getContext());
}
@Override
/**
 * Routes a worker event to its owning cluster actor. If the cluster no longer exists:
 * non-terminal events cause the worker to be terminated via the scheduler (looked up
 * from the archived job definition); terminal events are simply logged and dropped.
 */
public void onWorkerEvent(WorkerEvent workerEvent) {
if(logger.isDebugEnabled()) { logger.debug("Entering JobClusterManagerActor:onWorkerEvent {}", workerEvent); }
String clusterName = workerEvent.getWorkerId().getJobCluster();
Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(clusterName);
if(jobClusterInfo.isPresent()) {
jobClusterInfo.get().jobClusterActor.forward(workerEvent, getContext());
} else {
if(!JobHelper.isTerminalWorkerEvent(workerEvent)) {
// Orphaned, still-running worker: kill it so it doesn't leak resources.
logger.warn("Event from Worker {} for a cluster {} that no longer exists. Terminate worker", workerEvent, workerEvent.getWorkerId().getJobCluster());
Optional<String> host = JobHelper.getWorkerHostFromWorkerEvent(workerEvent);
// Need the archived job definition to pick the right scheduler for this job.
Optional<JobDefinition> archivedJobDefinition =
jobClusterInfoManager.getArchivedJobDefinition(workerEvent.getWorkerId().getJobId());
if (archivedJobDefinition.isPresent()) {
mantisSchedulerFactory
.forJob(archivedJobDefinition.get())
.unscheduleAndTerminateWorker(workerEvent.getWorkerId(), host);
} else {
// Can't pick a scheduler without the definition; log loudly and leave the worker alone.
logger.error("Non-Terminal Event {} from worker {} for a cluster {} that no longer exists and the job definition not yet archived", workerEvent, workerEvent.getWorkerId(), workerEvent.getWorkerId().getJobCluster());
}
} else {
logger.warn("Terminal Event from Worker {} for a cluster {} that no longer exists. Ignore worker", workerEvent, workerEvent.getWorkerId().getJobCluster());
}
}
}
// DeathWatch notification for a watched child (cluster actor or job-list helper); log only.
private void onTerminated(final Terminated terminated) {
logger.warn("onTerminated {}", terminated.actor());
}
//////////////////// JOB OPERATIONS ////////////////////////////////////////////////
@Override
/**
 * Forwards a job submission to the owning cluster actor; replies not-found if the
 * cluster is unknown.
 */
public void onJobSubmit(final SubmitJobRequest request) {
    // Idiom fix: parameterized logging instead of eager string concatenation.
    logger.info("Submitting job {}", request);
    Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getClusterName());
    ActorRef sender = getSender();
    if (jobClusterInfo.isPresent()) {
        jobClusterInfo.get().jobClusterActor.forward(request, getContext());
    } else {
        sender.tell(new SubmitJobResponse(request.requestId, CLIENT_ERROR_NOT_FOUND, "Job Cluster " + request.getClusterName() + " doesn't exist", empty()), getSelf());
    }
}
@Override
/**
 * Translates a public kill request into the internal JobClusterProto.KillJobRequest
 * (carrying the original sender so the kill response reaches it) and sends it to the
 * owning cluster actor; replies not-found if the cluster is unknown.
 */
public void onJobKillRequest(final KillJobRequest request) {
    // Idiom fix: parameterized logging instead of eager string concatenation.
    logger.info("Killing job {}", request);
    ActorRef sender = getSender();
    JobId jobIdToKill = request.getJobId();
    Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(jobIdToKill.getCluster());
    if (jobClusterInfo.isPresent()) {
        jobClusterInfo.get().jobClusterActor.tell(
            new JobClusterProto.KillJobRequest(request.getJobId(), request.getReason(),
                JobCompletedReason.Killed, request.getUser(), sender), getSelf());
    } else {
        logger.info("Job cluster {} not found", jobIdToKill.getCluster());
        sender.tell(new KillJobResponse(request.requestId, CLIENT_ERROR_NOT_FOUND, JobState.Noop, "Job cluster " + jobIdToKill.getCluster() + " doesn't exist", jobIdToKill, request.getUser()), getSelf());
    }
}
////////////////////// JOB OPERATIONS END //////////////////////////////////////////////
@Override
// Akka lifecycle hook; log-only, defers to the default implementation.
public void preStart() throws Exception {
logger.info("JobClusterManager Actor started");
super.preStart();
}
@Override
// Akka lifecycle hook; log-only, defers to the default implementation.
public void postStop() throws Exception {
logger.info("JobClusterManager Actor stopped");
super.postStop();
}
@Override
// Restart hook: intentionally does NOT call super.preRestart, so child actors
// (the per-cluster actors) survive a restart of this supervisor.
public void preRestart(Throwable t, Optional<Object> m) throws Exception {
logger.info("preRestart {} (exc: {})", m, t.getMessage());
// do not kill all children, which is the default here
// super.preRestart(t, m);
}
@Override
// Restart hook; log-only, defers to the default implementation.
public void postRestart(Throwable reason) throws Exception {
logger.info("postRestart (exc={})", reason.getMessage());
super.postRestart(reason);
}
@Override
public SupervisorStrategy supervisorStrategy() {
// custom supervisor strategy to resume the Actor on Exception instead of the default restart
return MantisActorSupervisorStrategy.getInstance().create();
}
@Override
// The actor starts in the initializing state; initialize() switches it to initializedBehavior.
public Receive createReceive() {
return getInitializingBehavior();
}
/**
 * Logs a failure from the job-cluster-list path.
 * Fix: pass the throwable itself as the final argument so SLF4J records the full
 * stack trace instead of only the message text.
 */
private void logError(Throwable e) {
    logger.error("Exception occurred retrieving job cluster list {}", e.getMessage(), e);
}
@Override
public void onJobClusterUpdateSLA(UpdateJobClusterSLARequest request) {
    // Route the SLA update to the owning cluster actor; reply not-found otherwise.
    final ActorRef requester = getSender();
    final Optional<JobClusterInfo> clusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getClusterName());
    if (!clusterInfo.isPresent()) {
        requester.tell(new UpdateJobClusterSLAResponse(request.requestId, CLIENT_ERROR_NOT_FOUND, "JobCluster " + request.getClusterName() + " doesn't exist"), getSelf());
        return;
    }
    clusterInfo.get().jobClusterActor.forward(request, getContext());
}
@Override
public void onJobClusterUpdateArtifact(UpdateJobClusterArtifactRequest request) {
    // Route the artifact update to the owning cluster actor; reply not-found otherwise.
    final ActorRef requester = getSender();
    final Optional<JobClusterInfo> clusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getClusterName());
    if (!clusterInfo.isPresent()) {
        requester.tell(new UpdateJobClusterArtifactResponse(request.requestId, CLIENT_ERROR_NOT_FOUND, "JobCluster " + request.getClusterName() + " doesn't exist"), getSelf());
        return;
    }
    clusterInfo.get().jobClusterActor.forward(request, getContext());
}
@Override
/**
 * Routes a scheduling-info update to the owning cluster actor; replies not-found if
 * the cluster is unknown.
 */
public void onJobClusterUpdateSchedulingInfo(UpdateSchedulingInfo request) {
    ActorRef sender = getSender();
    Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getClusterName());
    if (jobClusterInfo.isPresent()) {
        jobClusterInfo.get().jobClusterActor.forward(request, getContext());
    } else {
        // Fix: reply with UpdateSchedulingInfoResponse — the previous code sent an
        // UpdateJobClusterArtifactResponse, the wrong response type for this request
        // (compare the initializing-state handler, which uses UpdateSchedulingInfoResponse).
        sender.tell(
            new UpdateSchedulingInfoResponse(
                request.getRequestId(),
                CLIENT_ERROR_NOT_FOUND,
                "JobCluster " + request.getClusterName() + " doesn't exist"),
            getSelf());
    }
}
@Override
public void onJobClusterUpdateLabels(UpdateJobClusterLabelsRequest request) {
    // Route the label update to the owning cluster actor; reply not-found otherwise.
    final ActorRef requester = getSender();
    final Optional<JobClusterInfo> clusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getClusterName());
    if (!clusterInfo.isPresent()) {
        requester.tell(new UpdateJobClusterLabelsResponse(request.requestId, CLIENT_ERROR_NOT_FOUND, "JobCluster " + request.getClusterName() + " doesn't exist"), getSelf());
        return;
    }
    clusterInfo.get().jobClusterActor.forward(request, getContext());
}
@Override
public void onJobClusterUpdateWorkerMigrationConfig(UpdateJobClusterWorkerMigrationStrategyRequest request) {
    // Route the migration-strategy update to the owning cluster actor; reply not-found otherwise.
    final ActorRef requester = getSender();
    final Optional<JobClusterInfo> clusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getClusterName());
    if (!clusterInfo.isPresent()) {
        requester.tell(new UpdateJobClusterWorkerMigrationStrategyResponse(request.requestId, CLIENT_ERROR_NOT_FOUND, "JobCluster " + request.getClusterName() + " doesn't exist"), getSelf());
        return;
    }
    clusterInfo.get().jobClusterActor.forward(request, getContext());
}
@Override
public void onJobClusterEnable(EnableJobClusterRequest request) {
    // Route the enable request to the owning cluster actor; reply not-found otherwise.
    final ActorRef requester = getSender();
    final Optional<JobClusterInfo> clusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getClusterName());
    if (!clusterInfo.isPresent()) {
        requester.tell(new EnableJobClusterResponse(request.requestId, CLIENT_ERROR_NOT_FOUND, "JobCluster " + request.getClusterName() + " doesn't exist"), getSelf());
        return;
    }
    clusterInfo.get().jobClusterActor.forward(request, getContext());
}
@Override
public void onJobClusterDisable(DisableJobClusterRequest request) {
    // Route the disable request to the owning cluster actor; reply not-found otherwise.
    final ActorRef requester = getSender();
    final Optional<JobClusterInfo> clusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getClusterName());
    if (!clusterInfo.isPresent()) {
        requester.tell(new DisableJobClusterResponse(request.requestId, CLIENT_ERROR_NOT_FOUND, "JobCluster " + request.getClusterName() + " doesn't exist"), getSelf());
        return;
    }
    clusterInfo.get().jobClusterActor.forward(request, getContext());
}
@Override
public void onGetJobDetailsRequest(GetJobDetailsRequest request) {
    // Job details live in the cluster actor owning the job; forward or reply not-found.
    final ActorRef requester = getSender();
    final Optional<JobClusterInfo> clusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getJobId().getCluster());
    if (!clusterInfo.isPresent()) {
        requester.tell(new GetJobDetailsResponse(request.requestId, CLIENT_ERROR_NOT_FOUND, "Job " + request.getJobId().getId() + " doesn't exist", empty()), getSelf());
        return;
    }
    clusterInfo.get().jobClusterActor.forward(request, getContext());
}
@Override
public void onGetJobStatusSubject(GetJobSchedInfoRequest request) {
    // The scheduling-info subject lives in the cluster actor; forward or reply not-found.
    final ActorRef requester = getSender();
    final Optional<JobClusterInfo> clusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getJobId().getCluster());
    if (!clusterInfo.isPresent()) {
        requester.tell(new GetJobSchedInfoResponse(request.requestId, CLIENT_ERROR_NOT_FOUND, "JobCluster " + request.getJobId().getCluster() + " doesn't exist", Optional.empty()), getSelf());
        return;
    }
    clusterInfo.get().jobClusterActor.forward(request, getContext());
}
@Override
public void onGetLatestJobDiscoveryInfo(GetLatestJobDiscoveryInfoRequest request) {
    // Discovery info is served by the owning cluster actor; forward or reply not-found.
    final ActorRef requester = getSender();
    final Optional<JobClusterInfo> clusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getJobCluster());
    if (!clusterInfo.isPresent()) {
        requester.tell(new GetLatestJobDiscoveryInfoResponse(request.requestId, CLIENT_ERROR_NOT_FOUND, "JobCluster " + request.getJobCluster() + " doesn't exist", Optional.empty()), getSelf());
        return;
    }
    clusterInfo.get().jobClusterActor.forward(request, getContext());
}
@Override
public void onJobListCompleted(ListCompletedJobsInClusterRequest request) {
    // Completed-job history is kept by the cluster actor; forward or reply not-found.
    final ActorRef requester = getSender();
    final Optional<JobClusterInfo> clusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getClusterName());
    if (!clusterInfo.isPresent()) {
        requester.tell(new ListCompletedJobsInClusterResponse(request.requestId, CLIENT_ERROR_NOT_FOUND, "JobCluster " + request.getClusterName() + " doesn't exist", Lists.newArrayList()), getSelf());
        return;
    }
    clusterInfo.get().jobClusterActor.forward(request, getContext());
}
@Override
public void onJobIdList(ListJobIdsRequest request) {
    if(logger.isTraceEnabled()) { logger.trace("Enter onJobIdList"); }
    // Listing is served off-thread by the helper actor with a snapshot of all clusters.
    final ActorRef requester = getSender();
    jobListHelperActor.tell(new JobListHelperActor.ListJobIdRequestWrapper(request, requester, jobClusterInfoManager.getAllJobClusterInfo()), getSelf());
    if(logger.isTraceEnabled()) { logger.trace("Exit onJobIdList"); }
}
@Override
/**
 * Forwards the archived-workers listing to the owning cluster actor; replies with an
 * error if the cluster is unknown.
 * Fix: the captured {@code sender} was unused and the else branch re-fetched
 * {@code getSender()} — use the captured reference, consistent with sibling handlers.
 */
public void onListArchivedWorkers(ListArchivedWorkersRequest request) {
    Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getJobId().getCluster());
    ActorRef sender = getSender();
    if (jobClusterInfo.isPresent()) {
        jobClusterInfo.get().jobClusterActor.forward(request, getContext());
    } else {
        sender.tell(new ListArchivedWorkersResponse(request.requestId, CLIENT_ERROR, "Job Cluster " + request.getJobId().getCluster() + " Not found", Lists.newArrayList()), getSelf());
    }
}
public void onListActiveWorkers(ListWorkersRequest request) {
    // Active-worker listing is served by the owning cluster actor; forward or reply error.
    final ActorRef requester = getSender();
    final Optional<JobClusterInfo> clusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getJobId().getCluster());
    if (!clusterInfo.isPresent()) {
        requester.tell(new ListWorkersResponse(request.requestId, CLIENT_ERROR, "Job Cluster " + request.getJobId().getCluster() + " Not found", Lists.newArrayList()), getSelf());
        return;
    }
    clusterInfo.get().jobClusterActor.forward(request, getContext());
}
@Override
public void onJobList(ListJobsRequest request) {
    // Hand the (potentially large) listing to the helper actor with a cluster snapshot.
    final ActorRef requester = getSender();
    final JobListHelperActor.ListJobRequestWrapper wrapper =
        new JobListHelperActor.ListJobRequestWrapper(request, requester, jobClusterInfoManager.getAllJobClusterInfo());
    jobListHelperActor.tell(wrapper, getSelf());
}
@Override
public void onScaleStage(ScaleStageRequest scaleStage) {
    // Stage scaling is handled by the owning cluster actor; forward or reply not-found.
    final ActorRef requester = getSender();
    final Optional<JobClusterInfo> clusterInfo = jobClusterInfoManager.getJobClusterInfo(scaleStage.getJobId().getCluster());
    if (!clusterInfo.isPresent()) {
        requester.tell(new ScaleStageResponse(scaleStage.requestId, CLIENT_ERROR_NOT_FOUND, "JobCluster " + scaleStage.getJobId().getCluster() + " doesn't exist", 0), getSelf());
        return;
    }
    clusterInfo.get().jobClusterActor.forward(scaleStage, getContext());
}
@Override
public void onResubmitWorker(ResubmitWorkerRequest r) {
    // Worker resubmission is handled by the owning cluster actor; forward or reply not-found.
    final ActorRef requester = getSender();
    final Optional<JobClusterInfo> clusterInfo = jobClusterInfoManager.getJobClusterInfo(r.getJobId().getCluster());
    if (!clusterInfo.isPresent()) {
        requester.tell(new ResubmitWorkerResponse(r.requestId, CLIENT_ERROR_NOT_FOUND, "JobCluster " + r.getJobId().getCluster() + " doesn't exist"), getSelf());
        return;
    }
    clusterInfo.get().jobClusterActor.forward(r, getContext());
}
/**
 * Bookkeeping helper owned by the enclosing JobClustersManager actor. It creates,
 * watches and tracks one child {@link JobClusterActor} per job cluster and drives each
 * entry's initialize/delete lifecycle. All methods are invoked on the owning actor's
 * message-processing thread, so the internal map needs no synchronization.
 */
class JobClusterInfoManager {
    // cluster name -> tracking entry (child actor ref + lifecycle state)
    private final Map<String, JobClusterInfo> jobClusterNameToInfoMap = new HashMap<>();
    private final LifecycleEventPublisher eventPublisher;
    private final MantisSchedulerFactory mantisSchedulerFactory;
    private final MantisJobStore jobStore;
    // Registered metrics handle; holding it keeps the cluster-count gauge alive.
    private final Metrics metrics;
    private final CostsCalculator costsCalculator;

    JobClusterInfoManager(MantisJobStore jobStore, MantisSchedulerFactory mantisSchedulerFactory, LifecycleEventPublisher eventPublisher, CostsCalculator costsCalculator) {
        this.eventPublisher = eventPublisher;
        this.mantisSchedulerFactory = mantisSchedulerFactory;
        this.jobStore = jobStore;
        this.costsCalculator = costsCalculator;
        // Gauge exposes the number of currently registered job clusters.
        MetricGroupId metricGroupId = new MetricGroupId("JobClusterInfoManager");
        Metrics m = new Metrics.Builder()
                .id(metricGroupId)
                .addGauge(new GaugeCallback(metricGroupId, "jobClustersGauge", () -> 1.0 * jobClusterNameToInfoMap.size()))
                .build();
        this.metrics = MetricsRegistry.getInstance().registerAndGet(m);
    }

    /**
     * Creates the job cluster actor, watches it and adds it to the internal map.
     * Could throw an unchecked exception if actor creation fails.
     *
     * @param jobClusterDefn definition of the cluster to register
     * @return the new (or pre-existing) entry, or empty when the cluster name is not a
     *         valid actor path element
     */
    Optional<JobClusterInfo> createClusterActorAndRegister(IJobClusterDefinition jobClusterDefn) {
        String clusterName = jobClusterDefn.getName();
        if(!isClusterExists(clusterName)) {
            // Reject names that cannot be used as an Akka actor path element.
            if (!ActorPaths.isValidPathElement(clusterName)) {
                logger.error("Cannot create actor for cluster with invalid name {}", clusterName);
                return empty();
            }
            ActorRef jobClusterActor =
                    getContext().actorOf(
                            JobClusterActor.props(clusterName, this.jobStore, this.mantisSchedulerFactory, this.eventPublisher, this.costsCalculator),
                            "JobClusterActor-" + clusterName);
            // Watch so the parent receives Terminated if the child dies.
            getContext().watch(jobClusterActor);
            JobClusterInfo jobClusterInfo = new JobClusterInfo(clusterName, jobClusterDefn, jobClusterActor);
            jobClusterNameToInfoMap.put(clusterName, jobClusterInfo);
            return ofNullable(jobClusterInfo);
        } else {
            // Already registered: hand back the existing entry.
            return ofNullable(jobClusterNameToInfoMap.get(clusterName));
        }
    }

    /** Marks the entry deleted, unwatches/stops the child actor and drops it from the map. */
    void deregisterJobCluster(String jobClusterName) {
        Optional<JobClusterInfo> jobClusterInfo = getJobClusterInfo(jobClusterName);
        if(jobClusterInfo.isPresent()) {
            jobClusterInfo.get().markDeleted(System.currentTimeMillis());
            // unwatch and stop actor
            ActorRef jobClusterActor = jobClusterInfo.get().jobClusterActor;
            getContext().unwatch(jobClusterActor);
            getContext().stop(jobClusterActor);
            jobClusterNameToInfoMap.remove(jobClusterName);
        } else {
            // Fix: log the cluster name; previously this logged the empty Optional itself,
            // which rendered as "Optional.empty" and hid which cluster was missing.
            logger.warn("Job Cluster does not exist {}", jobClusterName);
        }
    }

    /**
     * Asks the cluster actor to initialize within the given timeout. On success the
     * entry is marked INITIALIZED; on any error the cluster is deregistered and an
     * error response is emitted instead of propagating the failure.
     */
    Observable<JobClusterProto.InitializeJobClusterResponse> initializeCluster(JobClusterInfo jobClusterInfo, JobClusterProto.InitializeJobClusterRequest req, Duration t) {
        jobClusterInfo.markInitializing(req, System.currentTimeMillis());
        CompletionStage<JobClusterProto.InitializeJobClusterResponse> respCS = ask(jobClusterInfo.jobClusterActor, req, t)
                .thenApply(JobClusterProto.InitializeJobClusterResponse.class::cast);
        return Observable.from(respCS.toCompletableFuture(),Schedulers.io())
                .map((resp)-> {
                    logger.info("JobCluster {} inited with code {}", resp.jobClusterName, resp.responseCode);
                    Optional<JobClusterInfo> jClusterInfo = jobClusterInfoManager.getJobClusterInfo(resp.jobClusterName);
                    if(resp.responseCode == SUCCESS) {
                        jClusterInfo.ifPresent((jci) -> jci.markInitialized(System.currentTimeMillis()));
                    }
                    return resp;
                })
                .onErrorResumeNext(ex -> {
                    logger.warn("caught exception {}", ex.getMessage(), ex);
                    numJobClusterInitFailures.increment();
                    // initialization fails deregister cluster
                    deregisterJobCluster(jobClusterInfo.clusterName);
                    return Observable.just(new JobClusterProto.InitializeJobClusterResponse(req.requestId, BaseResponse.ResponseCode.SERVER_ERROR,ex.getMessage(), jobClusterInfo.clusterName, ActorRef.noSender()));
                });
    }

    /** Fire-and-forget variant: the response arrives later as an actor message. */
    void initializeClusterAsync(JobClusterInfo jobClusterInfo, JobClusterProto.InitializeJobClusterRequest req) {
        jobClusterInfo.markInitializing(req,System.currentTimeMillis());
        jobClusterInfo.jobClusterActor.tell(req, getSelf());
    }

    Optional<JobClusterInfo> getJobClusterInfo(String jobClusterName) {
        return ofNullable(jobClusterNameToInfoMap.get(jobClusterName));
    }

    /** Looks up a completed job's definition from the archive store. */
    Optional<JobDefinition> getArchivedJobDefinition(String jobId) {
        return jobStore.getArchivedJob(jobId).map(IMantisJobMetadata::getJobDefinition);
    }

    /** Read-only view over all registered clusters; do not retain across messages. */
    Map<String, JobClusterInfo> getAllJobClusterInfo() {
        return Collections.unmodifiableMap(jobClusterNameToInfoMap);
    }

    boolean isClusterExists(String clusterName) {
        return jobClusterNameToInfoMap.containsKey(clusterName);
    }

    /**
     * Handles the async initialize response: on SUCCESS marks the entry INITIALIZED and
     * confirms creation to the original requestor; on SERVER_ERROR tears the cluster down
     * and relays the failure.
     */
    void processInitializeResponse(JobClusterProto.InitializeJobClusterResponse createResp) {
        Optional<JobClusterInfo> jClusterInfo = getJobClusterInfo(createResp.jobClusterName);
        if(jClusterInfo.isPresent()) {
            JobClusterInfo jobClusterInfo = jClusterInfo.get();
            if(createResp.responseCode == SUCCESS) {
                jobClusterInfo.markInitialized(System.currentTimeMillis());
                createResp.requestor.tell(
                        new CreateJobClusterResponse(createResp.requestId,
                                SUCCESS_CREATED,
                                createResp.jobClusterName + " created",
                                createResp.jobClusterName),
                        getSelf());
            } else if( createResp.responseCode == SERVER_ERROR){
                deregisterJobCluster(createResp.jobClusterName);
                createResp.requestor.tell(new CreateJobClusterResponse(createResp.requestId, createResp.responseCode, createResp.message, createResp.jobClusterName), getSelf());
            }
        } else {
            logger.warn("Received JobClusterInitializeResponse {} for unknown Job Cluster {}", createResp, createResp.jobClusterName);
        }
    }

    /**
     * Starts a cluster delete: marks the entry DELETING and forwards an internal delete
     * request (carrying the original sender) to the cluster actor; replies NOT_FOUND
     * when the cluster is unknown.
     */
    void processDeleteRequest(DeleteJobClusterRequest request) {
        Optional<JobClusterInfo> jobClusterInfoOp= getJobClusterInfo(request.getName());
        ActorRef sender = getSender();
        if (jobClusterInfoOp.isPresent()) {
            JobClusterInfo jobClusterInfo = jobClusterInfoOp.get();
            jobClusterInfo.jobClusterActor.tell(
                    new JobClusterProto.DeleteJobClusterRequest(request.getUser(), request.getName(), sender),
                    getSelf());
            jobClusterInfo.markDeleting(System.currentTimeMillis());
        } else {
            sender.tell(
                    new DeleteJobClusterResponse(request.requestId, CLIENT_ERROR_NOT_FOUND, "JobCluster " + request.getName() + " doesn't exist"),
                    getSelf());
        }
    }

    /** Completes a delete: deregisters on SUCCESS, then relays the outcome to the caller. */
    void processDeleteResponse(JobClusterProto.DeleteJobClusterResponse resp) {
        Optional<JobClusterInfo> jobClusterInfoOp= getJobClusterInfo(resp.clusterName);
        if(jobClusterInfoOp.isPresent()) {
            if(resp.responseCode == SUCCESS) {
                deregisterJobCluster(resp.clusterName);
            }
        } else {
            // No Such job cluster ignore
            logger.warn("Received delete job cluster response {} for unknown job cluster {}", resp, resp.clusterName);
        }
        // inform caller
        resp.requestingActor.tell(
                new DeleteJobClusterResponse(resp.requestId, resp.responseCode, resp.message)
                , getSelf());
    }
}
/**
 * Immutable message requesting that a job cluster's scheduling info be updated to the
 * given version. Lombok {@code @Value} generates the constructor, getters and
 * equals/hashCode/toString for the fields below.
 */
@Value
public static class UpdateSchedulingInfo {
    // Correlation id echoed back in the response.
    long requestId;
    // Name of the job cluster to update.
    String clusterName;
    // New scheduling info (stage/worker layout) to apply.
    SchedulingInfo schedulingInfo;
    // Artifact/definition version this scheduling info belongs to.
    String version;
}
/**
 * Per-cluster bookkeeping record: the cluster's child actor plus a small lifecycle
 * state machine (UNINITIALIZED -> INITIALIZING -> INITIALIZED, with unconditional
 * DELETING/DELETED transitions).
 */
static class JobClusterInfo {
    private static final Logger logger = LoggerFactory.getLogger(JobClusterInfo.class);

    /** Lifecycle states a cluster entry moves through. */
    public enum JobClusterState { UNINITIALIZED, INITIALIZING, INITIALIZED, DELETING, DELETED}

    // Request that started initialization; retained while INITIALIZING.
    private JobClusterProto.InitializeJobClusterRequest initRequest;
    final String clusterName;
    final ActorRef jobClusterActor;
    private volatile JobClusterState currentState;
    volatile long stateUpdateTime;
    final IJobClusterDefinition jobClusterDefinition;

    JobClusterInfo(final String clusterName, final IJobClusterDefinition definition, final ActorRef clusterActor) {
        this.clusterName = clusterName;
        this.jobClusterActor = clusterActor;
        this.jobClusterDefinition = definition;
        this.currentState = JobClusterState.UNINITIALIZED;
        this.stateUpdateTime = System.currentTimeMillis();
    }

    public String getClusterName() {
        return clusterName;
    }

    public IJobClusterDefinition getJobClusterDefinition() {
        return jobClusterDefinition;
    }

    /** UNINITIALIZED -> INITIALIZING; any other source state is logged and ignored. */
    void markInitializing(JobClusterProto.InitializeJobClusterRequest req, long time) {
        if (currentState != JobClusterState.UNINITIALIZED) {
            logger.warn("Invalid state transition from {} to {} for job cluster {}", currentState, JobClusterState.INITIALIZING, clusterName);
            return;
        }
        this.stateUpdateTime = time;
        currentState = JobClusterState.INITIALIZING;
        initRequest = req;
    }

    /** INITIALIZING -> INITIALIZED; any other source state is logged and ignored. */
    void markInitialized(long time) {
        if (currentState != JobClusterState.INITIALIZING) {
            logger.warn("Invalid state transition from {} to {} for job cluster {}", currentState, JobClusterState.INITIALIZED, clusterName);
            return;
        }
        this.stateUpdateTime = time;
        this.currentState = JobClusterState.INITIALIZED;
    }

    /** Deletion transitions are allowed from any state. */
    void markDeleting(long time) {
        this.currentState = JobClusterState.DELETING;
        this.stateUpdateTime = time;
    }

    void markDeleted(long time) {
        this.currentState = JobClusterState.DELETED;
        this.stateUpdateTime = time;
    }

    @Override
    public String toString() {
        return "JobClusterInfo{" +
                "clusterName='" + clusterName + '\'' +
                ", jobClusterActor=" + jobClusterActor +
                ", currentState=" + currentState +
                ", stateUpdateTime=" + stateUpdateTime +
                ", jobClusterDefinition=" + jobClusterDefinition +
                '}';
    }
}
}
| 8,027 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/IJobClustersManager.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master;
import io.mantisrx.master.JobClustersManagerActor.UpdateSchedulingInfo;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.CreateJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DeleteJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.KillJobRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobClustersRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobIdsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.DeleteJobClusterResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.InitializeJobClusterResponse;
import io.mantisrx.server.master.scheduler.WorkerEvent;
/**
 * Message-handling contract implemented by the job clusters manager actor. Each method
 * corresponds to one protocol message; handlers typically either answer the requesting
 * actor directly or forward the request to the per-cluster child actor.
 */
public interface IJobClustersManager {
    // cluster related messages (create/delete/update and their async completions)
    void onJobClusterCreate(CreateJobClusterRequest request);
    // Completion callback for the async initialization started by a create request.
    void onJobClusterInitializeResponse(InitializeJobClusterResponse createResp);
    void onJobClusterDelete(DeleteJobClusterRequest request);
    // Completion callback for a delete previously forwarded to the cluster actor.
    void onJobClusterDeleteResponse(DeleteJobClusterResponse resp);
    void onJobClusterUpdate(UpdateJobClusterRequest request);
    void onJobClusterUpdateSLA(JobClusterManagerProto.UpdateJobClusterSLARequest r);
    void onJobClusterUpdateArtifact(JobClusterManagerProto.UpdateJobClusterArtifactRequest r);
    void onJobClusterUpdateSchedulingInfo(UpdateSchedulingInfo r);
    void onJobClusterUpdateLabels(JobClusterManagerProto.UpdateJobClusterLabelsRequest r);
    void onJobClusterUpdateWorkerMigrationConfig(JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyRequest r);
    // cluster query messages
    void onJobClustersList(ListJobClustersRequest request);
    void onJobClusterGet(JobClusterManagerProto.GetJobClusterRequest r);
    void onJobClusterEnable(JobClusterManagerProto.EnableJobClusterRequest r);
    void onJobClusterDisable(JobClusterManagerProto.DisableJobClusterRequest r);
    void onGetJobStatusSubject(JobClusterManagerProto.GetJobSchedInfoRequest request);
    void onGetLatestJobDiscoveryInfo(JobClusterManagerProto.GetLatestJobDiscoveryInfoRequest request);
    void onJobListCompleted(JobClusterManagerProto.ListCompletedJobsInClusterRequest r);
    void onJobList(ListJobsRequest request);
    void onJobIdList(ListJobIdsRequest request);
    // worker related messages
    void onGetLastSubmittedJobIdSubject(JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest request);
    void onWorkerEvent(WorkerEvent r);
    // Job related messages
    void onJobSubmit(JobClusterManagerProto.SubmitJobRequest request);
    void onJobKillRequest(KillJobRequest request);
    void onGetJobDetailsRequest(JobClusterManagerProto.GetJobDetailsRequest request);
    void onScaleStage(ScaleStageRequest scaleStage);
    void onResubmitWorker(ResubmitWorkerRequest r);
    void onListArchivedWorkers(ListArchivedWorkersRequest request);
    void onListActiveWorkers(JobClusterManagerProto.ListWorkersRequest request);
    // Periodic self-check message to reconcile persisted vs in-memory cluster state.
    void onReconcileJobClusters(JobClusterManagerProto.ReconcileJobCluster p);
}
| 8,028 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/DeadLetterActor.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master;
import akka.actor.AbstractActor;
import akka.actor.DeadLetter;
import io.mantisrx.common.JsonSerializer;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import lombok.extern.slf4j.Slf4j;
/**
 * Actor subscribed to the actor system's {@link DeadLetter} events. It counts each
 * undeliverable message and logs a truncated JSON rendering of its payload so dropped
 * messages are diagnosable without flooding the log.
 */
@Slf4j
public class DeadLetterActor extends AbstractActor {
    /** Maximum number of payload characters included in a single log line. */
    private static final int MAX_LOGGED_PAYLOAD_CHARS = 250;
    private final Counter numDeadLetterMsgs;
    private final JsonSerializer serializer;

    public DeadLetterActor() {
        Metrics m = new Metrics.Builder()
                .id("DeadLetterActor")
                .addCounter("numDeadLetterMsgs")
                .build();
        Metrics metrics = MetricsRegistry.getInstance().registerAndGet(m);
        this.numDeadLetterMsgs = metrics.getCounter("numDeadLetterMsgs");
        this.serializer = new JsonSerializer();
    }

    @Override
    public Receive createReceive() {
        return receiveBuilder()
                .match(DeadLetter.class, msg -> {
                    this.numDeadLetterMsgs.increment();
                    String m = toString(msg.message());
                    // Fix: bound the substring by m.length(), not m.length() - 1. The old
                    // bound silently dropped the final character of payloads shorter than
                    // the cap and threw StringIndexOutOfBoundsException for an empty one.
                    log.error("Dead Letter from {} to {} msg type: {} payload: {}",
                            msg.sender(),
                            msg.recipient(),
                            msg.message().getClass().getSimpleName(),
                            m.substring(0, Math.min(MAX_LOGGED_PAYLOAD_CHARS, m.length())));
                })
                .build();
    }

    /** Best-effort JSON rendering of the payload; falls back to toString() on failure. */
    private String toString(Object o) {
        try {
            return serializer.toJson(o);
        } catch (Exception e) {
            log.error("Failed to serialize {}", o, e);
            return o.toString();
        }
    }
}
| 8,029 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/zk/LeaderElector.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.zk;
import static io.mantisrx.shaded.org.apache.zookeeper.KeeperException.Code.OK;
import io.mantisrx.server.core.BaseService;
import io.mantisrx.server.master.ILeadershipManager;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.org.apache.curator.framework.CuratorFramework;
import io.mantisrx.shaded.org.apache.curator.framework.recipes.leader.LeaderLatch;
import io.mantisrx.shaded.org.apache.curator.framework.recipes.leader.LeaderLatchListener;
import io.mantisrx.shaded.org.apache.zookeeper.CreateMode;
import io.mantisrx.shaded.org.apache.zookeeper.data.Stat;
import io.netty.util.concurrent.DefaultThreadFactory;
import java.io.IOException;
import java.util.concurrent.Executors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * ZooKeeper-backed leader election for the Mantis master, built on Curator's
 * {@link LeaderLatch}. When this node wins the latch it writes this master's
 * description to {@code leaderPath} and, once that write is acknowledged, promotes
 * itself via {@link ILeadershipManager#becomeLeader()}; when it loses the latch it
 * calls {@link ILeadershipManager#stopBeingLeader()}.
 */
public class LeaderElector extends BaseService {
    private static final Logger logger = LoggerFactory.getLogger(LeaderElector.class);
    // NOTE(review): check-then-act on this flag is not atomic; assumes start()/shutdown()
    // are invoked from a single lifecycle thread — confirm with BaseService callers.
    private volatile boolean started = false;
    private final ObjectMapper jsonMapper;
    private final ILeadershipManager leadershipManager;
    private final LeaderLatch leaderLatch;
    private final CuratorFramework curator;
    // ZK path the latch participants contend on.
    private final String electionPath;
    // The path where a selected leader announces itself.
    private final String leaderPath;

    private LeaderElector(ObjectMapper jsonMapper,
                          ILeadershipManager leadershipManager,
                          CuratorFramework curator,
                          String electionPath,
                          String leaderPath) {
        super(false);
        this.jsonMapper = jsonMapper;
        this.leadershipManager = leadershipManager;
        this.curator = curator;
        this.leaderLatch = createNewLeaderLatch(electionPath);
        this.electionPath = electionPath;
        this.leaderPath = leaderPath;
    }

    /**
     * Creates the announcement node if absent and joins the election. Idempotent:
     * repeated calls after the first are no-ops until shutdown().
     */
    @Override
    public void start() {
        if (started) {
            return;
        }
        started = true;
        try {
            Stat pathStat = curator.checkExists().forPath(leaderPath);
            // Create the path only if the path does not exist
            if(pathStat == null) {
                curator.create()
                        .creatingParentsIfNeeded()
                        .withMode(CreateMode.PERSISTENT)
                        .forPath(leaderPath);
            }
            leaderLatch.start();
        } catch (Exception e) {
            throw new IllegalStateException("Failed to create a leader elector for master: "+e.getMessage(), e);
        }
    }

    /** Leaves the election; closing the latch triggers notLeader() on the listener. */
    @Override
    public void shutdown() {
        try {
            leaderLatch.close();
        } catch (IOException e) {
            logger.warn("Failed to close the leader latch: "+e.getMessage(), e);
        }finally {
            started = false;
        }
    }

    // NOTE(review): the parameter is named leaderPath but the constructor passes the
    // election path here — the latch contends on electionPath, not the announcement
    // path. Consider renaming. The latch participant id is hardcoded to "127.0.0.1".
    private LeaderLatch createNewLeaderLatch(String leaderPath) {
        final LeaderLatch newLeaderLatch = new LeaderLatch(curator, leaderPath, "127.0.0.1");
        newLeaderLatch.addListener(
                new LeaderLatchListener() {
                    @Override
                    public void isLeader() {
                        announceLeader();
                    }

                    @Override
                    public void notLeader() {
                        leadershipManager.stopBeingLeader();
                    }
                }, Executors.newSingleThreadExecutor(new DefaultThreadFactory("MasterLeader-%s")));
        return newLeaderLatch;
    }

    /**
     * Publishes this master's description to the announcement path. becomeLeader() is
     * deferred to the async write callback so we only act as leader once the
     * announcement is durably stored in ZK.
     */
    private void announceLeader() {
        try {
            logger.info("Announcing leader");
            byte[] masterDescription = jsonMapper.writeValueAsBytes(leadershipManager.getDescription());
            // There is no need to lock anything because we ensure only leader will write to the leader path
            curator
                    .setData()
                    .inBackground((client, event) -> {
                        if (event.getResultCode() == OK.intValue()) {
                            leadershipManager.becomeLeader();
                        } else {
                            logger.warn("Failed to elect leader from path {} with event {}", leaderPath, event);
                        }
                    }).forPath(leaderPath, masterDescription);
        } catch (Exception e) {
            throw new RuntimeException("Failed to announce leader: "+e.getMessage(), e);
        }
    }

    public static LeaderElector.Builder builder(ILeadershipManager manager) {
        return new LeaderElector.Builder(manager);
    }

    /** Fluent builder; all fields must be set before build(). */
    public static class Builder {
        private ObjectMapper jsonMapper;
        private ILeadershipManager leadershipManager;
        private CuratorFramework curator;
        private String electionPath;
        private String announcementPath;

        public Builder(ILeadershipManager leadershipManager){
            this.leadershipManager = leadershipManager;
        }

        public LeaderElector.Builder withJsonMapper(ObjectMapper jsonMapper) {
            this.jsonMapper = jsonMapper;
            return this;
        }

        public LeaderElector.Builder withCurator(CuratorFramework curator) {
            this.curator = curator;
            return this;
        }

        public LeaderElector.Builder withElectionPath(String path) {
            this.electionPath = path;
            return this;
        }

        public LeaderElector.Builder withAnnouncementPath(String annPath) {
            this.announcementPath = annPath;
            return this;
        }

        public LeaderElector build() {
            return new LeaderElector(jsonMapper, leadershipManager, curator, electionPath, announcementPath);
        }
    }
}
| 8,030 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/scheduler/ScheduleRequest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.scheduler;
import com.netflix.fenzo.ConstraintEvaluator;
import com.netflix.fenzo.VMTaskFitnessCalculator;
import com.netflix.fenzo.queues.QAttributes;
import com.netflix.fenzo.queues.QueuableTask;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.server.core.domain.JobMetadata;
import io.mantisrx.server.core.domain.WorkerId;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
/**
 * A Fenzo {@link QueuableTask} describing the resource ask for placing one Mantis
 * worker. Identity (equals/hashCode) is based solely on the {@link WorkerId}, so two
 * requests for the same worker are interchangeable in scheduler queues.
 */
public class ScheduleRequest implements QueuableTask {
    /** All requests share one queue bucket in tier 0; Mantis does not use Fenzo tiering. */
    public static final QAttributes DEFAULT_Q_ATTRIBUTES = new QAttributes() {
        @Override
        public String getBucketName() {
            return "default";
        }

        @Override
        public int getTierNumber() {
            return 0;
        }
    };
    // Single shared task-group name for all schedule requests.
    private static final String DEFAULT_GRP_NAME = "defaultGrp";
    private final WorkerId workerId;
    private final int stageNum;
    private final int numPortsRequested;
    private final JobMetadata jobMetadata;
    private final MantisJobDurationType durationType;
    private final MachineDefinition machineDefinition;
    private final List<ConstraintEvaluator> hardConstraints;
    private final List<VMTaskFitnessCalculator> softConstraints;
    // Earliest launch time (epoch millis); mutated by the scheduler via safeSetReadyAt.
    private volatile long readyAt;
    private final Optional<String> preferredCluster;

    public ScheduleRequest(final WorkerId workerId,
                           final int stageNum,
                           final int numPortsRequested,
                           final JobMetadata jobMetadata,
                           final MantisJobDurationType durationType,
                           final MachineDefinition machineDefinition,
                           final List<ConstraintEvaluator> hardConstraints,
                           final List<VMTaskFitnessCalculator> softConstraints,
                           final long readyAt,
                           final Optional<String> preferredCluster) {
        this.workerId = workerId;
        this.stageNum = stageNum;
        this.numPortsRequested = numPortsRequested;
        this.jobMetadata = jobMetadata;
        this.durationType = durationType;
        this.machineDefinition = machineDefinition;
        this.hardConstraints = hardConstraints;
        this.softConstraints = softConstraints;
        this.readyAt = readyAt;
        this.preferredCluster = preferredCluster;
    }

    /** Equality is by worker id only (null-safe via Objects.equals). */
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        ScheduleRequest that = (ScheduleRequest) o;
        return Objects.equals(workerId, that.workerId);
    }

    @Override
    public int hashCode() {
        // Objects.hashCode returns 0 for null, matching equals' null handling.
        return Objects.hashCode(workerId);
    }

    @Override
    public String getId() {
        return workerId.getId();
    }

    public WorkerId getWorkerId() {
        return workerId;
    }

    @Override
    public String taskGroupName() {
        return DEFAULT_GRP_NAME;
    }

    // Resource asks below are delegated to the worker's machine definition.
    @Override
    public double getCPUs() {
        return machineDefinition.getCpuCores();
    }

    @Override
    public double getMemory() {
        return machineDefinition.getMemoryMB();
    }

    @Override
    public double getNetworkMbps() {
        return machineDefinition.getNetworkMbps();
    }

    @Override
    public double getDisk() {
        return machineDefinition.getDiskMB();
    }

    @Override
    public int getPorts() {
        return numPortsRequested;
    }

    public JobMetadata getJobMetadata() {
        return jobMetadata;
    }

    public MachineDefinition getMachineDefinition() {
        return machineDefinition;
    }

    @Override
    public Map<String, Double> getScalarRequests() {
        return Collections.emptyMap();
    }

    @Override
    public Map<String, NamedResourceSetRequest> getCustomNamedResources() {
        return Collections.emptyMap();
    }

    @Override
    public List<ConstraintEvaluator> getHardConstraints() {
        return hardConstraints;
    }

    @Override
    public List<VMTaskFitnessCalculator> getSoftConstraints() {
        return softConstraints;
    }

    @Override
    public void setAssignedResources(AssignedResources assignedResources) {
        // no-op Not using them at this time
    }

    @Override
    public AssignedResources getAssignedResources() {
        // not used by Mantis
        return null;
    }

    public MantisJobDurationType getDurationType() {
        return durationType;
    }

    public int getStageNum() {
        return stageNum;
    }

    @Override
    public QAttributes getQAttributes() {
        return DEFAULT_Q_ATTRIBUTES;
    }

    @Override
    public long getReadyAt() {
        return readyAt;
    }

    @Override
    public void safeSetReadyAt(long when) {
        readyAt = when;
    }

    public Optional<String> getPreferredCluster() {
        return preferredCluster;
    }

    @Override
    public String toString() {
        return "ScheduleRequest{" +
                "workerId=" + workerId +
                ", readyAt=" + readyAt +
                '}';
    }
}
| 8,031 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/scheduler/AgentsErrorMonitorActor.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.scheduler;
import static java.util.Optional.empty;
import static java.util.Optional.of;
import akka.actor.AbstractActorWithTimers;
import akka.actor.ActorRef;
import akka.actor.Props;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.master.events.LifecycleEventsProto;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.functions.Action1;
public class AgentsErrorMonitorActor extends AbstractActorWithTimers implements IAgentsErrorMonitor {
private final Logger logger = LoggerFactory.getLogger(AgentsErrorMonitorActor.class);
private static final long ERROR_CHECK_WINDOW_MILLIS=120000; // 2 mins
private static final int ERROR_CHECK_WINDOW_COUNT=3;
private static final long TOO_OLD_MILLIS = 3600000;
private static final long DISABLE_DURATION_MILLIS = 60*1000; // 1mins
private Action1<String> slaveEnabler = s -> logger.warn("SlaveEnabler not initialized yet!");
private Action1<String> slaveDisabler = s -> logger.warn("SlaveDisabler not initialized yet!");
private long too_old_mills;
private int error_check_window_count;
private long error_check_window_millis;
private long disableDurationMillis;
private final Map<String, HostErrors> hostErrorMap = new HashMap<>();
private static final String CHECK_HOST_TIMER_KEY = "CHECK_HOST";
private Optional<MantisScheduler> mantisSchedulerOptional = empty();
// Behavior after being initialized
Receive initializedBehavior;
/** Props factory with explicit eviction/window/disable tuning parameters. */
public static Props props(long too_old_millis, int error_check_window_count, long error_check_window_millis, long disableDurationMillis) {
    return Props.create(
            AgentsErrorMonitorActor.class,
            too_old_millis,
            error_check_window_count,
            error_check_window_millis,
            disableDurationMillis);
}
/** Props factory using the class-default tuning constants. */
public static Props props() {
    return Props.create(
            AgentsErrorMonitorActor.class,
            TOO_OLD_MILLIS,
            ERROR_CHECK_WINDOW_COUNT,
            ERROR_CHECK_WINDOW_MILLIS,
            DISABLE_DURATION_MILLIS);
}
// Default constructor: delegates to the tuning constructor with the class defaults.
public AgentsErrorMonitorActor() {
    this(TOO_OLD_MILLIS,ERROR_CHECK_WINDOW_COUNT,ERROR_CHECK_WINDOW_MILLIS, DISABLE_DURATION_MILLIS);
}
/**
 * Creates the monitor with explicit tuning values. Each argument is validated and
 * silently replaced with the class default when out of range (non-positive, or for the
 * window size, below one second).
 *
 * @param too_old_millis idle time after which a host's error record is evicted
 * @param error_check_window_count error count within the window that triggers disable
 * @param error_check_window_millis sliding-window length and health-check period
 * @param disableDurationMillis how long a flagged host stays disabled
 */
public AgentsErrorMonitorActor(long too_old_millis, int error_check_window_count, long error_check_window_millis, long disableDurationMillis) {
    this.too_old_mills = (too_old_millis>0)? too_old_millis : TOO_OLD_MILLIS;
    this.error_check_window_count = (error_check_window_count>0)? error_check_window_count : ERROR_CHECK_WINDOW_COUNT;
    this.error_check_window_millis = (error_check_window_millis>1000)? error_check_window_millis : ERROR_CHECK_WINDOW_MILLIS;
    this.disableDurationMillis = (disableDurationMillis>-1) ? disableDurationMillis : DISABLE_DURATION_MILLIS;
    // Behavior installed after InitializeAgentsErrorMonitor arrives (see onInitialize).
    this.initializedBehavior = receiveBuilder()
            .match(LifecycleEventsProto.WorkerStatusEvent.class, js -> onWorkerEvent(js))
            .match(CheckHostHealthMessage.class, js -> onCheckHostHealth())
            .match(HostErrorMapRequest.class, js -> onHostErrorMapRequest())
            .matchAny(x -> logger.warn("unexpected message '{}' received by AgentsErrorMonitorActor actor ", x))
            .build();
}
// Initial behavior: only the bootstrap message is accepted; everything else is logged
// and dropped until onInitialize switches to initializedBehavior.
@Override
public Receive createReceive() {
    return receiveBuilder()
            .match(InitializeAgentsErrorMonitor.class, js -> onInitialize(js))
            .matchAny(x -> logger.warn("unexpected message '{}' received by AgentsErrorMonitorActor actor ", x))
            .build();
}
/**
 * Bootstraps the monitor: captures the scheduler, binds the enable/disable callbacks,
 * switches to the fully-initialized behavior and starts the periodic host-health sweep
 * (period = the error-check window).
 */
public void onInitialize(InitializeAgentsErrorMonitor initializeAgentsErrorMonitor) {
    this.mantisSchedulerOptional = of(initializeAgentsErrorMonitor.getScheduler());
    // Safe to call get() in these lambdas: the Optional is set just above.
    slaveDisabler = hostName -> mantisSchedulerOptional.get().disableVM(hostName,disableDurationMillis);
    slaveEnabler = hostName -> mantisSchedulerOptional.get().enableVM(hostName);
    getContext().become(initializedBehavior);
    getTimers().startPeriodicTimer(CHECK_HOST_TIMER_KEY, new CheckHostHealthMessage(), scala.concurrent.duration.Duration.create(error_check_window_millis, TimeUnit.MILLISECONDS));
}
/**
 * Periodic sweep: evicts per-host error records that have seen no activity for longer
 * than the configured eviction threshold, so stale hosts do not accumulate forever.
 */
@Override
public void onCheckHostHealth() {
    final long nowMillis = Instant.now().toEpochMilli();
    for (Iterator<HostErrors> it = hostErrorMap.values().iterator(); it.hasNext(); ) {
        final HostErrors errorsForHost = it.next();
        final long idleMillis = nowMillis - errorsForHost.getLastActivityAt();
        if (idleMillis > this.too_old_mills) {
            logger.debug("No Events from host since {} evicting", idleMillis);
            it.remove();
        }
    }
}
/**
 * Records a worker error against its host. When a host accumulates too many errors
 * within the sliding window, it is disabled via the scheduler for the configured
 * duration. Events without a host name or in a non-error state are ignored.
 */
@Override
public void onWorkerEvent(LifecycleEventsProto.WorkerStatusEvent workerEvent) {
    if(logger.isTraceEnabled()) { logger.trace("onWorkerEvent " + workerEvent + " is error state " + WorkerState.isErrorState(workerEvent.getWorkerState())); }
    if(workerEvent.getHostName().isPresent() && WorkerState.isErrorState(workerEvent.getWorkerState())) {
        String hostName = workerEvent.getHostName().get();
        logger.info("Registering worker error on host {}", hostName);
        // Lazily create the tracker; it captures slaveEnabler so it can re-enable the
        // host itself once its error window clears.
        HostErrors hostErrors = hostErrorMap.computeIfAbsent(hostName, (hName) -> new HostErrors(hName,slaveEnabler,this.error_check_window_millis,this.error_check_window_count));
        if(hostErrors.addAndGetIsTooManyErrors(workerEvent)) {
            logger.warn("Host {} has too many errors in a short duration, disabling..", hostName);
            this.slaveDisabler.call(hostName);
        }
    }
}
@Override
public void onHostErrorMapRequest() {
ActorRef sender = getSender();
sender.tell(new HostErrorMapResponse(Collections.unmodifiableMap(this.hostErrorMap)), getSelf());
}
public static class InitializeAgentsErrorMonitor {
private final MantisScheduler scheduler;
public InitializeAgentsErrorMonitor(final MantisScheduler scheduler) {
Preconditions.checkNotNull(scheduler, "MantisScheduler cannot be null");
this.scheduler = scheduler;
}
public MantisScheduler getScheduler() {
return this.scheduler;
}
}
static class CheckHostHealthMessage {
long now = -1;
public CheckHostHealthMessage() {
}
public CheckHostHealthMessage(long now) {
this.now = now;
}
public long getCurrentTime() {
if(now == -1) {
return System.currentTimeMillis();
} else {
return this.now;
}
}
}
    /** Marker request message asking the actor for a snapshot of its per-host error map. */
    static class HostErrorMapRequest {
    }
static class HostErrorMapResponse {
private final Map<String, HostErrors> errorMap;
public HostErrorMapResponse(final Map<String, HostErrors> hostErrorsMap) {
this.errorMap = hostErrorsMap;
}
public Map<String,HostErrors> getMap() {
return this.errorMap;
}
}
static class HostErrors {
private static final Logger logger = LoggerFactory.getLogger(HostErrors.class);
private final String hostname;
private final List<Long> errors;
private long lastActivityAt = System.currentTimeMillis();
private final Action1<String> slaveEnabler;
private final long error_check_window_millis;
private final int windowCount;
HostErrors(String hostname, Action1<String> slaveEnabler, long error_check_window_millis, int windowCount) {
this.hostname = hostname;
this.errors = new ArrayList<>();
this.slaveEnabler = slaveEnabler;
this.error_check_window_millis = error_check_window_millis;
this.windowCount = windowCount;
}
long getLastActivityAt() {
return lastActivityAt;
}
boolean addAndGetIsTooManyErrors(LifecycleEventsProto.WorkerStatusEvent status) {
logger.info("InaddGetisTooManyErrors for host {}", hostname);
lastActivityAt = status.getTimestamp();
if(WorkerState.isErrorState(status.getWorkerState())) {
errors.add(lastActivityAt);
logger.info("Registering error {}", errors);
} else if(status.getWorkerState() == WorkerState.Started) {
// saw a successfull worker start and error list is not empty clear it and reenable host
if(!errors.isEmpty()) {
errors.clear();
logger.info("{} cleared of errors, reenabling host ", hostname);
slaveEnabler.call(hostname);
}
}
final Iterator<Long> iterator = errors.iterator();
while(iterator.hasNext()) {
final long next = iterator.next();
// purge old events (rolling window)
if((lastActivityAt - next) > error_check_window_millis)
iterator.remove();
}
logger.info("No of errors in window is {} ", errors.size());
return errors.size() > windowCount;
}
List<Long> getErrorTimestampList() {
return Collections.unmodifiableList(errors);
}
}
}
| 8,032 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/scheduler/ConstraintsEvaluators.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.scheduler;
// TODO(review): the entire Fenzo-based ConstraintsEvaluators implementation below
// is commented-out dead code — either restore it or delete it.
//import com.netflix.fenzo.AsSoftConstraint;
//import com.netflix.fenzo.ConstraintEvaluator;
//import com.netflix.fenzo.VMTaskFitnessCalculator;
//import com.netflix.fenzo.plugins.BalancedHostAttrConstraint;
//import com.netflix.fenzo.plugins.ExclusiveHostConstraint;
//import com.netflix.fenzo.plugins.UniqueHostAttrConstraint;
//import io.mantisrx.runtime.JobConstraints;
//import io.mantisrx.server.master.config.ConfigurationProvider;
//import io.mantisrx.server.master.ClusterAffinityConstraint;
//import org.slf4j.Logger;
//import org.slf4j.LoggerFactory;
//import com.netflix.fenzo.functions.Func1;
//
//import java.util.Set;
//
//public class ConstraintsEvaluators {
//
// private static final String MANTISAGENT_MAIN_M4 = "mantisagent-main-m4";
// private static final int EXPECTED_NUM_ZONES = 3;
// private static ExclusiveHostConstraint exclusiveHostConstraint = new ExclusiveHostConstraint();
// private static final Logger logger = LoggerFactory.getLogger(ConstraintsEvaluators.class);
//
// public static ConstraintEvaluator hardConstraint(JobConstraints constraint, final Set<String> coTasks) {
// switch (constraint) {
// case ExclusiveHost:
// return exclusiveHostConstraint;
// case UniqueHost:
// return new UniqueHostAttrConstraint(new Func1<String, Set<String>>() {
// @Override
// public Set<String> call(String s) {
// return coTasks;
// }
// });
// case ZoneBalance:
// return new BalancedHostAttrConstraint(new Func1<String, Set<String>>() {
// @Override
// public Set<String> call(String s) {
// return coTasks;
// }
// }, zoneAttributeName(), EXPECTED_NUM_ZONES);
// case M4Cluster:
// return new ClusterAffinityConstraint(asgAttributeName(), MANTISAGENT_MAIN_M4);
// default:
// logger.error("Unknown job hard constraint " + constraint);
// return null;
// }
// }
//
// private static String asgAttributeName() {
// return ConfigurationProvider.getConfig().getActiveSlaveAttributeName();
// }
//
// private static String zoneAttributeName() {
// return ConfigurationProvider.getConfig().getHostZoneAttributeName();
// }
//
// public static VMTaskFitnessCalculator softConstraint(JobConstraints constraint, final Set<String> coTasks) {
// switch (constraint) {
// case ExclusiveHost:
// return AsSoftConstraint.get(exclusiveHostConstraint);
// case UniqueHost:
// return AsSoftConstraint.get(new UniqueHostAttrConstraint(new Func1<String, Set<String>>() {
// @Override
// public Set<String> call(String s) {
// return coTasks;
// }
// }));
// case ZoneBalance:
// return new BalancedHostAttrConstraint(new Func1<String, Set<String>>() {
// @Override
// public Set<String> call(String s) {
// return coTasks;
// }
// }, zoneAttributeName(), EXPECTED_NUM_ZONES).asSoftConstraint();
// case M4Cluster:
// return AsSoftConstraint.get(new ClusterAffinityConstraint(asgAttributeName(), MANTISAGENT_MAIN_M4));
// default:
// logger.error("Unknown job soft constraint " + constraint);
// return null;
// }
// }
//}
| 8,033 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/scheduler/IAgentsErrorMonitor.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.scheduler;
import io.mantisrx.master.events.LifecycleEventsProto;
/**
 * Behavior contract for the agents error monitor: reacts to worker status
 * events and answers queries about per-host error state.
 */
public interface IAgentsErrorMonitor {

    /** Periodic health check; expected to evict stale per-host error records. */
    void onCheckHostHealth();

    /** Handles a worker status event, tracking errors against the event's host. */
    void onWorkerEvent(LifecycleEventsProto.WorkerStatusEvent workerEvent);

    /** Handles a request for the current per-host error map. */
    void onHostErrorMapRequest();
}
| 8,034 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/scheduler/WorkerStateAdapter.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.scheduler;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.server.master.scheduler.WorkerResourceStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Translates scheduler-level {@code VMResourceState} values into job-level
 * {@link WorkerState} values.
 */
public class WorkerStateAdapter {

    private static final Logger logger = LoggerFactory.getLogger(WorkerStateAdapter.class);

    // Utility class; not meant to be instantiated.
    private WorkerStateAdapter() {}

    /**
     * Maps a VM resource state to the corresponding worker state.
     *
     * @throws IllegalArgumentException if the resource state has no known mapping
     */
    public static WorkerState from(final WorkerResourceStatus.VMResourceState resourceState) {
        switch (resourceState) {
            case START_INITIATED:
                return WorkerState.StartInitiated;
            case STARTED:
                return WorkerState.Started;
            case FAILED:
                return WorkerState.Failed;
            case COMPLETED:
                return WorkerState.Completed;
            default:
                logger.error("Missing WorkerState mapping for VMResourceState {}", resourceState);
                throw new IllegalArgumentException("unknown enum value for VMResourceState " + resourceState);
        }
    }
}
| 8,035 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/scheduler/JobMessageRouterImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.scheduler;
import akka.actor.ActorRef;
import io.mantisrx.server.master.scheduler.JobMessageRouter;
import io.mantisrx.server.master.scheduler.WorkerEvent;
/**
 * {@link JobMessageRouter} implementation that forwards worker events to the
 * JobClusterManager actor via a fire-and-forget tell.
 */
public class JobMessageRouterImpl implements JobMessageRouter {
    // Target actor for all routed worker events.
    final ActorRef jobClusterManagerRef;
    public JobMessageRouterImpl(final ActorRef jobClusterManagerActorRef) {
        this.jobClusterManagerRef = jobClusterManagerActorRef;
    }
    /**
     * Routes the worker event to the JobClusterManager actor. Always returns
     * true; delivery is asynchronous and unacknowledged.
     */
    @Override
    public boolean routeWorkerEvent(final WorkerEvent workerEvent) {
        jobClusterManagerRef.tell(workerEvent, ActorRef.noSender());
        /* TODO - need a return value to indicate to scheduling service if the worker was marked Launched successfully
        from the Job Management perspective, only then the Task is dispatched to Mesos. If this method returns false for the
        WorkerLaunched event, no mesos task is launched. The return value would have to be async to work with the Actor model
        unless we use the Ask pattern here. */
        return true;
    }
}
| 8,036 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/utils/CaffeineMetrics.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.utils;
import static java.util.Objects.requireNonNull;
import com.github.benmanes.caffeine.cache.stats.CacheStats;
import com.github.benmanes.caffeine.cache.stats.StatsCounter;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import java.util.concurrent.TimeUnit;
/**
 * Caffeine cache {@link StatsCounter} that publishes cache statistics (hits,
 * misses, load counts/time, evictions) through the Mantis
 * {@link MetricsRegistry}, so they surface as regular Mantis metrics.
 */
public final class CaffeineMetrics implements StatsCounter {
    private final Counter hitCount;
    private final Counter missCount;
    private final Counter loadSuccessCount;
    private final Counter loadFailureCount;
    // TODO make totalLoadTime a Timer
    private final Gauge totalLoadTime;
    private final Counter evictionCount;
    private final Counter evictionWeight;

    /**
     * Constructs an instance for use by a single cache.
     *
     * @param metricGroup suffix used to namespace this cache's metrics in the registry
     */
    public CaffeineMetrics(String metricGroup) {
        requireNonNull(metricGroup);
        Metrics template = new Metrics.Builder()
            .id("CaffeineMetrics_" + metricGroup)
            .addCounter("hits")
            .addCounter("misses")
            .addGauge("loadTimeMillis")
            .addCounter("loadsSuccess")
            .addCounter("loadsFailure")
            .addCounter("evictions")
            .addCounter("evictionsWeight")
            .build();
        // registerAndGet may return a previously-registered instance; use the returned one.
        Metrics registered = MetricsRegistry.getInstance().registerAndGet(template);
        this.hitCount = registered.getCounter("hits");
        this.missCount = registered.getCounter("misses");
        this.totalLoadTime = registered.getGauge("loadTimeMillis");
        this.loadSuccessCount = registered.getCounter("loadsSuccess");
        this.loadFailureCount = registered.getCounter("loadsFailure");
        this.evictionCount = registered.getCounter("evictions");
        this.evictionWeight = registered.getCounter("evictionsWeight");
    }

    @Override
    public void recordHits(int count) {
        hitCount.increment(count);
    }

    @Override
    public void recordMisses(int count) {
        missCount.increment(count);
    }

    @Override
    public void recordLoadSuccess(long loadTime) {
        loadSuccessCount.increment();
        // Caffeine reports load time in nanoseconds; the gauge stores millis.
        totalLoadTime.set(TimeUnit.NANOSECONDS.toMillis(loadTime));
    }

    @Override
    public void recordLoadFailure(long loadTime) {
        loadFailureCount.increment();
        totalLoadTime.set(TimeUnit.NANOSECONDS.toMillis(loadTime));
    }

    @Override
    @SuppressWarnings("deprecation")
    public void recordEviction() {
        // Deprecated upstream in favor of recordEviction(weight); delegate with weight 1.
        recordEviction(1);
    }

    @Override
    public void recordEviction(int weight) {
        evictionCount.increment();
        evictionWeight.increment(weight);
    }

    /** Snapshot of the current counter/gauge values as Caffeine's {@link CacheStats}. */
    @Override
    public CacheStats snapshot() {
        return new CacheStats(
            hitCount.value(),
            missCount.value(),
            loadSuccessCount.value(),
            loadFailureCount.value(),
            totalLoadTime.value(),
            evictionCount.value(),
            evictionWeight.value());
    }

    @Override
    public String toString() {
        return snapshot().toString();
    }
}
| 8,037 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/IJobClusterManager.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster;
import io.mantisrx.master.JobClustersManagerActor.UpdateSchedulingInfo;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DisableJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.EnableJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListCompletedJobsInClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobIdsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.SubmitJobRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterArtifactRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterLabelsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterSLARequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterProto;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.DeleteJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.EnforceSLARequest;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.ExpireOldJobsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.InitializeJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.JobStartedEvent;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.KillJobRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.KillJobResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.TriggerCronRequest;
import io.mantisrx.master.jobcluster.proto.JobProto.JobInitialized;
import io.mantisrx.server.master.scheduler.WorkerEvent;
/**
 * Declares the behavior for Job Cluster Manager: lifecycle and configuration
 * of a job cluster, submission/termination of jobs, worker events, and
 * read-only queries over jobs and workers.
 */
public interface IJobClusterManager {

    // --- Job cluster lifecycle & configuration ---

    void onJobClusterInitialize(InitializeJobClusterRequest initReq);
    void onJobClusterUpdate(UpdateJobClusterRequest request);
    void onJobClusterDelete(DeleteJobClusterRequest request);
    void onJobList(ListJobsRequest request);
    void onJobListCompleted(ListCompletedJobsInClusterRequest request);
    void onJobClusterDisable(DisableJobClusterRequest req);
    void onJobClusterEnable(EnableJobClusterRequest req);
    void onJobClusterGet(GetJobClusterRequest request);

    // --- Job submission & lifecycle events ---

    void onJobSubmit(SubmitJobRequest request);
    void onJobInitialized(JobInitialized jobInited);
    void onJobStarted(JobStartedEvent startedEvent);
    /** Handles a worker status/lifecycle event routed to this cluster. */
    void onWorkerEvent(WorkerEvent r);
    void onJobKillRequest(KillJobRequest req);
    void onResubmitWorkerRequest(ResubmitWorkerRequest req);
    void onKillJobResponse(KillJobResponse killJobResponse);

    // --- Queries ---

    void onGetJobDetailsRequest(GetJobDetailsRequest req);
    void onGetLatestJobDiscoveryInfo(JobClusterManagerProto.GetLatestJobDiscoveryInfoRequest request);
    void onGetJobStatusSubject(JobClusterManagerProto.GetJobSchedInfoRequest request);
    void onGetLastSubmittedJobIdSubject(JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest request);

    // --- SLA enforcement & bookkeeping ---

    void onEnforceSLARequest(EnforceSLARequest request);
    void onBookkeepingRequest(JobClusterProto.BookkeepingRequest request);

    // --- Cluster attribute updates ---

    void onJobClusterUpdateSLA(UpdateJobClusterSLARequest slaRequest);
    void onJobClusterUpdateLabels(UpdateJobClusterLabelsRequest labelRequest);
    void onJobClusterUpdateArtifact(UpdateJobClusterArtifactRequest artifactReq);
    void onJobClusterUpdateSchedulingInfo(UpdateSchedulingInfo request);
    void onJobClusterUpdateWorkerMigrationConfig(UpdateJobClusterWorkerMigrationStrategyRequest req);

    // --- Scaling, resubmission & maintenance ---

    void onScaleStage(ScaleStageRequest scaleStage);
    void onResubmitWorker(ResubmitWorkerRequest r);
    void onJobIdList(ListJobIdsRequest request);
    /** Archives/expires completed jobs older than the retention window. */
    void onExpireOldJobs(ExpireOldJobsRequest request);
    void onListArchivedWorkers(ListArchivedWorkersRequest request);
    void onListActiveWorkers(JobClusterManagerProto.ListWorkersRequest request);
    /** Triggers the cluster's cron-based submission, if configured. */
    void onTriggerCron(TriggerCronRequest request);
}
| 8,038 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/LabelManager.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.command.InvalidJobException;
import io.mantisrx.server.master.domain.JobDefinition;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Static helpers for managing Mantis system labels ({@code _mantis.*}) on job
 * definitions: stamping artifact/version labels and marking auto-resubmits.
 */
public class LabelManager {

    /** Well-known system label keys attached by the master. */
    public enum SystemLabels {
        MANTIS_IS_RESUBMIT_LABEL("_mantis.isResubmit"),
        MANTIS_ARTIFACT_LABEL("_mantis.artifact"),
        MANTIS_VERSION_LABEL("_mantis.version"),
        MANTIS_SUBMITTER_LABEL("_mantis.submitter"),
        MANTIS_OWNER_EMAIL_LABEL("_mantis.ownerEmail"),
        MANTIS_CRITIALITY_LABEL("_mantis.criticality"),
        MANTIS_DATA_ORIGIN_LABEL("_mantis.dataOrigin"),
        MANTIS_JOB_TYPE_LABEL("_mantis.jobType"),
        MANTIS_RESOURCE_CLUSTER_NAME_LABEL("_mantis.resourceCluster");

        public final String label;

        SystemLabels(String s) {
            this.label = s;
        }
    }

    private static final Logger logger = LoggerFactory.getLogger(LabelManager.class);

    // Utility class; not meant to be instantiated.
    private LabelManager() {}

    /** Number of labels the master always injects (artifact + version). */
    static int numberOfMandatoryLabels() {
        return 2;
    }

    /**
     * Re-stamps the artifact and version system labels (replacing any existing
     * ones) on the job definition and, for auto-resubmitted jobs, adds the
     * isResubmit label first. Returns the original definition unchanged if
     * rebuilding it fails.
     */
    static JobDefinition insertSystemLabels(JobDefinition resolvedJobDefn, boolean autoResubmit) {
        JobDefinition updatedJobDefn = resolvedJobDefn;
        if (autoResubmit) {
            updatedJobDefn = insertAutoResubmitLabel(resolvedJobDefn);
        }
        String artifactName = updatedJobDefn.getArtifactName();
        String version = updatedJobDefn.getVersion();
        // Drop stale artifact/version labels before stamping the current values.
        List<Label> updatedLabels = updatedJobDefn.getLabels().stream()
            .filter(label -> !label.getName().equals(SystemLabels.MANTIS_ARTIFACT_LABEL.label)
                && !label.getName().equals(SystemLabels.MANTIS_VERSION_LABEL.label))
            .collect(Collectors.toList());
        updatedLabels.add(new Label(SystemLabels.MANTIS_ARTIFACT_LABEL.label, artifactName));
        updatedLabels.add(new Label(SystemLabels.MANTIS_VERSION_LABEL.label, version));
        try {
            return new JobDefinition.Builder().from(updatedJobDefn)
                .withLabels(updatedLabels).build();
        } catch (InvalidJobException e) {
            logger.error(e.getMessage());
            return resolvedJobDefn;
        }
    }

    /**
     * Adds the isResubmit system label if not already present; otherwise
     * returns the definition unchanged. Returns the original definition if
     * rebuilding it fails.
     */
    static JobDefinition insertAutoResubmitLabel(JobDefinition resolvedJobDefn) {
        List<Label> labels = resolvedJobDefn.getLabels();
        boolean alreadyHasResubmitLabel = labels.stream().anyMatch(
            label -> label.getName().equals(SystemLabels.MANTIS_IS_RESUBMIT_LABEL.label));
        if (!alreadyHasResubmitLabel) {
            List<Label> updatedLabels = new ArrayList<>(labels);
            updatedLabels.add(new Label(SystemLabels.MANTIS_IS_RESUBMIT_LABEL.label, "true"));
            try {
                JobDefinition updatedJobDefn = new JobDefinition.Builder().from(resolvedJobDefn)
                    .withLabels(updatedLabels).build();
                logger.debug("Added isResubmit label");
                return updatedJobDefn;
            } catch (InvalidJobException e) {
                logger.error(e.getMessage());
                return resolvedJobDefn;
            }
        } else {
            logger.debug("Job " + resolvedJobDefn.getName() + " already has isResubmit label. Don't add new");
            return resolvedJobDefn;
        }
    }
}
| 8,039 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/JobClusterActor.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster;
import static akka.pattern.PatternsCS.ask;
import static io.mantisrx.common.SystemParameters.JOB_WORKER_HEARTBEAT_INTERVAL_SECS;
import static io.mantisrx.common.SystemParameters.JOB_WORKER_TIMEOUT_SECS;
import static io.mantisrx.master.StringConstants.MANTIS_MASTER_USER;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR_NOT_FOUND;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SERVER_ERROR;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS;
import static java.util.Optional.empty;
import static java.util.Optional.of;
import static java.util.Optional.ofNullable;
import akka.actor.AbstractActorWithTimers;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.actor.SupervisorStrategy;
import akka.actor.Terminated;
import com.mantisrx.common.utils.LabelUtils;
import com.netflix.fenzo.triggers.CronTrigger;
import com.netflix.fenzo.triggers.TriggerOperator;
import com.netflix.fenzo.triggers.exceptions.SchedulerException;
import com.netflix.fenzo.triggers.exceptions.TriggerNotFoundException;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.common.Label;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.metrics.spectator.GaugeCallback;
import io.mantisrx.common.metrics.spectator.MetricGroupId;
import io.mantisrx.master.JobClustersManagerActor.UpdateSchedulingInfo;
import io.mantisrx.master.akka.MantisActorSupervisorStrategy;
import io.mantisrx.master.api.akka.route.proto.JobClusterProtoAdapter.JobIdInfo;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventsProto;
import io.mantisrx.master.jobcluster.job.CostsCalculator;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.master.jobcluster.job.JobActor;
import io.mantisrx.master.jobcluster.job.JobHelper;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.master.jobcluster.job.MantisJobMetadataImpl;
import io.mantisrx.master.jobcluster.job.MantisJobMetadataView;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DeleteJobClusterResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DisableJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DisableJobClusterResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.EnableJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.EnableJobClusterResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobClusterResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDefinitionUpdatedFromJobActorRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDefinitionUpdatedFromJobActorResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobSchedInfoRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobSchedInfoResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLastSubmittedJobIdStreamResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLatestJobDiscoveryInfoRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLatestJobDiscoveryInfoResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.JobClustersManagerInitializeResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.KillJobResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListCompletedJobsInClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListCompletedJobsInClusterResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobCriteria;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobIdsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobIdsResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobsResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListWorkersRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListWorkersResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.SubmitJobRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.SubmitJobResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterArtifactRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterArtifactResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterLabelsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterLabelsResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterSLARequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterSLAResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateSchedulingInfoResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterProto;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.JobStartedEvent;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.KillJobRequest;
import io.mantisrx.master.jobcluster.proto.JobProto;
import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.command.InvalidJobException;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.master.ConstraintsEvaluators;
import io.mantisrx.server.master.InvalidJobRequest;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.domain.IJobClusterDefinition;
import io.mantisrx.server.master.domain.IJobClusterDefinition.CronPolicy;
import io.mantisrx.server.master.domain.JobClusterConfig;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl.CompletedJob;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.domain.SLA;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.persistence.exceptions.JobClusterAlreadyExistsException;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.MantisSchedulerFactory;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import io.mantisrx.shaded.com.google.common.base.Throwables;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.functions.Action1;
import rx.schedulers.Schedulers;
import rx.subjects.BehaviorSubject;
/**
* Actor responsible for handling all operations related to one Job Cluster.
* @author njoshi
*
*/
public class JobClusterActor extends AbstractActorWithTimers implements IJobClusterManager {
// How often (seconds) the periodic bookkeeping timer would fire.
private static final int BOOKKEEPING_INTERVAL_SECS = 5;
// Timer key used to register/replace the bookkeeping timer (see setBookkeepingTimer).
private static final String BOOKKEEPING_TIMER_KEY = "JOB_CLUSTER_BOOKKEEPING";
// Default limits for list/query operations — usage not visible in this chunk; confirm against the list handlers.
private static final Integer DEFAULT_LIMIT = 100;
private static final Integer DEFAULT_ACTIVE_JOB_LIMIT = 5000;
private final Logger logger = LoggerFactory.getLogger(JobClusterActor.class);
// Timer key for the periodic purge of expired/completed jobs (see setExpiredJobsTimer).
private static final String CHECK_EXPIRED_TIMER_KEY = "EXPIRE_OLD_JOBS";
private static final long EXPIRED_JOBS_CHECK_INTERVAL_SECS = 3600;
// Per-cluster counters; all are registered and resolved in the constructor
// under the metric group returned by getMetricGroupId(name).
private final Counter numJobSubmissions;
private final Counter numJobShutdowns;
private final Counter numJobActorCreationCounter;
private final Counter numJobClustersInitialized;
private final Counter numJobClusterInitializeFailures;
private final Counter numJobsInitialized;
private final Counter numJobSubmissionFailures;
private final Counter numJobClusterEnable;
private final Counter numJobClusterEnableErrors;
private final Counter numJobClusterDisable;
private final Counter numJobClusterDisableErrors;
private final Counter numJobClusterDelete;
private final Counter numJobClusterDeleteErrors;
private final Counter numJobClusterUpdate;
private final Counter numJobClusterUpdateErrors;
private final Counter numJobsStuckInAccepted;
private final Counter numSLAEnforcementExecutions;
/**
 * Creates the Akka {@link Props} used to spawn a {@link JobClusterActor}.
 *
 * @param name                  job cluster name
 * @param jobStore              persistence layer for cluster/job metadata
 * @param mantisSchedulerFactory factory for per-job schedulers
 * @param eventPublisher        lifecycle/audit event publisher
 * @param costsCalculator       cost calculator passed through to the JobManager
 * @return Props for instantiating the actor
 */
public static Props props(
        final String name,
        final MantisJobStore jobStore,
        final MantisSchedulerFactory mantisSchedulerFactory,
        final LifecycleEventPublisher eventPublisher,
        final CostsCalculator costsCalculator) {
    // Argument order must match the JobClusterActor constructor signature,
    // since Props.create resolves the constructor reflectively.
    final Props actorProps = Props.create(
            JobClusterActor.class,
            name,
            jobStore,
            mantisSchedulerFactory,
            eventPublisher,
            costsCalculator);
    return actorProps;
}
// Behavior installed once initialization succeeds (built by buildInitializedBehavior()).
private Receive initializedBehavior;
// Behavior installed while the cluster is disabled (built by buildDisabledBehavior()).
private Receive disabledBehavior;
// Cluster name; also used as the metrics tag (see getMetricGroupId()).
private final String name;
// Persistence layer for cluster/job metadata.
private final MantisJobStore jobStore;
// Current cluster metadata; (re)built in onJobClusterInitialize().
private IJobClusterMetadata jobClusterMetadata;
// Cron trigger manager; created during initialization — may remain null if cron setup throws.
private CronManager cronManager;
// SLA enforcer built from the cluster definition's SLA during initialization.
private SLAEnforcer slaEnforcer;
// Owns the per-job actors, job state maps and completed-jobs cache for this cluster.
private final JobManager jobManager;
// Factory for per-job schedulers.
private final MantisSchedulerFactory mantisSchedulerFactory;
// Publishes lifecycle/audit events.
private final LifecycleEventPublisher eventPublisher;
// Streams the most recently submitted JobId to subscribers (GetLastSubmittedJobIdStream).
private BehaviorSubject<JobId> jobIdSubmissionSubject;
// Resolves submitted job definitions against cluster defaults — TODO confirm; resolver logic not visible in this chunk.
private final JobDefinitionResolver jobDefinitionResolver = new JobDefinitionResolver();
/**
 * Creates the JobClusterActor, pre-builds its alternate behaviors and registers
 * all per-cluster counters and gauges with the MetricsRegistry.
 *
 * @param name             job cluster name (also the metrics tag)
 * @param jobStore         persistence layer for cluster/job metadata
 * @param schedulerFactory factory for per-job schedulers
 * @param eventPublisher   lifecycle/audit event publisher
 * @param costsCalculator  cost calculator handed to the JobManager
 */
public JobClusterActor(
        final String name,
        final MantisJobStore jobStore,
        final MantisSchedulerFactory schedulerFactory,
        final LifecycleEventPublisher eventPublisher,
        final CostsCalculator costsCalculator) {
    this.name = name;
    this.jobStore = jobStore;
    this.mantisSchedulerFactory = schedulerFactory;
    this.eventPublisher = eventPublisher;
    // JobManager owns the per-job actors and job state for this cluster.
    this.jobManager = new JobManager(name, getContext(), mantisSchedulerFactory, eventPublisher, jobStore, costsCalculator);
    jobIdSubmissionSubject = BehaviorSubject.create();
    // Build both behaviors up front so getContext().become(...) transitions are cheap.
    initializedBehavior = buildInitializedBehavior();
    disabledBehavior = buildDisabledBehavior();
    // Register counters and live gauges under the per-cluster metric group.
    MetricGroupId metricGroupId = getMetricGroupId(name);
    Metrics m = new Metrics.Builder()
        .id(metricGroupId)
        .addCounter("numJobSubmissions")
        .addCounter("numJobSubmissionFailures")
        .addCounter("numJobShutdowns")
        .addCounter("numJobActorCreationCounter")
        .addCounter("numJobsInitialized")
        .addCounter("numJobClustersInitialized")
        .addCounter("numJobClusterInitializeFailures")
        .addCounter("numJobClusterEnable")
        .addCounter("numJobClusterEnableErrors")
        .addCounter("numJobClusterDisable")
        .addCounter("numJobClusterDisableErrors")
        .addCounter("numJobClusterDelete")
        .addCounter("numJobClusterDeleteErrors")
        .addCounter("numJobClusterUpdate")
        .addCounter("numJobClusterUpdateErrors")
        .addCounter("numSLAEnforcementExecutions")
        .addCounter("numJobsStuckInAccepted")
        // Gauges read live state from the JobManager on every poll.
        .addGauge(new GaugeCallback(metricGroupId, "acceptedJobsGauge", () -> 1.0 * this.jobManager.acceptedJobsCount()))
        .addGauge(new GaugeCallback(metricGroupId, "activeJobsGauge", () -> 1.0 * this.jobManager.activeJobsCount()))
        .addGauge(new GaugeCallback(metricGroupId, "terminatingJobsGauge", () -> 1.0 * this.jobManager.terminatingJobsMap.size()))
        .addGauge(new GaugeCallback(metricGroupId, "completedJobsGauge", () -> 1.0 * this.jobManager.completedJobsCache.completedJobs.size()))
        .addGauge(new GaugeCallback(metricGroupId, "actorToJobIdMappingsGauge", () -> 1.0 * this.jobManager.actorToJobIdMap.size()))
        .build();
    // registerAndGet may return an existing Metrics instance for this group,
    // so counters must be fetched from the returned object, not the local builder result.
    m = MetricsRegistry.getInstance().registerAndGet(m);
    this.numJobSubmissions = m.getCounter("numJobSubmissions");
    this.numJobActorCreationCounter = m.getCounter("numJobActorCreationCounter");
    this.numJobSubmissionFailures = m.getCounter("numJobSubmissionFailures");
    this.numJobShutdowns = m.getCounter("numJobShutdowns");
    this.numJobsInitialized = m.getCounter("numJobsInitialized");
    this.numJobClustersInitialized = m.getCounter("numJobClustersInitialized");
    this.numJobClusterInitializeFailures = m.getCounter("numJobClusterInitializeFailures");
    this.numJobClusterEnable = m.getCounter("numJobClusterEnable");
    this.numJobClusterDisable = m.getCounter("numJobClusterDisable");
    this.numJobClusterDelete = m.getCounter("numJobClusterDelete");
    this.numJobClusterUpdate = m.getCounter("numJobClusterUpdate");
    this.numJobClusterEnableErrors = m.getCounter("numJobClusterEnableErrors");
    this.numJobClusterDisableErrors = m.getCounter("numJobClusterDisableErrors");
    this.numJobClusterDeleteErrors = m.getCounter("numJobClusterDeleteErrors");
    this.numJobClusterUpdateErrors = m.getCounter("numJobClusterUpdateErrors");
    this.numSLAEnforcementExecutions = m.getCounter("numSLAEnforcementExecutions");
    this.numJobsStuckInAccepted = m.getCounter("numJobsStuckInAccepted");
}
/**
 * The actor starts in the uninitialized state: only an
 * InitializeJobClusterRequest is processed until initialization completes.
 */
@Override
public Receive createReceive() {
    return buildInitialBehavior();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
JobCluster Actor behaviors 30 total
// CLUSTER RELATED
* - Init
* - UpdateJC
* - UpdateLabel
* - UpdateSLA
* - UpdateArtifact
* - UpdateMigrationStrat
*
* - ENABLE JC
* - DISABLE JC
*
* - GET CLUSTER
* - DELETE
*
* - ENFORCE SLA
* - TRIGGER CRON
* - EXPIRE OLD JOBS
*
* - LIST archived workers
* - LIST completed jobs
* - GET LAST SUBMITTED JOB
* - LIST JOB IDS
* - LIST JOBS
* - LIST WORKERS -> (pass thru to each Job Actor)
*
* // pass thru to JOB
* - SUBMIT JOB -> (INIT JOB on Job Actor)
* - GET JOB -> (pass thru Job Actor)
* - GET JOB SCHED INFO -> (pass thru Job Actor)
* - KILL JOB -> (pass thru Job Actor)
* - RESUBMIT WORKER -> (pass thru Job Actor)
* - KILL JOB Response
* - JOB SHUTDOWN EVENT
* - WORKER EVENT -> (pass thru Job Actor)
* - SCALE JOB -> (pass thru Job Actor)
*
* - JOB INITED
* - JOB STARTED
* - GetJobDefinitionUpdatedFromJobActorResponse (resume job submit request)
*/
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* DISABLED BEHAVIOR
* EXPECTED EVENTS (14)
*
*
* - UpdateJC
* - UpdateLabel
* - UpdateSLA
* - UpdateArtifact
* - UpdateMigrationStrat
* - ENABLE JC
* - GET CLUSTER
* - DELETE
* - LIST archived workers
* - LIST completed jobs
* - KILL JOB Response
* - JOB SHUTDOWN EVENT
* - EXPIRE OLD JOBS
* - WORKER EVENT ( KILL WORKER)
*
* UNEXPECTED EVENTS (16)
* - Init
* - DISABLE JC
* - ENFORCE SLA
* - TRIGGER CRON
* - LIST JOB IDS
* - LIST JOBS
* - LIST WORKERS -> (pass thru to each Job Actor)
* - SUBMIT JOB -> (INIT JOB on Job Actor)
* - GetJobDefinitionUpdatedFromJobActorResponse (resume job submit request)
* - GET JOB -> (pass thru Job Actor)
* - GET JOB SCHED INFO -> (pass thru Job Actor)
* - KILL JOB -> (pass thru Job Actor)
* - RESUBMIT WORKER -> (pass thru Job Actor)
* - SCALE JOB -> (pass thru Job Actor)
* - JOB INITED
* - JOB STARTED
* - GET LAST SUBMITTED JOB
*
* @return
*/
/**
 * Builds the {@link Receive} used while the cluster is disabled. Cluster-metadata
 * updates, cleanup events and re-enable requests are honored; job submission and
 * job queries are rejected with CLIENT_ERROR (or logged for fire-and-forget events).
 *
 * <p>Note: ReceiveBuilder matches messages in declaration order, so each message
 * type may appear at most once — later matchers for the same type are unreachable.
 * (Dead duplicate matchers for GetJobDetailsRequest and Terminated were removed.)
 *
 * @return the receive behavior for the disabled state
 */
private Receive buildDisabledBehavior() {
    String state = "disabled";
    return receiveBuilder()
        // EXPECTED MESSAGES BEGIN //
        .match(UpdateJobClusterRequest.class, this::onJobClusterUpdate)
        .match(UpdateJobClusterLabelsRequest.class, this::onJobClusterUpdateLabels)
        .match(UpdateJobClusterSLARequest.class, this::onJobClusterUpdateSLA)
        .match(UpdateJobClusterArtifactRequest.class, this::onJobClusterUpdateArtifact)
        .match(UpdateSchedulingInfo.class, this::onJobClusterUpdateSchedulingInfo)
        .match(UpdateJobClusterWorkerMigrationStrategyRequest.class, this::onJobClusterUpdateWorkerMigrationConfig)
        .match(GetJobClusterRequest.class, this::onJobClusterGet)
        .match(JobClusterProto.DeleteJobClusterRequest.class, this::onJobClusterDelete)
        .match(ListArchivedWorkersRequest.class, this::onListArchivedWorkers)
        .match(ListCompletedJobsInClusterRequest.class, this::onJobListCompleted)
        .match(JobClusterProto.KillJobResponse.class, this::onKillJobResponse)
        .match(GetJobDetailsRequest.class, this::onGetJobDetailsRequest)
        .match(WorkerEvent.class, this::onWorkerEvent)
        .match(JobClusterProto.ExpireOldJobsRequest.class, this::onExpireOldJobs)
        .match(EnableJobClusterRequest.class, this::onJobClusterEnable)
        .match(Terminated.class, this::onTerminated)
        // EXPECTED MESSAGES END //
        // UNEXPECTED MESSAGES BEGIN //
        // from user job submit request
        .match(SubmitJobRequest.class, (x) -> getSender().tell(new SubmitJobResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), empty() ), getSelf()))
        .match(GetJobDefinitionUpdatedFromJobActorResponse.class, (x) -> getSender().tell(new SubmitJobResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), empty() ), getSelf()))
        .match(ResubmitWorkerRequest.class, (x) -> getSender().tell(new ResubmitWorkerResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
        .match(JobProto.JobInitialized.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
        .match(JobStartedEvent.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
        .match(ScaleStageRequest.class, (x) -> getSender().tell(new ScaleStageResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), 0), getSelf()))
        // KillJobRequest replies to the requestor embedded in the message, not the sender.
        .match(KillJobRequest.class, (x) -> x.requestor.tell(new KillJobResponse(x.requestId, CLIENT_ERROR, JobState.Noop, genUnexpectedMsg(x.toString(), this.name, state), x.jobId, x.user), getSelf()))
        .match(GetJobSchedInfoRequest.class, (x) -> getSender().tell(new GetJobSchedInfoResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), empty()), getSelf()))
        .match(GetLatestJobDiscoveryInfoRequest.class, (x) -> getSender().tell(new GetLatestJobDiscoveryInfoResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), empty()), getSelf()))
        .match(GetLastSubmittedJobIdStreamRequest.class, (x) -> getSender().tell(new GetLastSubmittedJobIdStreamResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), empty()), getSelf()))
        .match(ListJobIdsRequest.class, (x) -> getSender().tell(new ListJobIdsResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), Lists.newArrayList()), getSelf()))
        .match(ListJobsRequest.class, (x) -> getSender().tell(new ListJobsResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), Lists.newArrayList()), getSelf()))
        .match(ListWorkersRequest.class, (x) -> getSender().tell(new ListWorkersResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), Lists.newArrayList()), getSelf()))
        .match(JobClusterProto.EnforceSLARequest.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
        .match(JobClusterProto.TriggerCronRequest.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
        // Disabling an already-disabled cluster is idempotent: reply SUCCESS.
        .match(DisableJobClusterRequest.class, (x) -> getSender().tell(new DisableJobClusterResponse(x.requestId, SUCCESS,"Cluster is already disabled"), getSelf()))
        .match(JobClusterProto.InitializeJobClusterRequest.class, (x) -> getSender().tell(new JobClustersManagerInitializeResponse(x.requestId, SUCCESS,"Cluster is already initialized"), getSelf()))
        // UNEXPECTED MESSAGES END //
        .matchAny(x -> logger.warn("unexpected message '{}' received by JobCluster actor {} in Disabled State", x, this.name))
        .build();
}
/**
 * Builds the standard warning/error text for a message that arrived while the
 * cluster actor was in a state where that message is not handled.
 *
 * @param event   string form of the unexpected message
 * @param cluster job cluster name
 * @param state   human-readable name of the actor's current state
 * @return the formatted diagnostic message
 */
private String genUnexpectedMsg(String event, String cluster, String state) {
    return "Unexpected message " + event
        + " received by JobCluster actor " + cluster
        + " in " + state + " State";
}
/**
* INITIAL BEHAVIOR
* EXPECTED EVENTS (1)
* - Init
*
*
* UNEXPECTED EVENTS (29)
* - UpdateJC
* - UpdateLabel
* - UpdateSLA
* - UpdateArtifact
* - UpdateMigrationStrat
* - ENABLE JC
* - GET CLUSTER
* - DELETE
* - LIST archived workers
* - LIST completed jobs
* - KILL JOB Response
* - JOB SHUTDOWN EVENT
* - EXPIRE OLD JOBS
* - WORKER EVENT ( KILL WORKER)
* - DISABLE JC
* - ENFORCE SLA
* - TRIGGER CRON
* - LIST JOB IDS
* - LIST JOBS
* - LIST WORKERS -> (pass thru to each Job Actor)
* - SUBMIT JOB -> (INIT JOB on Job Actor)
* - GetJobDefinitionUpdatedFromJobActorResponse (resume job submit request)
* - GET JOB -> (pass thru Job Actor)
* - GET JOB SCHED INFO -> (pass thru Job Actor)
* - KILL JOB -> (pass thru Job Actor)
* - RESUBMIT WORKER -> (pass thru Job Actor)
* - SCALE JOB -> (pass thru Job Actor)
* - JOB INITED
* - JOB STARTED
* - GET LAST SUBMITTED JOB
*
* @return
*/
/**
 * Builds the {@link Receive} for the uninitialized state: only an
 * InitializeJobClusterRequest is processed; every other request is rejected with
 * CLIENT_ERROR, and fire-and-forget events are logged.
 *
 * <p>Note: ReceiveBuilder matches messages in declaration order, so each message
 * type may appear at most once — a duplicate, unreachable matcher for
 * JobClusterProto.ExpireOldJobsRequest was removed.
 *
 * @return the receive behavior for the uninitialized state
 */
private Receive buildInitialBehavior() {
    String state = "Uninited";
    return receiveBuilder()
        // EXPECTED MESSAGES BEGIN //
        .match(JobClusterProto.InitializeJobClusterRequest.class, this::onJobClusterInitialize)
        // EXPECTED MESSAGES END //
        // UNEXPECTED MESSAGES BEGIN //
        .match(UpdateJobClusterRequest.class, (x) -> getSender().tell(new UpdateJobClusterResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
        .match(UpdateJobClusterLabelsRequest.class, (x) -> getSender().tell(new UpdateJobClusterLabelsResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
        .match(UpdateJobClusterSLARequest.class, (x) -> getSender().tell(new UpdateJobClusterSLAResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
        .match(UpdateJobClusterArtifactRequest.class, (x) -> getSender().tell(new UpdateJobClusterArtifactResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
        .match(UpdateSchedulingInfo.class, (x) -> getSender().tell(new UpdateSchedulingInfoResponse(x.getRequestId(), CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
        .match(UpdateJobClusterWorkerMigrationStrategyRequest.class, (x) -> getSender().tell(new UpdateJobClusterWorkerMigrationStrategyResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
        .match(GetJobClusterRequest.class, (x) -> getSender().tell(new GetJobClusterResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), empty() ), getSelf()))
        .match(JobClusterProto.DeleteJobClusterRequest.class, (x) -> getSender().tell(new DeleteJobClusterResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
        .match(ListArchivedWorkersRequest.class, (x) -> getSender().tell(new ListArchivedWorkersResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), Lists.newArrayList()), getSelf()))
        .match(ListCompletedJobsInClusterRequest.class, (x) -> getSender().tell(new ListCompletedJobsInClusterResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), Lists.newArrayList()), getSelf()))
        .match(JobClusterProto.KillJobResponse.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
        .match(GetJobDetailsRequest.class, (x) -> getSender().tell(new GetJobDetailsResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), empty()), getSelf()))
        .match(WorkerEvent.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
        .match(JobClusterProto.ExpireOldJobsRequest.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
        .match(EnableJobClusterRequest.class, (x) -> getSender().tell(new EnableJobClusterResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
        .match(SubmitJobRequest.class, (x) -> getSender().tell(new SubmitJobResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), empty() ), getSelf()))
        .match(GetJobDefinitionUpdatedFromJobActorResponse.class, (x) -> getSender().tell(new SubmitJobResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), empty() ), getSelf()))
        .match(ResubmitWorkerRequest.class, (x) -> getSender().tell(new ResubmitWorkerResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
        .match(JobProto.JobInitialized.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
        .match(JobStartedEvent.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
        .match(ScaleStageRequest.class, (x) -> getSender().tell(new ScaleStageResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), 0), getSelf()))
        .match(KillJobRequest.class, (x) -> getSender().tell(new KillJobResponse(x.requestId, CLIENT_ERROR, JobState.Noop, genUnexpectedMsg(x.toString(), this.name, state), x.jobId, x.user), getSelf()))
        .match(GetJobSchedInfoRequest.class, (x) -> getSender().tell(new GetJobSchedInfoResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), empty()), getSelf()))
        .match(GetLatestJobDiscoveryInfoRequest.class, (x) -> getSender().tell(new GetLatestJobDiscoveryInfoResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), empty()), getSelf()))
        .match(GetLastSubmittedJobIdStreamRequest.class, (x) -> getSender().tell(new GetLastSubmittedJobIdStreamResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), empty()), getSelf()))
        .match(ListJobIdsRequest.class, (x) -> getSender().tell(new ListJobIdsResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), Lists.newArrayList()), getSelf()))
        .match(ListJobsRequest.class, (x) -> getSender().tell(new ListJobsResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), Lists.newArrayList()), getSelf()))
        .match(ListWorkersRequest.class, (x) -> getSender().tell(new ListWorkersResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state), Lists.newArrayList()), getSelf()))
        .match(JobClusterProto.EnforceSLARequest.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
        .match(JobClusterProto.TriggerCronRequest.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
        .match(DisableJobClusterRequest.class, (x) -> getSender().tell(new DisableJobClusterResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
        .match(Terminated.class, this::onTerminated)
        // UNEXPECTED MESSAGES END //
        .matchAny(x -> logger.warn("unexpected message '{}' received by JobCluster actor {} in Uninited State", x, this.name))
        .build();
}
/**
* INITED BEHAVIOR
* EXPECTED EVENTS (29)
* - UpdateJC
* - UpdateLabel
* - UpdateSLA
* - UpdateArtifact
* - UpdateMigrationStrat
* - ENABLE JC
* - GET CLUSTER
* - DELETE
* - LIST archived workers
* - LIST completed jobs
* - KILL JOB Response
* - JOB SHUTDOWN EVENT
* - EXPIRE OLD JOBS
* - WORKER EVENT ( KILL WORKER)
* - DISABLE JC
* - ENFORCE SLA
* - TRIGGER CRON
* - LIST JOB IDS
* - LIST JOBS
* - LIST WORKERS -> (pass thru to each Job Actor)
* - SUBMIT JOB -> (INIT JOB on Job Actor)
* - GetJobDefinitionUpdatedFromJobActorResponse (resume job submit request)
* - GET JOB -> (pass thru Job Actor)
* - GET JOB SCHED INFO -> (pass thru Job Actor)
* - KILL JOB -> (pass thru Job Actor)
* - RESUBMIT WORKER -> (pass thru Job Actor)
* - SCALE JOB -> (pass thru Job Actor)
* - JOB INITED
* - JOB STARTED
* - GET LAST SUBMITTED JOB
*
* UNEXPECTED EVENTS (1)
* - Init
*
*
* @return
*/
private Receive buildInitializedBehavior() {
    String state = "Initialized";
    return receiveBuilder()
        // EXPECTED MESSAGES BEGIN //
        .match(UpdateJobClusterRequest.class, this::onJobClusterUpdate)
        .match(UpdateJobClusterLabelsRequest.class, this::onJobClusterUpdateLabels)
        .match(UpdateJobClusterSLARequest.class, this::onJobClusterUpdateSLA)
        .match(UpdateJobClusterArtifactRequest.class, this::onJobClusterUpdateArtifact)
        .match(UpdateSchedulingInfo.class, this::onJobClusterUpdateSchedulingInfo)
        .match(UpdateJobClusterWorkerMigrationStrategyRequest.class,
            this::onJobClusterUpdateWorkerMigrationConfig)
        // Enabling an already-enabled cluster is idempotent: reply SUCCESS.
        .match(EnableJobClusterRequest.class, (x) -> getSender().tell(
            new EnableJobClusterResponse(x.requestId, SUCCESS, genUnexpectedMsg(x.toString(),
                this.name, state)), getSelf()))
        .match(GetJobClusterRequest.class, this::onJobClusterGet)
        .match(JobClusterProto.DeleteJobClusterRequest.class, this::onJobClusterDelete)
        .match(ListArchivedWorkersRequest.class, this::onListArchivedWorkers)
        .match(ListCompletedJobsInClusterRequest.class, this::onJobListCompleted)
        .match(JobClusterProto.KillJobResponse.class, this::onKillJobResponse)
        .match(JobClusterProto.ExpireOldJobsRequest.class, this::onExpireOldJobs)
        .match(WorkerEvent.class, this::onWorkerEvent)
        .match(DisableJobClusterRequest.class, this::onJobClusterDisable)
        .match(JobClusterProto.EnforceSLARequest.class, this::onEnforceSLARequest)
        .match(JobClusterProto.BookkeepingRequest.class, this::onBookkeepingRequest)
        .match(JobClusterProto.TriggerCronRequest.class, this::onTriggerCron)
        .match(ListJobIdsRequest.class, this::onJobIdList)
        .match(ListJobsRequest.class, this::onJobList)
        .match(ListWorkersRequest.class, this::onListActiveWorkers)
        .match(SubmitJobRequest.class, this::onJobSubmit)
        .match(GetJobDefinitionUpdatedFromJobActorResponse.class, this::onGetJobDefinitionUpdatedFromJobActorResponse)
        .match(GetJobDetailsRequest.class, this::onGetJobDetailsRequest)
        .match(GetJobSchedInfoRequest.class, this::onGetJobStatusSubject)
        .match(GetLatestJobDiscoveryInfoRequest.class, this::onGetLatestJobDiscoveryInfo)
        .match(KillJobRequest.class, this::onJobKillRequest)
        .match(ResubmitWorkerRequest.class, this::onResubmitWorkerRequest)
        .match(JobProto.JobInitialized.class, this::onJobInitialized)
        .match(JobStartedEvent.class, this::onJobStarted)
        .match(GetLastSubmittedJobIdStreamRequest.class, this::onGetLastSubmittedJobIdSubject)
        .match(ScaleStageRequest.class, this::onScaleStage)
        // EXPECTED MESSAGES END //
        // UNEXPECTED MESSAGES BEGIN // (label corrected; this section pairs with the END marker below)
        .match(JobClusterProto.InitializeJobClusterRequest.class,(x) -> getSender().tell(
            new JobClustersManagerInitializeResponse(x.requestId, SUCCESS,
                "Cluster is already initialized"), getSelf()))
        // UNEXPECTED MESSAGES END //
        .match(Terminated.class, this::onTerminated)
        .matchAny(x -> {
            logger.info("unexpected message '{}' received by JobCluster actor {} in Initialized State."
                + "from class {}", x, this.name, x.getClass().getCanonicalName());
            // TODO getSender().tell();
        })
        .build();
}
/** Returns the per-cluster metric group id, tagged with the given cluster name. */
MetricGroupId getMetricGroupId(String name) {
    return new MetricGroupId("JobClusterActor", new BasicTag("jobCluster", name));
}
/** Logs actor start; no extra startup work beyond the Akka default. */
@Override
public void preStart() throws Exception {
    logger.info("JobClusterActor {} started", name);
    super.preStart();
}
/**
 * Logs actor stop and de-registers this cluster's metric group so the
 * MetricsRegistry does not keep entries for stopped/deleted clusters.
 */
@Override
public void postStop() throws Exception {
    logger.info("JobClusterActor {} stopped", name);
    super.postStop();
    if (name != null) {
        // de-register metrics from MetricsRegistry
        MetricsRegistry.getInstance().remove(getMetricGroupId(name));
    }
}
/**
 * On restart, deliberately does NOT call super.preRestart(): Akka's default
 * implementation stops all child actors, and the job actors supervised by
 * this cluster actor must survive a restart.
 */
@Override
public void preRestart(Throwable t, Optional<Object> m) throws Exception {
    logger.info("{} preRestart {} (exc: {})", name, m, t.getMessage());
    // do not kill all children, which is the default here
    // super.preRestart(t, m);
}
/** Logs the restart cause and runs the default post-restart handling. */
@Override
public void postRestart(Throwable reason) throws Exception {
    logger.info("{} postRestart (exc={})", name, reason.getMessage());
    super.postRestart(reason);
}
/**
 * Custom supervision: resume child actors on exception instead of the default
 * restart, so a transient failure in one job actor does not reset its state.
 */
@Override
public SupervisorStrategy supervisorStrategy() {
    // custom supervisor strategy to resume the child actors on Exception instead of the default restart
    return MantisActorSupervisorStrategy.getInstance().create();
}
/**
 * Starts (or replaces, via the fixed timer key) the periodic bookkeeping timer,
 * which delivers a BookkeepingRequest to this actor every checkAgainInSecs seconds.
 */
private void setBookkeepingTimer(long checkAgainInSecs) {
    getTimers().startPeriodicTimer(BOOKKEEPING_TIMER_KEY, new JobClusterProto.BookkeepingRequest(),
        Duration.ofSeconds(checkAgainInSecs));
}
/**
 * Starts (or replaces, via the fixed timer key) the periodic expired-jobs timer,
 * which delivers an ExpireOldJobsRequest to this actor every checkAgainInSecs seconds.
 */
private void setExpiredJobsTimer(long checkAgainInSecs) {
    getTimers().startPeriodicTimer(CHECK_EXPIRED_TIMER_KEY, new JobClusterProto.ExpireOldJobsRequest(),
        Duration.ofSeconds(checkAgainInSecs));
}
/**
 * Initialize-cluster request sent by the JobClustersManager. Invoked in two cases:
 * 1. Master boot-up: the cluster already exists in the DB.
 * 2. A new cluster is being created: requires initReq.createInStore to be set.
 *    If writing to the DB fails, a failure message is sent back; the caller
 *    should then kill this actor and inform upstream of the failure.
 *
 * <p>Disabled clusters transition straight to the disabled behavior after
 * cleaning up any lingering non-terminal jobs (capped at 50 per startup).
 *
 * @param initReq the initialization request carrying the cluster definition,
 *                active/completed job lists and the last-used job number
 */
@Override
public void onJobClusterInitialize(JobClusterProto.InitializeJobClusterRequest initReq) {
    ActorRef sender = getSender();
    logger.info("In onJobClusterInitialize {}", this.name);
    if (logger.isDebugEnabled()) {
        logger.debug("Init Request {}", initReq);
    }
    // Rebuild the in-memory cluster metadata from the request.
    jobClusterMetadata = new JobClusterMetadataImpl.Builder()
        .withLastJobCount(initReq.lastJobNumber)
        .withIsDisabled(initReq.isDisabled)
        .withJobClusterDefinition(initReq.jobClusterDefinition)
        .build();
    // create sla enforcer
    slaEnforcer = new SLAEnforcer(jobClusterMetadata.getJobClusterDefinition().getSLA());
    long expireFrequency = ConfigurationProvider.getConfig().getCompletedJobPurgeFrequencySeqs();
    // If cluster is disabled
    if(jobClusterMetadata.isDisabled()) {
        logger.info("Cluster {} initialized but is Disabled", jobClusterMetadata
            .getJobClusterDefinition().getName());
        // add completed jobs to cache to use when / if cluster is reenabled
        jobManager.addCompletedJobsToCache(initReq.completedJobsList);
        // Cap the number of stale jobs cleaned up in one startup pass to 50.
        int count = 50;
        if(!initReq.jobList.isEmpty()) {
            logger.info("Cluster {} is disabled however it has {} active/accepted jobs",
                jobClusterMetadata.getJobClusterDefinition().getName(), initReq.jobList.size());
            for(IMantisJobMetadata jobMeta : initReq.jobList) {
                try {
                    if(count == 0) {
                        logger.info("Max cleanup limit of 50 reached abort");
                        break;
                    }
                    // Any job left in a non-terminal state on a disabled cluster is
                    // force-completed and archived.
                    if(!JobState.isTerminalState(jobMeta.getState())) {
                        logger.info("Job {} is in non terminal state {} for disabled cluster {}."
                            + "Marking it complete", jobMeta.getJobId(), jobMeta.getState(),
                            jobClusterMetadata.getJobClusterDefinition().getName());
                        count--;
                        jobManager.markCompletedDuringStartup(jobMeta.getJobId(), System.currentTimeMillis(),
                            jobMeta, JobState.Completed);
                        jobStore.archiveJob(jobMeta);
                    }
                } catch (Exception e) {
                    // Best-effort cleanup: log and continue with the next job.
                    logger.error("Exception {} archiving job {} during init ",e.getMessage(), jobMeta.getJobId(), e);
                }
            }
        }
        sender.tell(new JobClusterProto.InitializeJobClusterResponse(initReq.requestId, SUCCESS,
            String.format("JobCluster %s initialized successfully. But is currently disabled",
                initReq.jobClusterDefinition.getName()),initReq.jobClusterDefinition.getName(),
            initReq.requestor), getSelf());
        logger.info("Job expiry check frequency set to {}", expireFrequency);
        setExpiredJobsTimer(expireFrequency);
        getContext().become(disabledBehavior);
        return;
    } else {
        // new cluster initialization
        if (initReq.createInStore) {
            try {
                jobStore.createJobCluster(jobClusterMetadata);
                eventPublisher.publishAuditEvent(
                    new LifecycleEventsProto.AuditEvent(
                        LifecycleEventsProto.AuditEvent.AuditEventType.JOB_CLUSTER_CREATE,
                        jobClusterMetadata.getJobClusterDefinition().getName(),
                        "saved job cluster " + name)
                );
                logger.info("successfully saved job cluster {}", name);
                numJobClustersInitialized.increment();
            } catch (final JobClusterAlreadyExistsException exists) {
                numJobClusterInitializeFailures.increment();
                logger.error("job cluster not created");
                sender.tell(new JobClusterProto.InitializeJobClusterResponse(initReq.requestId, CLIENT_ERROR,
                    String.format("JobCluster %s already exists",
                        initReq.jobClusterDefinition.getName()),
                    initReq.jobClusterDefinition.getName(), initReq.requestor), getSelf());
                // TODO: handle case when job cluster exists in store but Job cluster actor is not running
                return;
            } catch (final Exception e) {
                numJobClusterInitializeFailures.increment();
                logger.error("job cluster not created due to {}", e.getMessage(), e);
                sender.tell(new JobClusterProto.InitializeJobClusterResponse(initReq.requestId,
                    SERVER_ERROR, String.format("JobCluster %s not created due to %s",
                        initReq.jobClusterDefinition.getName(), Throwables.getStackTraceAsString(e)),
                    initReq.jobClusterDefinition.getName(), initReq.requestor), getSelf());
                // TODO: send PoisonPill to self if job cluster was not created ? Return'ing for now,
                // so we don't send back 2 InitJobClusterResponses
                return;
            }
        }
        // Cron setup failure is non-fatal: the cluster still initializes.
        try {
            cronManager = new CronManager(name, getSelf(), jobClusterMetadata.getJobClusterDefinition().getSLA());
        } catch (Exception e) {
            logger.warn("Exception initializing cron", e);
        }
        // Bootstrap active jobs (creates job actors); the success response is sent
        // from that flow — NOTE(review): presumably inside initRunningJobs; confirm.
        initRunningJobs(initReq, sender);
        setExpiredJobsTimer(expireFrequency);
        logger.info("Job expiry check frequency set to {}", expireFrequency);
        try {
            jobManager.addCompletedJobsToCache(initReq.completedJobsList);
        } catch(Exception e) {
            // Non-fatal: completed-jobs cache can be rebuilt lazily.
            logger.warn("Exception initializing completed jobs " + e.getMessage(), e);
        }
    }
}
/**
 * Iterate through the list of jobs in the Active jobs table:
 * - if a job is in a terminal state, move it to the completed/archive tables
 * - otherwise bootstrap it (create a job actor and send an init request).
 * When the stream completes, push the last JobId, start the bookkeeping timer,
 * switch to the initialized behavior and reply SUCCESS to the sender.
 * @param initReq initialization request carrying the active and completed job lists
 * @param sender actor to reply to once initialization completes
 */
private void initRunningJobs(JobClusterProto.InitializeJobClusterRequest initReq, ActorRef sender) {
    List<CompletedJob> completedJobsList = initReq.completedJobsList;
    List<IMantisJobMetadata> jobList = initReq.jobList;
    logger.info("In _initJobs for cluster {}: {} activeJobs and {} completedJobs", name, jobList.size(),
            completedJobsList.size());
    if (logger.isDebugEnabled()) {
        logger.debug("In _initJobs for cluster {} activeJobs -> {} and completedJobs -> {}", name, jobList,
                completedJobsList);
    }
    Observable.from(jobList)
        .flatMap((jobMeta) -> {
            if (JobState.isTerminalState(jobMeta.getState())) {
                // Job already finished; archive it instead of bootstrapping
                jobManager.persistToCompletedJobAndArchiveJobTables(jobMeta);
                return Observable.empty();
            } else {
                if (jobMeta.getSchedulingInfo() == null) {
                    // Cannot bootstrap without scheduling info; skip this job
                    logger.error("Scheduling info is null for active job {} in cluster {}."
                            + "Skipping bootstrap ", jobMeta.getJobId(), name);
                    return Observable.empty();
                } else {
                    return Observable.just(jobMeta);
                }
            }
        })
        .flatMap((jobMeta) -> jobManager.bootstrapJob((MantisJobMetadataImpl) jobMeta, this.jobClusterMetadata))
        .subscribe((jobInited) -> {
            logger.info("Job Id {} initialized with code {}", jobInited.jobId, jobInited.responseCode);
        },
        // Fix: pass the throwable so the stack trace is logged (previously only the message).
        // NOTE(review): on error the sender never receives an InitializeJobClusterResponse --
        // confirm the caller has a timeout covering this path.
        (error) -> logger.warn("Exception initializing jobs {}", error.getMessage(), error)
        , () -> {
            // Push the last jobId so future submissions continue the numbering sequence
            if (initReq.jobList.size() > 0) {
                JobId lastJobId = new JobId(this.name, initReq.lastJobNumber);
                this.jobIdSubmissionSubject.onNext(lastJobId);
            }
            setBookkeepingTimer(BOOKKEEPING_INTERVAL_SECS);
            getContext().become(initializedBehavior);
            logger.info("Job Cluster {} initialized", this.name);
            sender.tell(new JobClusterProto.InitializeJobClusterResponse(initReq.requestId, SUCCESS,
                    String.format("JobCluster %s initialized successfully",
                            initReq.jobClusterDefinition.getName()),
                    initReq.jobClusterDefinition.getName(), initReq.requestor), getSelf());
        }
        );
}
@Override
public void onJobClusterUpdate(final UpdateJobClusterRequest request) {
    final String name = request.getJobClusterDefinition().getName();
    final ActorRef sender = getSender();
    String givenArtifactVersion = request.getJobClusterDefinition().getJobClusterConfig().getVersion();
    // Reject updates whose artifact version collides with an already-registered config version
    if (!isVersionUnique(givenArtifactVersion, jobClusterMetadata.getJobClusterDefinition()
            .getJobClusterConfigs())) {
        String msg = String.format("Job cluster %s not updated as the version %s is not unique", name,
                givenArtifactVersion);
        logger.error(msg);
        sender.tell(new UpdateJobClusterResponse(request.requestId, CLIENT_ERROR, msg), getSelf());
        return;
    }
    IJobClusterDefinition currentJobClusterDefinition = jobClusterMetadata.getJobClusterDefinition();
    // Merge the incoming configs into the existing definition; other fields are overridden
    JobClusterDefinitionImpl mergedJobClusterDefinition = new JobClusterDefinitionImpl.Builder()
            .mergeConfigsAndOverrideRest(currentJobClusterDefinition, request.getJobClusterDefinition()).build();
    IJobClusterMetadata jobCluster = new JobClusterMetadataImpl.Builder()
            .withIsDisabled(jobClusterMetadata.isDisabled())
            .withLastJobCount(jobClusterMetadata.getLastJobCount())
            .withJobClusterDefinition(mergedJobClusterDefinition)
            .build();
    try {
        updateAndSaveJobCluster(jobCluster);
        sender.tell(new UpdateJobClusterResponse(request.requestId, SUCCESS, name
                + " Job cluster updated"), getSelf());
        numJobClusterUpdate.increment();
    } catch (Exception e) {
        // Fix: log the actual failure with the exception (was a misleading
        // "job cluster not created" message with no stack trace)
        logger.error("job cluster {} not updated", name, e);
        sender.tell(new UpdateJobClusterResponse(request.requestId, SERVER_ERROR, name
                + " Job cluster updation failed " + e.getMessage()), getSelf());
        numJobClusterUpdateErrors.increment();
    }
}
@Override
public void onJobClusterDelete(final JobClusterProto.DeleteJobClusterRequest request) {
    final ActorRef sender = getSender();
    try {
        if (jobManager.isJobListEmpty()) {
            // Safe to delete: purge completed-job records then remove the cluster from the store
            jobManager.cleanupAllCompletedJobs();
            jobStore.deleteJobCluster(name);
            logger.info("successfully deleted job cluster {}", name);
            eventPublisher.publishAuditEvent(
                    new LifecycleEventsProto.AuditEvent(LifecycleEventsProto.AuditEvent.AuditEventType.JOB_CLUSTER_DELETE, name, name + " deleted")
            );
            sender.tell(new JobClusterProto.DeleteJobClusterResponse(request.requestId, SUCCESS, name + " deleted", request.requestingActor, name), getSelf());
            numJobClusterDelete.increment();
        } else {
            // Deletion is disallowed while jobs are still active
            logger.warn("job cluster {} cannot be deleted as it has active jobs", name);
            sender.tell(new JobClusterProto.DeleteJobClusterResponse(request.requestId, CLIENT_ERROR, name + " Job cluster deletion failed as there are active jobs", request.requestingActor, name), getSelf());
        }
    } catch (Exception e) {
        // Fix: include the exception so the failure cause is visible in the logs
        logger.error("job cluster {} not deleted", name, e);
        sender.tell(new JobClusterProto.DeleteJobClusterResponse(request.requestId, SERVER_ERROR, name + " Job cluster deletion failed " + e.getMessage(), request.requestingActor, name), getSelf());
        numJobClusterDeleteErrors.increment();
    }
}
@Override
public void onJobIdList(final ListJobIdsRequest request) {
    if (logger.isTraceEnabled()) { logger.trace("Entering JCA:onJobIdList"); }
    final ActorRef sender = getSender();
    Set<JobId> jobIdsFilteredByLabelsSet = new HashSet<>();
    // If labels criterion is given prefilter by labels
    if (!request.getCriteria().getMatchingLabels().isEmpty()) {
        jobIdsFilteredByLabelsSet = jobManager.getJobsMatchingLabels(request.getCriteria().getMatchingLabels(), request.getCriteria().getLabelsOperand());
        // Found no matching jobs for given labels: reply with an empty list and exit
        if (jobIdsFilteredByLabelsSet.isEmpty()) {
            // Fix: reply with this actor as the Akka sender ref (was erroneously the requester itself)
            sender.tell(new ListJobIdsResponse(request.requestId, SUCCESS, "No JobIds match given Label criterion", new ArrayList<>()), getSelf());
            if (logger.isTraceEnabled()) { logger.trace("Exit JCA:onJobIdList"); }
            return;
        }
    }
    // Found jobs matching labels or no labels criterion given.
    List<JobIdInfo> jobIdList;
    // Apply additional filtering to non terminal jobs
    jobIdList = getFilteredNonTerminalJobIdList(request.filters, jobIdsFilteredByLabelsSet);
    if (!request.getCriteria().getActiveOnly().orElse(true)) {
        // Caller asked for completed jobs as well
        jobIdList.addAll(getFilteredTerminalJobIdList(request.filters, jobIdsFilteredByLabelsSet));
    }
    sender.tell(new ListJobIdsResponse(request.requestId, SUCCESS, "", jobIdList), getSelf());
    if (logger.isTraceEnabled()) { logger.trace("Exit JCA:onJobIdList"); }
}
@Override
public void onJobList(final ListJobsRequest request) {
    // Fix: use debug level to match the isDebugEnabled() guard (was logger.info)
    if (logger.isDebugEnabled()) { logger.debug("Entering JCA:onJobList"); }
    final ActorRef sender = getSender();
    final ActorRef self = getSelf();
    Set<JobId> jobIdsFilteredByLabelsSet = new HashSet<>();
    // If labels criterion is given prefilter by labels
    if (!request.getCriteria().getMatchingLabels().isEmpty()) {
        jobIdsFilteredByLabelsSet = jobManager.getJobsMatchingLabels(request.getCriteria().getMatchingLabels(), request.getCriteria().getLabelsOperand());
        // Found no jobs matching labels: reply with an empty list and exit
        if (jobIdsFilteredByLabelsSet.isEmpty()) {
            if (logger.isTraceEnabled()) { logger.trace("Exit JCA:onJobList {}", jobIdsFilteredByLabelsSet.size()); }
            sender.tell(new ListJobsResponse(request.requestId, SUCCESS, "", new ArrayList<>()), self);
            return;
        }
    }
    // Found jobs matching labels or no labels criterion given.
    // Apply additional criterion to both active and completed jobs, then reply with the merged list
    getFilteredNonTerminalJobList(request.getCriteria(), jobIdsFilteredByLabelsSet).mergeWith(getFilteredTerminalJobList(request.getCriteria(), jobIdsFilteredByLabelsSet))
        .collect(() -> Lists.<MantisJobMetadataView>newArrayList(), List::add)
        .doOnNext(resultList -> {
            if (logger.isTraceEnabled()) { logger.trace("Exit JCA:onJobList {}", resultList.size()); }
            sender.tell(new ListJobsResponse(request.requestId, SUCCESS, "", resultList), self);
        })
        .subscribe();
}
@Override
public void onListArchivedWorkers(final ListArchivedWorkersRequest request) {
    // Fetch archived workers for the given job from the store and reply with at most 'limit' entries.
    if (logger.isTraceEnabled()) { logger.trace("In onListArchiveWorkers {}", request); }
    try {
        List<IMantisWorkerMetadata> archivedWorkers = jobStore.getArchivedWorkers(request.getJobId().getId());
        // Truncate to the requested limit
        int cap = Math.min(archivedWorkers.size(), request.getLimit());
        List<IMantisWorkerMetadata> result = archivedWorkers.subList(0, cap);
        if (logger.isTraceEnabled()) { logger.trace("Returning {} archived Workers", result.size()); }
        getSender().tell(new ListArchivedWorkersResponse(request.requestId, SUCCESS, "", result), getSelf());
    } catch (Exception e) {
        logger.error("Exception listing archived workers", e);
        getSender().tell(new ListArchivedWorkersResponse(request.requestId, SERVER_ERROR, "Exception getting archived workers for job " + request.getJobId() + " -> " + e.getMessage(), Lists.newArrayList()), getSelf());
    }
}
public void onListActiveWorkers(final ListWorkersRequest r) {
    // Forward the request to the owning job actor if the job is active;
    // otherwise reply directly with a client error.
    if (logger.isTraceEnabled()) { logger.trace("Enter JobClusterActor:onListActiveWorkers {}", r); }
    Optional<JobInfo> jobInfo = jobManager.getJobInfoForNonTerminalJob(r.getJobId());
    if (!jobInfo.isPresent()) {
        logger.warn("No such active job {} ", r.getJobId());
        getSender().tell(new ListWorkersResponse(r.requestId, CLIENT_ERROR, "No such active job " + r.getJobId(), Lists.newArrayList()), getSelf());
    } else {
        jobInfo.get().jobActor.forward(r, getContext());
    }
    if (logger.isTraceEnabled()) { logger.trace("Exit JobClusterActor:onListActiveWorkers {}", r); }
}
/**
 * Builds JobIdInfo entries for non-terminal jobs, optionally restricted to a
 * prefiltered (by-label) set of job ids, capped at the requested limit.
 * @param request list criteria (state filter, limit)
 * @param prefilteredJobIdSet job ids already matched by label filtering; empty means "all"
 * @return JobIdInfo list for matching non-terminal jobs
 */
private List<JobIdInfo> getFilteredNonTerminalJobIdList(ListJobCriteria request, Set<JobId> prefilteredJobIdSet) {
    if (logger.isTraceEnabled()) { logger.trace("Enter JobClusterActor:getFilteredNonTerminalJobIdList {}", request); }
    // A Terminal state filter can never match non-terminal jobs
    if ((request.getJobState().isPresent() && request.getJobState().get().equals(JobState.MetaState.Terminal))) {
        if (logger.isTraceEnabled()) { logger.trace("Exit JobClusterActor:getFilteredNonTerminalJobIdList with empty"); }
        return Collections.emptyList();
    }
    List<JobInfo> jobInfoList;
    if (!prefilteredJobIdSet.isEmpty()) {
        jobInfoList = prefilteredJobIdSet.stream()
                .map(jobManager::getJobInfoForNonTerminalJob)
                .filter(Optional::isPresent)
                .map(Optional::get)
                .collect(Collectors.toList());
    } else {
        jobInfoList = jobManager.getAllNonTerminalJobsList();
    }
    List<JobInfo> shortenedList = jobInfoList.subList(0, Math.min(jobInfoList.size(), request.getLimit().orElse(DEFAULT_LIMIT)));
    // Fix: removed stray double semicolon after the collect() call
    List<JobIdInfo> jIdList = shortenedList.stream()
            .map((JobInfo jInfo) -> new JobIdInfo.Builder()
                    .withJobId(jInfo.jobId)
                    .withJobState(jInfo.state)
                    .withSubmittedAt(jInfo.submittedAt)
                    .withTerminatedAt(jInfo.terminatedAt)
                    .withUser(jInfo.user)
                    .withVersion(jInfo.jobDefinition.getVersion())
                    .build())
            .collect(Collectors.toList());
    if (logger.isTraceEnabled()) { logger.trace("Exit JobClusterActor:getFilteredNonTerminalJobIdList {}", jIdList.size()); }
    return jIdList;
}
/**
 * Builds JobIdInfo entries for completed (terminal) jobs, optionally restricted to a
 * prefiltered (by-label) set of job ids, capped at the requested limit.
 * @param request list criteria (state filter, activeOnly flag, limit)
 * @param prefilteredJobIdSet job ids already matched by label filtering; empty means "all"
 * @return JobIdInfo list for matching completed jobs
 */
private List<JobIdInfo> getFilteredTerminalJobIdList(ListJobCriteria request, Set<JobId> prefilteredJobIdSet) {
    if (logger.isTraceEnabled()) { logger.trace("Enter JobClusterActor:getFilteredTerminalJobIdList {}", request); }
    // Nothing to return when the caller filters on a non-Terminal state,
    // or gave no state filter but asked for active jobs only.
    boolean nonTerminalStateRequested = request.getJobState().isPresent()
            && !request.getJobState().get().equals(JobState.MetaState.Terminal);
    boolean activeOnlyRequested = !request.getJobState().isPresent()
            && (request.getActiveOnly().isPresent() && request.getActiveOnly().get());
    if (nonTerminalStateRequested || activeOnlyRequested) {
        if (logger.isTraceEnabled()) { logger.trace("Exit JobClusterActor:getFilteredTerminalJobIdList with empty"); }
        return Collections.emptyList();
    }
    List<CompletedJob> candidates;
    if (prefilteredJobIdSet.isEmpty()) {
        candidates = jobManager.getCompletedJobsList();
    } else {
        candidates = prefilteredJobIdSet.stream()
                .map(jobManager::getCompletedJob)
                .filter(Optional::isPresent)
                .map(Optional::get)
                .collect(Collectors.toList());
    }
    int cap = Math.min(candidates.size(), request.getLimit().orElse(DEFAULT_LIMIT));
    List<JobIdInfo> completedJobIdList = candidates.subList(0, cap).stream()
            .map((CompletedJob cJob) -> new JobIdInfo.Builder()
                    .withJobIdStr(cJob.getJobId())
                    .withVersion(cJob.getVersion())
                    .withUser(cJob.getUser())
                    .withSubmittedAt(cJob.getSubmittedAt())
                    .withTerminatedAt(cJob.getTerminatedAt())
                    .withJobState(cJob.getState())
                    .build())
            .filter(Objects::nonNull)
            .collect(Collectors.toList());
    if (logger.isTraceEnabled()) { logger.trace("Exit JobClusterActor:getFilteredTerminalJobIdList {}", completedJobIdList.size()); }
    return completedJobIdList;
}
/**
 * Returns an Observable of job metadata views for non-terminal jobs matching the criteria.
 * Each job's details are fetched from its job actor via ask() with a 500ms timeout;
 * jobs whose actors fail to respond in time are logged and dropped from the result.
 * @param request list criteria (state filter, limit, stage/worker filters)
 * @param prefilteredJobIdSet job ids already matched by label filtering; empty means "all"
 */
private Observable<MantisJobMetadataView> getFilteredNonTerminalJobList(ListJobCriteria request, Set<JobId> prefilteredJobIdSet) {
    if(logger.isTraceEnabled()) { logger.trace("Entering JobClusterActor:getFilteredNonTerminalJobList"); }
    // Per-job ask timeout for fetching details from the job actor
    Duration timeout = Duration.ofMillis(500);
    // A Terminal state filter can never match non-terminal jobs
    if((request.getJobState().isPresent() && request.getJobState().get().equals(JobState.MetaState.Terminal))) {
        if(logger.isTraceEnabled()) { logger.trace("Exit JobClusterActor:getFilteredNonTerminalJobList with empty"); }
        return Observable.empty();
    }
    List<JobInfo> jobInfoList;
    //
    if(!prefilteredJobIdSet.isEmpty()) {
        jobInfoList = prefilteredJobIdSet.stream().map((jId) -> jobManager.getJobInfoForNonTerminalJob(jId))
                .filter((jInfoOp) -> jInfoOp.isPresent()).map((jInfoOp) -> jInfoOp.get()).collect(Collectors.toList());
    } else {
        // no prefiltering applied start with complete set of non terminal jobs
        jobInfoList = jobManager.getAllNonTerminalJobsList();
    }
    List<JobInfo> shortenedList = jobInfoList.subList(0, Math.min(jobInfoList.size(), request.getLimit().orElse(DEFAULT_ACTIVE_JOB_LIMIT)));
    if(logger.isDebugEnabled()) { logger.debug("List of non terminal jobs {}", jobInfoList); }
    return Observable.from(shortenedList)
            .flatMap((jInfo) -> {
                // Ask the job actor for its details; bridge the CompletionStage into an Observable
                GetJobDetailsRequest req = new GetJobDetailsRequest("system", jInfo.jobId);
                CompletionStage<GetJobDetailsResponse> respCS = ask(jInfo.jobActor, req, timeout)
                        .thenApply(GetJobDetailsResponse.class::cast);
                return Observable.from(respCS.toCompletableFuture(), Schedulers.io())
                        .onErrorResumeNext(ex -> {
                            // Ask timeouts / actor failures: log and skip this job
                            logger.warn("caught exception {}", ex.getMessage(), ex);
                            return Observable.empty();
                        });
            })
            .filter((resp) -> resp != null && resp.getJobMetadata().isPresent())
            .map((resp) -> resp.getJobMetadata().get())
            .map((metaData) -> new MantisJobMetadataView(metaData, request.getStageNumberList(),
                    request.getWorkerIndexList(), request.getWorkerNumberList(), request.getWorkerStateList(),false));
}
/**
 * Returns an Observable of metadata views for completed (terminal) jobs matching the criteria.
 * Decision table (JobState filter x ActiveOnly flag -> include terminal jobs?):
 * JobState ActiveOnly Execute?
 * None None Y
 * None TRUE N
 * None FALSE Y
 * Active None N
 * Active TRUE N
 * Active FALSE N
 * Terminal None Y
 * Terminal TRUE Y
 * Terminal FALSE Y
 * @param request list criteria (state filter, activeOnly flag, limit, stage/worker filters)
 * @param jobIdSet job ids already matched by label filtering; empty means "all"
 * @return Observable of metadata views for matching completed jobs
 */
private Observable<MantisJobMetadataView> getFilteredTerminalJobList(ListJobCriteria request, Set<JobId> jobIdSet) {
    if(logger.isTraceEnabled()) { logger.trace("JobClusterActor:getFilteredTerminalJobList"); }
    // Excluded when the caller filters on a non-Terminal state...
    if((request.getJobState().isPresent() && !request.getJobState().get().equals(JobState.MetaState.Terminal))) {
        if(logger.isTraceEnabled()) { logger.trace("Exit JobClusterActor:getFilteredTerminalJobList with empty"); }
        return Observable.empty();
    } else if(!request.getJobState().isPresent() && (request.getActiveOnly().isPresent() && request.getActiveOnly().get())) {
        // ...or when no state filter is given but activeOnly == TRUE (see table above)
        if(logger.isTraceEnabled()) { logger.trace("Exit JobClusterActor:getFilteredTerminalJobList with empty"); }
        return Observable.empty();
    }
    List<CompletedJob> jobInfoList;
    if(!jobIdSet.isEmpty()) {
        jobInfoList = jobIdSet.stream().map((jId) -> jobManager.getCompletedJob(jId))
                .filter((compJobOp) -> compJobOp.isPresent()).map((compJobOp) -> compJobOp.get()).collect(Collectors.toList());
    } else {
        jobInfoList = jobManager.getCompletedJobsList();
    }
    List<CompletedJob> shortenedList = jobInfoList.subList(0, Math.min(jobInfoList.size(), request.getLimit().orElse(DEFAULT_LIMIT)));
    return Observable.from(shortenedList)
            // terminatedAt comes from completed Job hence the different structure
            .flatMap((cJob) -> {
                try {
                    if(logger.isDebugEnabled()) { logger.debug("Fetching details for completed job {}", cJob); }
                    // Full metadata for a completed job is loaded from the (archive) store on demand
                    Optional<IMantisJobMetadata> metaOp = jobManager.getJobDataForCompletedJob(cJob.getJobId());
                    if(metaOp.isPresent()) {
                        if(logger.isDebugEnabled()) { logger.debug ("Fetched details for completed job {} -> {}", cJob, metaOp.get()); }
                        return Observable.just(new MantisJobMetadataView(metaOp.get(),cJob.getTerminatedAt(), request.getStageNumberList(),
                                request.getWorkerIndexList(), request.getWorkerNumberList(), request.getWorkerStateList(),false));
                    }
                } catch(Exception e) {
                    // Jobs whose archived data cannot be read are skipped
                    logger.error("caught exception", e);
                    return Observable.empty();
                }
                return Observable.empty();
            });
}
@Override
public void onJobListCompleted(final ListCompletedJobsInClusterRequest request) {
    if (logger.isTraceEnabled()) { logger.trace("Enter onJobListCompleted {}", request); }
    final ActorRef sender = getSender();
    List<CompletedJob> completedJobsList = jobManager.getCompletedJobsList();
    // Fix: truncate when there are MORE jobs than the limit. The original inverted check
    // (limit > size) made subList(0, limit) throw IndexOutOfBoundsException whenever it fired,
    // and never truncated otherwise.
    if (request.getLimit() < completedJobsList.size()) {
        completedJobsList = completedJobsList.subList(0, request.getLimit());
    }
    // Fix: reply with this actor as the Akka sender ref (was erroneously the requester itself)
    sender.tell(new ListCompletedJobsInClusterResponse(request.requestId, SUCCESS, "", completedJobsList), getSelf());
    if (logger.isTraceEnabled()) { logger.trace("Exit onJobListCompleted {}", completedJobsList.size()); }
}
@Override
public void onJobClusterDisable(final DisableJobClusterRequest req) {
    if (logger.isTraceEnabled()) { logger.trace("Enter onJobClusterDisable {}", req); }
    ActorRef sender = getSender();
    try {
        IJobClusterMetadata jobClusterMetadata = new JobClusterMetadataImpl.Builder().withIsDisabled(true)
                .withLastJobCount(this.jobClusterMetadata.getLastJobCount())
                .withJobClusterDefinition((JobClusterDefinitionImpl) this.jobClusterMetadata.getJobClusterDefinition())
                .build();
        // update store
        jobStore.updateJobCluster(jobClusterMetadata);
        this.jobClusterMetadata = jobClusterMetadata;
        // Fix: cronManager may be null if cron initialization failed during actor init
        // (its construction is wrapped in try/catch; the enable path already null-checks)
        if (cronManager != null) {
            cronManager.destroyCron();
        }
        // change behavior to disabled
        getContext().become(disabledBehavior);
        // send kill requests for all non terminal jobs
        List<JobInfo> jobsToKill = new ArrayList<>();
        jobsToKill.addAll(jobManager.getAcceptedJobsList());
        jobsToKill.addAll(jobManager.getActiveJobsList());
        for (JobInfo jobInfo : jobsToKill) {
            jobInfo.jobActor.tell(
                    new KillJobRequest(
                            jobInfo.jobId, "Job cluster disabled", JobCompletedReason.Killed, req.getUser(), ActorRef.noSender()),
                    getSelf());
        }
        // disable SLA check timers
        getTimers().cancel(BOOKKEEPING_TIMER_KEY);
        eventPublisher.publishAuditEvent(
                new LifecycleEventsProto.AuditEvent(LifecycleEventsProto.AuditEvent.AuditEventType.JOB_CLUSTER_DISABLED,
                        jobClusterMetadata.getJobClusterDefinition().getName(),
                        name + " disabled")
        );
        sender.tell(new DisableJobClusterResponse(req.requestId, SUCCESS, String.format("%s disabled", name)), getSelf());
        numJobClusterDisable.increment();
        // Fix: log message typo ("disabbled")
        logger.info("Job Cluster {} is disabled", this.name);
    } catch (Exception e) {
        String errorMsg = "Exception disabling cluster " + name + " due to " + e.getMessage();
        logger.error(errorMsg, e);
        sender.tell(new DisableJobClusterResponse(req.requestId, SERVER_ERROR, errorMsg), getSelf());
        numJobClusterDisableErrors.increment();
    }
    if (logger.isTraceEnabled()) { logger.trace("Exit onJobClusterDisable"); }
}
@Override
public void onJobClusterEnable(final EnableJobClusterRequest req) {
    if (logger.isTraceEnabled()) { logger.trace("Enter onJobClusterEnable"); }
    ActorRef sender = getSender();
    try {
        IJobClusterMetadata jobClusterMetadata = new JobClusterMetadataImpl.Builder().withIsDisabled(false)
                .withLastJobCount(this.jobClusterMetadata.getLastJobCount())
                .withJobClusterDefinition((JobClusterDefinitionImpl) this.jobClusterMetadata.getJobClusterDefinition())
                .build();
        // update store
        jobStore.updateJobCluster(jobClusterMetadata);
        this.jobClusterMetadata = jobClusterMetadata;
        // cron may never have been created if initialization failed earlier
        if (cronManager == null) {
            cronManager = new CronManager(name, getSelf(), jobClusterMetadata.getJobClusterDefinition().getSLA());
        }
        this.cronManager.initCron();
        // change behavior to enabled
        getContext().become(initializedBehavior);
        // start SLA timer
        setBookkeepingTimer(BOOKKEEPING_INTERVAL_SECS);
        eventPublisher.publishAuditEvent(
                new LifecycleEventsProto.AuditEvent(LifecycleEventsProto.AuditEvent.AuditEventType.JOB_CLUSTER_ENABLED,
                        this.jobClusterMetadata.getJobClusterDefinition().getName(), name + " enabled")
        );
        sender.tell(new EnableJobClusterResponse(req.requestId, SUCCESS, String.format("%s enabled", name)), getSelf());
        numJobClusterEnable.increment();
        logger.info("Job Cluster {} is Enabled", this.name);
    } catch (Exception e) {
        String errorMsg = String.format("Exception enabling cluster %s due to %s", name, e.getMessage());
        logger.error(errorMsg, e);
        sender.tell(new EnableJobClusterResponse(req.requestId, SERVER_ERROR, errorMsg), getSelf());
        numJobClusterEnableErrors.increment();
    }
    // Fix: exit trace previously logged "Enter onJobClusterEnable"
    if (logger.isTraceEnabled()) { logger.trace("Exit onJobClusterEnable"); }
}
@Override
public void onJobClusterGet(final GetJobClusterRequest request) {
    // Reply with this cluster's metadata view, or a client error if the request
    // was routed to the wrong cluster actor.
    final ActorRef sender = getSender();
    if (logger.isTraceEnabled()) { logger.trace("In JobCluster Get " + jobClusterMetadata); }
    if (!this.name.equals(request.getJobClusterName())) {
        sender.tell(new GetJobClusterResponse(request.requestId, CLIENT_ERROR, "Cluster Name " + request.getJobClusterName() + " in request Does not match cluster Name " + this.name + " of Job Cluster Actor", Optional.empty()), getSelf());
    } else {
        boolean cronActive = ofNullable(this.cronManager).map(x -> x.isCronActive).orElse(false);
        MantisJobClusterMetadataView clusterView = generateJobClusterMetadataView(this.jobClusterMetadata, this.jobClusterMetadata.isDisabled(), cronActive);
        sender.tell(new GetJobClusterResponse(request.requestId, SUCCESS, "", of(clusterView)), getSelf());
    }
    if (logger.isTraceEnabled()) { logger.trace("Exit onJobClusterGet"); }
}
/**
 * Flattens the given cluster metadata (plus the supplied disabled/cron flags)
 * into the API view object returned by onJobClusterGet.
 */
private MantisJobClusterMetadataView generateJobClusterMetadataView(IJobClusterMetadata jobClusterMetadata, boolean isDisabled, boolean cronActive) {
    IJobClusterDefinition definition = jobClusterMetadata.getJobClusterDefinition();
    return new MantisJobClusterMetadataView.Builder()
            .withName(definition.getName())
            .withDisabled(isDisabled)
            .withIsReadyForJobMaster(definition.getIsReadyForJobMaster())
            .withJars(definition.getJobClusterConfigs())
            .withJobOwner(definition.getOwner())
            .withLabels(definition.getLabels())
            .withLastJobCount(jobClusterMetadata.getLastJobCount())
            .withSla(definition.getSLA())
            .withMigrationConfig(definition.getWorkerMigrationConfig())
            .withParameters(definition.getParameters())
            .isCronActive(cronActive)
            .withLatestVersion(definition.getJobClusterConfig().getVersion())
            .build();
}
/**
 * Handles a job submission request: dedupes by user-provided unique type, delegates
 * inheritance/quick-submit cases to the last job's actor, otherwise resolves the job
 * definition and submits. Replies to the sender with a SubmitJobResponse in all paths
 * except the job-actor delegation (that path replies via
 * onGetJobDefinitionUpdatedFromJobActorResponse).
 */
@Override
public void onJobSubmit(final SubmitJobRequest request) {
    final ActorRef sender = getSender();
    // if the job is submitted with a userDefinedType check to see if such a job is already running. If so just reply with a reference to it.
    if (request.getJobDefinition().isPresent()) {
        String uniq = request.getJobDefinition().get().getJobSla().getUserProvidedType();
        if(uniq != null && !uniq.isEmpty()) {
            Optional<JobInfo> existingJob = jobManager.getJobInfoByUniqueId(uniq);
            if(existingJob.isPresent()) {
                logger.info("Job with unique {} already exists, returning its job Id {}", uniq, existingJob.get().jobId);
                sender.tell(new SubmitJobResponse(request.requestId, SUCCESS, existingJob.get().jobId.getId(), of(existingJob.get().jobId)), getSelf());
                return;
            }
        }
    }
    logger.info("Submitting job {}", request);
    try {
        // Inheritance / quick-submit cases are handed off to the last job's actor;
        // submission resumes in onGetJobDefinitionUpdatedFromJobActorResponse.
        if (requireJobActorProcess(request)) {
            logger.info("Sending job submit request to job actor for inheritance: {}", request.requestId);
            return;
        }
        JobDefinition resolvedJobDefn;
        if (request.isSubmitLatest()) {
            // Submit using the cluster's latest configuration, ignoring any given definition
            resolvedJobDefn = fromJobClusterDefinition(request.getSubmitter(), jobClusterMetadata.getJobClusterDefinition());
        } else {
            resolvedJobDefn = getResolvedJobDefinition(request.getSubmitter(), request.getJobDefinition());
        }
        eventPublisher.publishStatusEvent(new LifecycleEventsProto.JobClusterStatusEvent(LifecycleEventsProto.StatusEvent.StatusEventType.INFO,
                "Job submit request received", jobClusterMetadata.getJobClusterDefinition().getName()));
        resolvedJobDefn = LabelManager.insertSystemLabels(resolvedJobDefn, request.isAutoResubmit());
        submitJob(resolvedJobDefn, sender, request.getSubmitter());
        numJobSubmissions.increment();
    } catch (PersistException pe) {
        // Persistence failures are reported as server-side errors
        logger.error("Exception submitting job {} from {}", request.getClusterName(), request.getSubmitter(), pe);
        numJobSubmissionFailures.increment();
        sender.tell(new SubmitJobResponse(request.requestId, SERVER_ERROR, pe.getMessage(), empty()), getSelf());
    } catch (Exception e) {
        // All other failures (e.g. validation) are reported as client errors
        logger.error("Exception submitting job {} from {}", request.getClusterName(), request.getSubmitter(), e);
        numJobSubmissionFailures.increment();
        sender.tell(new SubmitJobResponse(request.requestId, CLIENT_ERROR, e.getMessage(), empty()), getSelf());
    }
}
/**
 * Resumes a job submission after the job actor has produced an updated job definition
 * (quick-submit or inherit-instance flows started in onJobSubmit). Replies to the ORIGINAL
 * submitter carried in the response, not getSender().
 */
public void onGetJobDefinitionUpdatedFromJobActorResponse(GetJobDefinitionUpdatedFromJobActorResponse request) {
    logger.info("Resuming job submission from job actor");
    // this request is returned by job actor but the response needs to be replied to the original job request sender (from API routes).
    ActorRef originalSender = request.getOriginalSender();
    if (request.responseCode == SERVER_ERROR || request.getJobDefinition() == null) {
        logger.error("Failed to retrieve job definition from job actor");
        numJobSubmissionFailures.increment();
        originalSender.tell(new SubmitJobResponse(request.requestId, SERVER_ERROR, request.message, empty()), getSelf());
        return;
    }
    try {
        JobDefinition resolvedJobDefn = request.getJobDefinition();
        // for quick submit the artifact version/name needs to be reset using the fork method below.
        if (request.isQuickSubmitMode()) {
            Optional<JobDefinition> jobDefinitionCloneO = cloneToNewJobDefinitionWithoutArtifactNameAndVersion(
                    request.getJobDefinition());
            if (jobDefinitionCloneO.isPresent()) {
                resolvedJobDefn = jobDefinitionCloneO.get();
            }
        }
        // Fill in any fields still missing from the cluster definition
        resolvedJobDefn = this.jobDefinitionResolver.getResolvedJobDefinition(
                request.getUser(), resolvedJobDefn, this.jobClusterMetadata);
        eventPublisher.publishStatusEvent(
                new LifecycleEventsProto.JobClusterStatusEvent(LifecycleEventsProto.StatusEvent.StatusEventType.INFO,
                        "Job submit request received", jobClusterMetadata.getJobClusterDefinition().getName()));
        resolvedJobDefn = LabelManager.insertSystemLabels(resolvedJobDefn, request.isAutoResubmit());
        submitJob(resolvedJobDefn, originalSender, request.getUser());
        numJobSubmissions.increment();
    } catch (PersistException pe) {
        // Persistence failures are reported as server-side errors
        logger.error("Exception submitting job {} from {}", this.name, request.getUser(), pe);
        numJobSubmissionFailures.increment();
        originalSender.tell(new SubmitJobResponse(request.requestId, SERVER_ERROR, pe.getMessage(), empty()), getSelf());
    } catch (Exception e) {
        // All other failures are reported as client errors
        logger.error("Exception submitting job {} from {}", this.name, request.getUser(), e);
        numJobSubmissionFailures.increment();
        originalSender.tell(new SubmitJobResponse(request.requestId, CLIENT_ERROR, e.getMessage(), empty()), getSelf());
    }
}
/**
 * If the job request requires process via job actor, inform the target job actor and return true to stop further
 * processing till the job actor replies (see onGetJobDefinitionUpdatedFromJobActorResponse).
 * Two cases require job actor level process:
 * 1. Quick submit (no job definition given) + valid active last job id.
 * 2. Regular submit with inheritance requirement + valid active last job id.
 * @param request job submission request.
 * @return true if further processing should stop.
 */
private boolean requireJobActorProcess(final SubmitJobRequest request) {
    String user = request.getSubmitter();
    Optional<JobDefinition> givenJobDefn = request.getJobDefinition();
    // Only non-terminal jobs are considered when locating the "last" job to inherit from
    List<JobInfo> existingJobsList = jobManager.getAllNonTerminalJobsList();
    Optional<JobId> lastJobId = JobListHelper.getLastSubmittedJobId(existingJobsList, Collections.emptyList());
    if (!lastJobId.isPresent()) {
        logger.info("No valid last job id found for inheritance. Skip job actor process step.");
        return false;
    }
    Optional<JobInfo> jobInfoForNonTerminalJob = jobManager.getJobInfoForNonTerminalJob(lastJobId.get());
    if (!jobInfoForNonTerminalJob.isPresent()) {
        logger.info("Last job id doesn't map to job info instance, skip job actor process. {}", lastJobId.get());
        return false;
    } else if (request.isSubmitLatest()) {
        // submit-latest bypasses inheritance entirely
        logger.info("Submit latest job request, skip job actor process. {}", request);
        return false;
    } else if (!givenJobDefn.isPresent()) {
        // Case 1: quick submit -- the job actor derives the job definition from the last job
        logger.info("[QuickSubmit] pass to job actor to process job definition: {}", lastJobId.get());
        jobInfoForNonTerminalJob.get().jobActor.tell(
                new GetJobDefinitionUpdatedFromJobActorRequest(
                        user, lastJobId.get(), jobInfoForNonTerminalJob.get().jobDefinition,
                        true, request.isAutoResubmit(), getSender()),
                getSelf());
        return true;
    } else if (givenJobDefn.get().requireInheritInstanceCheck()) {
        // Case 2: explicit inherit-instance-count request on the given definition
        logger.info("[Inherit request] pass to job actor to process job definition: {}", lastJobId.get());
        jobInfoForNonTerminalJob.get().jobActor.tell(
                new GetJobDefinitionUpdatedFromJobActorRequest(
                        user, lastJobId.get(), givenJobDefn.get(),
                        false, request.isAutoResubmit(), getSender()),
                getSelf());
        return true;
    }
    logger.info("request doesn't require job actor process, skip job actor and continue.");
    return false;
}
/**
 * Two cases
 * 1. JobDefinition provided by user: if labels / parameters or schedulingInfo were not provided,
 *    inherit them from the cluster (via the resolver below).
 * 2. If JobDefinition is not provided, find the last submitted job and use its config (quick submit),
 *    falling back to the cluster definition when no previous job exists.
 * @param user submitter
 * @param givenJobDefnOp job defn provided by user in job submit
 * @return job definition to be used by the actual submit
 * @throws Exception If jobDefinition could not be resolved
 */
private JobDefinition getResolvedJobDefinition(final String user, final Optional<JobDefinition> givenJobDefnOp) throws Exception {
    JobDefinition resolvedJobDefn;
    if (givenJobDefnOp.isPresent()) {
        if (givenJobDefnOp.get().getSchedulingInfo() != null && givenJobDefnOp.get().requireInheritInstanceCheck()) {
            logger.warn("Job requires inheriting instance count but has no active non-terminal job.");
        }
        resolvedJobDefn = givenJobDefnOp.get();
    } else {
        // no job definition specified , this is quick submit which is supposed to inherit from last job submitted
        // for request inheriting from non-terminal jobs, it has been sent to job actor instead.
        Optional<JobDefinition> jobDefnOp = cloneJobDefinitionForQuickSubmitFromArchivedJobs(
                jobManager.getCompletedJobsList(), empty(), jobStore);
        if (jobDefnOp.isPresent()) {
            logger.info("Inherited scheduling Info and parameters from previous job");
            resolvedJobDefn = jobDefnOp.get();
        } else if (this.jobClusterMetadata != null
                && this.jobClusterMetadata.getJobClusterDefinition() != null
                && this.jobClusterMetadata.getJobClusterDefinition().getJobClusterConfig() != null) {
            logger.info("No previous job definition found. Fall back to cluster definition: {}", this.name);
            // (removed unused locals that were assigned here but never read)
            resolvedJobDefn = fromJobClusterDefinition(user, this.jobClusterMetadata.getJobClusterDefinition());
            logger.info("Built job definition from cluster definition: {}", resolvedJobDefn);
        } else {
            throw new Exception("Job Definition could not retrieved from a previous submission (There may "
                    + "not be a previous submission)");
        }
    }
    logger.info("Resolved JobDefn {}", resolvedJobDefn);
    return this.jobDefinitionResolver.getResolvedJobDefinition(user, resolvedJobDefn, this.jobClusterMetadata);
}
private JobDefinition fromJobClusterDefinition(String user, IJobClusterDefinition clusterDefinition) throws InvalidJobException {
JobClusterConfig clusterConfig = clusterDefinition.getJobClusterConfig();
return
new JobDefinition.Builder()
.withJobSla(new JobSla.Builder().build())
.withArtifactName(clusterConfig.getArtifactName())
.withVersion(clusterConfig.getVersion())
.withLabels(clusterDefinition.getLabels())
.withName(this.name)
.withParameters(clusterDefinition.getParameters())
.withSchedulingInfo(clusterConfig.getSchedulingInfo())
.withUser(user)
.build();
}
private void submitJob(JobDefinition jobDefinition, ActorRef sender, String user) throws PersistException {
if (logger.isTraceEnabled()) { logger.trace("Enter submitJobb"); }
JobId jId = null;
try {
validateJobDefinition(jobDefinition);
long lastJobIdNumber = jobClusterMetadata.getLastJobCount();
jId = new JobId(name, ++lastJobIdNumber);
final int heartbeatIntervalSecs = jobDefinition.getIntSystemParameter(JOB_WORKER_HEARTBEAT_INTERVAL_SECS, 0);
final int workerTimeoutSecs = jobDefinition.getIntSystemParameter(JOB_WORKER_TIMEOUT_SECS, 0);
logger.info("Creating new job id: {} with job defn {}, with heartbeat {} and workertimeout {}",
jId, jobDefinition, heartbeatIntervalSecs, workerTimeoutSecs);
MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
.withJobId(jId)
.withSubmittedAt(Instant.now())
.withJobState(JobState.Accepted)
.withNextWorkerNumToUse(1)
.withJobDefinition(jobDefinition)
.withHeartbeatIntervalSecs(heartbeatIntervalSecs)
.withWorkerTimeoutSecs(workerTimeoutSecs)
.build();
eventPublisher.publishAuditEvent(
new LifecycleEventsProto.AuditEvent(LifecycleEventsProto.AuditEvent.AuditEventType.JOB_SUBMIT,
jId.getId(), jId + " submitter: " + user)
);
jobManager.initJob(mantisJobMetaData, jobClusterMetadata, sender);
numJobActorCreationCounter.increment();
jobClusterMetadata = new JobClusterMetadataImpl.Builder().withJobClusterDefinition((JobClusterDefinitionImpl)this.jobClusterMetadata.getJobClusterDefinition())
.withLastJobCount(lastJobIdNumber)
.withIsDisabled(jobClusterMetadata.isDisabled())
.build();
try {
jobStore.updateJobCluster(jobClusterMetadata);
} catch (Exception e) {
logger.error("Failed to persist job cluster {} error {}", jobClusterMetadata, e.getMessage(), e);
numJobSubmissionFailures.increment();
cleanUpOnJobSubmitFailure(jId);
throw new PersistException(e);
}
jobIdSubmissionSubject.onNext(jId);
numJobSubmissions.increment();
} catch (PersistException pe) {
throw pe;
} catch (InvalidJobRequest e) {
logger.error( "Invalid jobcluster : {} error {}", jobClusterMetadata, e.getMessage(), e);
numJobSubmissionFailures.increment();
throw new IllegalArgumentException(e);
} catch (Exception e) {
logger.error("Exception persisting job in store", e);
numJobSubmissionFailures.increment();
cleanUpOnJobSubmitFailure(jId);
throw new IllegalStateException(e);
}
if(logger.isTraceEnabled()) { logger.trace("Exit submitJob"); }
}
@Override
public void onJobInitialized(JobProto.JobInitialized jobInited) {
if(logger.isTraceEnabled()) { logger.trace("Enter onJobInitialized"); }
jobManager.markJobInitialized(jobInited.jobId, System.currentTimeMillis());
if(jobInited.responseCode == SUCCESS) {
jobInited.requestor.tell(new SubmitJobResponse(jobInited.requestId, SUCCESS, jobInited.jobId.getId(), of(jobInited.jobId)), getSelf());
numJobsInitialized.increment();
} else {
logger.warn("Job was not initialized {}" , jobInited);
Optional<JobInfo> jobInfo = jobManager.getJobInfoForNonTerminalJob(jobInited.jobId);
if(jobInfo.isPresent()) {
cleanUpOnJobSubmitFailure(jobInfo.get().jobId);
// if this is not a cron submission inform the caller
if(jobInited.requestor != null)
jobInited.requestor.tell(new SubmitJobResponse(jobInited.requestId, jobInited.responseCode, "Job " + jobInited.jobId + " submission failed", ofNullable(jobInited.jobId)), getSelf());
} else {
logger.warn("No such job found {}", jobInited.jobId);
}
}
if(logger.isTraceEnabled()) { logger.trace("Exit onJobInitialized"); }
}
/**
* When a Job starts evaluate SLA to ensure the number of running jobs satisfies the SLA
* @param startedEvent JobStarted Event
*/
@Override
public void onJobStarted(final JobStartedEvent startedEvent) {
logger.info("job {} started event", startedEvent.jobid);
Optional<JobInfo> jobInfoOp = jobManager.getJobInfoForNonTerminalJob(startedEvent.jobid);
if(jobInfoOp.isPresent()) {
// enforce SLA
jobManager.markJobStarted(jobInfoOp.get());
getSelf().tell(new JobClusterProto.EnforceSLARequest(Instant.now(), of(jobInfoOp.get().jobDefinition)), getSelf());
}
}
private void cleanUpOnJobSubmitFailure(JobId jId) {
if(logger.isTraceEnabled()) { logger.trace("Enter cleanUpOnJobSubmitFailure {}", jId); }
if(jId != null) {
Optional<JobInfo> jobInfoOp = jobManager.getJobInfoForNonTerminalJob(jId);
if (jobInfoOp.isPresent()) { // ensure there is a record of this job
JobInfo jobInfo = jobInfoOp.get();
if (jobManager.markJobTerminating(jobInfo, JobState.Failed)) { // mark job as terminating
getContext().unwatch(jobInfo.jobActor);
getContext().stop(jobInfo.jobActor);
jobManager.markCompleted(jId, empty(), JobState.Failed);
// clear it from initializing table if present
jobManager.markJobInitialized(jId, System.currentTimeMillis());
} else {
logger.warn("cleanup on Job Submit failure failed for job {}", jId);
}
}
} else {
logger.warn("cleanup on Job Submit failure failed as there was no JobId");
}
if(logger.isTraceEnabled()) { logger.trace("Exit cleanUpOnJobSubmitFailure {}", jId); }
}
/**
*
* @param definition Job Definition to be validated
* @throws InvalidJobRequest If the job definition is invalid
*/
private void validateJobDefinition(JobDefinition definition) throws InvalidJobRequest {
if (definition == null) {
throw new InvalidJobRequest(null, "MantisJobDefinition cannot be null");
}
if (definition.getArtifactName() == null) {
throw new InvalidJobRequest(null, "MantisJobDefinition job artifactName attribute cannot be null");
}
if (definition.getName() == null) {
throw new InvalidJobRequest(null, "MantisJobDefinition name attribute cannot be null");
}
if (definition.getSchedulingInfo() == null) {
throw new InvalidJobRequest(null, "MantisJobDefinition schedulingInfo cannot be null");
}
for (StageSchedulingInfo ssi : definition.getSchedulingInfo().getStages().values()) {
validateConstraints(ssi.getSoftConstraints(), ssi.getHardConstraints());
}
}
private void validateConstraints(List<JobConstraints> softConstraints, List<JobConstraints> hardConstraints) throws InvalidJobRequest {
// ok to have null constraints as they will get replaced later with empty list in JobActor.setupStageWorkers
if(softConstraints != null) {
for (JobConstraints jc : softConstraints) {
if (ConstraintsEvaluators.softConstraint(jc, new HashSet<>()) == null) {
logger.error("Invalid Soft Job Constraint {}", jc);
throw new InvalidJobRequest(null, "Unknown constraint " + jc);
}
}
;
}
if(hardConstraints != null ) {
for (JobConstraints jc : hardConstraints) {
if (ConstraintsEvaluators.hardConstraint(jc, new HashSet<>()) == null) {
logger.error("Invalid Hard Job Constraint {}", jc);
throw new InvalidJobRequest(null, "Unknown constraint " + jc);
}
}
;
}
}
    /**
     * Routes a worker event to its owning (non-terminal) job actor. When no such job
     * exists: a non-terminal worker event means the worker is orphaned and is terminated
     * through the scheduler; a terminal event is simply ignored.
     *
     * @param r worker event to route
     */
    @Override
    public void onWorkerEvent(WorkerEvent r) {
        if(logger.isTraceEnabled()) { logger.trace("Enter onWorkerEvent {}", r); }
        Optional<JobInfo> jobInfo = jobManager.getJobInfoForNonTerminalJob(r.getWorkerId().getJobId());
        if(jobInfo.isPresent()) {
            // Owning job is active: let its actor handle the event.
            jobInfo.get().jobActor.forward(r, getContext());
        } else {
            if(!JobHelper.isTerminalWorkerEvent(r)) {
                // Orphaned but still-running worker: terminate it via the scheduler.
                logger.warn("Event from worker {} has no valid running job. Terminating worker ", r.getWorkerId());
                Optional<String> host = JobHelper.getWorkerHostFromWorkerEvent(r);
                Optional<IMantisJobMetadata> completedJobOptional =
                    jobManager.getJobDataForCompletedJob(r.getWorkerId().getJobId());
                if (completedJobOptional.isPresent()) {
                    // Use the completed job's definition to select the right scheduler.
                    JobDefinition jobDefinition =
                        completedJobOptional.get().getJobDefinition();
                    mantisSchedulerFactory
                        .forJob(jobDefinition)
                        .unscheduleAndTerminateWorker(r.getWorkerId(), host);
                } else {
                    // No record of the job at all: fall back to the default cluster's scheduler.
                    logger.warn("Non-terminal Event from worker {} has no completed job. Sending event to default cluster", r.getWorkerId());
                    mantisSchedulerFactory.forClusterID(null).unscheduleAndTerminateWorker(r.getWorkerId(), host);
                }
            } else {
                logger.warn("Terminal Event from worker {} has no valid running job. Ignoring event ", r.getWorkerId());
            }
        }
        if(logger.isTraceEnabled()) { logger.trace("Exit onWorkerEvent {}", r); }
    }
/**
* @param req Resubmit worker message
*/
@Override
public void onResubmitWorkerRequest(ResubmitWorkerRequest req) {
if(logger.isTraceEnabled()) { logger.trace("Enter onResubmitWorkerRequest {}", req); }
onResubmitWorker(req);
if(logger.isTraceEnabled()) { logger.trace("Exit onResubmitWorkerRequest {}", req); }
}
/**
* Can be invoked in two ways
* 1. User requests a job termination
* 2. The job itself requests a termination due to
* a. Too many worker resubmits
* b. Max runtime limit has reached
* c. Subscription timeout reached
* @param req Kill job request message
*/
@Override
public void onJobKillRequest(KillJobRequest req) {
logger.info("JobClusterActor.onKillJobRequest {}", req);
Optional<JobInfo> jobInfo = jobManager.getJobInfoForNonTerminalJob(req.jobId);
ActorRef sender = getSender();
if(jobInfo.isPresent() && jobManager.markJobTerminating(jobInfo.get(), JobState.Failed)) {
jobInfo.get().jobActor.tell(req, getSelf());
} else {
logger.info("Job {} not found", req.jobId.getId() );
if (req.requestor != null) {
req.requestor.tell(
new JobClusterManagerProto.KillJobResponse(
req.requestId,
CLIENT_ERROR_NOT_FOUND,
JobState.Noop,
"Job " + req.jobId + " not found", req.jobId, req.user),
getSelf());
}
}
}
    /**
     * Sent by the job actor when the job shutdown is initiated.
     * On success: stops watching the job actor, records completion and, when an SLA is
     * configured, re-triggers SLA enforcement using the terminated job's definition.
     * The response is relayed to the original requestor unless that requestor is this
     * actor itself (to avoid an infinite message loop).
     * @param resp Kill job response message
     */
    @Override
    public void onKillJobResponse(JobClusterProto.KillJobResponse resp) {
        if(logger.isTraceEnabled()) { logger.trace("Enter onKillJobResponse {}", resp); }
        if (resp.responseCode == SUCCESS) {
            Optional<JobInfo> jInfo = jobManager.getJobInfoForNonTerminalJob(resp.jobId);
            if(jInfo.isPresent() ) {
                // stop watching actor
                getContext().unwatch(jInfo.get().jobActor);
                numJobShutdowns.increment();
                logger.info("Marking job {} as terminated", jInfo.get().jobId);
                // check requestor is not self to avoid an infinite loop
                if (resp.requestor != null && !getSelf().equals(resp.requestor)) {
                    resp.requestor.tell(
                        new KillJobResponse(resp.requestId, resp.responseCode, resp.state, resp.message, resp.jobId, resp.user),
                        getSelf());
                }
                Optional<CompletedJob> completedJob = jobManager.markCompleted(resp.jobId, resp.jobMetadata, resp.state);
                if(completedJob.isPresent()) {
                    logger.info("In cleanupAfterJobKill for Job {} in state {} and metadata {} ", resp.jobId, resp.state,resp.jobMetadata);
                    // enforce SLA only when the cluster is enabled
                    if(!jobClusterMetadata.isDisabled()) {
                        SLA sla = this.jobClusterMetadata.getJobClusterDefinition().getSLA();
                        if(sla.getMin() == 0 && sla.getMax() == 0) {
                            logger.info("{} No SLA specified nothing to enforce {}",
                                completedJob.get().getJobId(), sla);
                        } else {
                            try {
                                // first check if response has job meta for last job
                                Optional<IMantisJobMetadata> cJob = (resp.jobMetadata);
                                if (cJob == null || !cJob.isPresent()) {
                                    // else check archived jobs
                                    cJob = jobStore.getArchivedJob(completedJob.get().getJobId());
                                }
                                if( cJob != null && cJob.isPresent()) {
                                    // re-run SLA enforcement with the terminated job's definition as template
                                    getSelf().tell(new JobClusterProto.EnforceSLARequest(Instant.now(), of(cJob.get().getJobDefinition())), ActorRef.noSender());
                                } else {
                                    logger.warn("Could not load last terminated job to use for triggering enforce SLA");
                                }
                            } catch (Exception e) {
                                // should not get here
                                logger.warn("Exception {} loading completed Job {} to enforce SLA due", e.getMessage(), completedJob.get().getJobId(), e);
                            }
                        }
                    }
                } else {
                    logger.warn("Unable to mark job {} completed. ", resp.jobId);
                }
            } else {
                // should not get here: success response for a job we have no record of
                if (resp.requestor != null && !getSelf().equals(resp.requestor)) {
                    resp.requestor.tell(
                        new KillJobResponse(resp.requestId, CLIENT_ERROR, JobState.Noop, "Job not found", resp.jobId, resp.user),
                        getSelf());
                }
            }
        } else {
            if (resp.requestor != null && !getSelf().equals(resp.requestor)) {
                // kill job was not successful relay to caller
                resp.requestor.tell(
                    new KillJobResponse(resp.requestId, resp.responseCode, resp.state, resp.message, resp.jobId, resp.user),
                    getSelf());
            }
        }
        if(logger.isTraceEnabled()) { logger.trace("Exit onKillJobResponse {}", resp); }
    }
@Override
public void onGetJobDetailsRequest(GetJobDetailsRequest req) {
if(logger.isTraceEnabled()) { logger.trace("Enter GetJobDetails {}", req); }
GetJobDetailsResponse response = new GetJobDetailsResponse(req.requestId, CLIENT_ERROR_NOT_FOUND, "Job " + req.getJobId() + " not found", empty());
Optional<JobInfo> jInfo = jobManager.getJobInfoForNonTerminalJob(req.getJobId());
if(jInfo.isPresent()) {
if(logger.isDebugEnabled()) { logger.debug("Forwarding getJobDetails to job actor for {}", req.getJobId()); }
jInfo.get().jobActor.forward(req, getContext());
return;
} else {
// Could be a terminated job
Optional<CompletedJob> completedJob = jobManager.getCompletedJob(req.getJobId());
if(completedJob.isPresent()) {
if(logger.isDebugEnabled()) { logger.debug("Found Job {} in completed state ", req.getJobId()); }
try {
Optional<IMantisJobMetadata> jobMetaOp = jobStore.getArchivedJob(req.getJobId().getId());
if(jobMetaOp.isPresent()) {
response = new GetJobDetailsResponse(req.requestId, SUCCESS, "", jobMetaOp);
} else {
response = new GetJobDetailsResponse(req.requestId, CLIENT_ERROR_NOT_FOUND, "Job " + req.getJobId() + " not found", empty());
}
} catch (Exception e) {
logger.warn("Exception {} reading Job {} from Storage ", e.getMessage(), req.getJobId(), e);
response = new GetJobDetailsResponse(req.requestId, CLIENT_ERROR, "Exception reading Job " + req.getJobId() + " " + e.getMessage(), empty());
}
} else {
logger.warn("No such job {} ", req.getJobId());
}
}
getSender().tell(response, getSelf());
if(logger.isTraceEnabled()) { logger.trace("Exit GetJobDetails {}", req); }
}
    /**
     * Answers a discovery-info request for this cluster's latest submitted job.
     * An active latest job is asked via its actor; a missing job record or a
     * cluster-name mismatch produces an error response to the sender.
     */
    @Override
    public void onGetLatestJobDiscoveryInfo(JobClusterManagerProto.GetLatestJobDiscoveryInfoRequest request) {
        if(logger.isTraceEnabled()) { logger.trace("Enter onGetLatestJobDiscoveryInfo {}", request); }
        ActorRef sender = getSender();
        if(this.name.equals(request.getJobCluster())) {
            // The latest submitted job id is tracked by the submission subject.
            JobId latestJobId = jobIdSubmissionSubject.getValue();
            logger.debug("[{}] latest job Id for cluster: {}", name, latestJobId);
            if (latestJobId != null) {
                Optional<JobInfo> jInfo = jobManager.getJobInfoForNonTerminalJob(latestJobId);
                if (jInfo.isPresent()) {
                    // ask job actor for discovery info
                    jInfo.get().jobActor.forward(request, getContext());
                } else {
                    logger.info("job info not found for job ID when looking up discovery info: {}", latestJobId);
                    sender.tell(new GetLatestJobDiscoveryInfoResponse(request.requestId,
                        SERVER_ERROR,
                        "JobInfo not found when looking up discovery info for " + latestJobId,
                        empty()), getSelf());
                }
            } else {
                // no latest job ID found for this job cluster
                logger.debug("no latest Job ID found for job cluster {}", name);
                sender.tell(new GetLatestJobDiscoveryInfoResponse(request.requestId,
                    CLIENT_ERROR_NOT_FOUND,
                    "No latest jobId found for job cluster " + name,
                    empty()), getSelf());
            }
        } else {
            // Request was addressed to a different cluster than this actor.
            String msg = "Job Cluster " + request.getJobCluster() + " In request does not match the name of this actor " + this.name;
            logger.warn(msg);
            sender.tell(new JobClusterManagerProto.GetLatestJobDiscoveryInfoResponse(request.requestId, SERVER_ERROR, msg, empty()), getSelf());
        }
        if(logger.isTraceEnabled()) { logger.trace("Exit onGetLatestJobDiscoveryInfo {}", request); }
    }
@Override
public void onGetJobStatusSubject(GetJobSchedInfoRequest request) {
if(logger.isTraceEnabled()) { logger.trace("Enter onGetJobStatusSubject {}", request); }
Optional<JobInfo> jInfo = jobManager.getJobInfoForNonTerminalJob(request.getJobId());
if(jInfo.isPresent()) {
if(logger.isDebugEnabled()) { logger.debug("Forwarding getJobDetails to job actor for {}", request.getJobId()); }
jInfo.get().jobActor.forward(request, getContext());
} else {
// Could be a terminated job
GetJobSchedInfoResponse response = new GetJobSchedInfoResponse(request.requestId, CLIENT_ERROR, "Job " + request.getJobId() + " not found or not active", empty());
getSender().tell(response, getSelf());
}
if(logger.isTraceEnabled()) { logger.trace("Exit onGetJobStatusSubject "); }
}
@Override
public void onGetLastSubmittedJobIdSubject(GetLastSubmittedJobIdStreamRequest request) {
if(logger.isTraceEnabled()) { logger.trace("Enter onGetLastSubmittedJobIdSubject {}", request); }
ActorRef sender = getSender();
if(this.name.equals(request.getClusterName())) {
sender.tell(new GetLastSubmittedJobIdStreamResponse(request.requestId,SUCCESS,"",of(this.jobIdSubmissionSubject)),getSelf());
} else {
String msg = "Job Cluster " + request.getClusterName() + " In request does not match the name of this actor " + this.name;
logger.warn(msg);
sender.tell(new GetLastSubmittedJobIdStreamResponse(request.requestId,CLIENT_ERROR ,msg,empty()),getSelf());
}
if(logger.isTraceEnabled()) { logger.trace("Exit onGetLastSubmittedJobIdSubject {}", request); }
}
@Override
public void onBookkeepingRequest(JobClusterProto.BookkeepingRequest request) {
if(logger.isTraceEnabled()) { logger.trace("Enter onBookkeepingRequest for JobCluster {}", this.name); }
// Enforce SLA if exists
onEnforceSLARequest(new JobClusterProto.EnforceSLARequest());
// Tell all child jobs to migrate workers on disabled VMs (if any)
jobManager.actorToJobIdMap.keySet().forEach(actorRef -> actorRef.tell(new JobProto.MigrateDisabledVmWorkersRequest(request.time), ActorRef.noSender()));
if(logger.isTraceEnabled()) { logger.trace("Exit onBookkeepingRequest for JobCluster {}", name); }
}
@Override
public void onEnforceSLARequest(JobClusterProto.EnforceSLARequest request) {
if(logger.isTraceEnabled()) { logger.trace("Enter onEnforceSLA for JobCluster {} with request", this.name, request); }
numSLAEnforcementExecutions.increment();
long now = request.timeOfEnforcement.toEpochMilli();
List<JobInfo> pendingInitializationJobsPriorToCutoff = jobManager.getJobActorsStuckInInit(now, getExpirePendingInitializeDelayMs());
List<JobInfo> jobsStuckInAcceptedList = jobManager.getJobsStuckInAccepted(now, getExpireAcceptedDelayMs());
numJobsStuckInAccepted.increment(jobsStuckInAcceptedList.size());
List<JobInfo> jobsStuckInTerminatingList = jobManager.getJobsStuckInTerminating(now, getExpireAcceptedDelayMs());
if(!slaEnforcer.hasSLA()) {
return;
}
int activeJobsCount = jobManager.activeJobsCount();
int acceptedJobsCount = jobManager.acceptedJobsCount();
// enforcing min
int noOfJobsToLaunch = slaEnforcer.enforceSLAMin(activeJobsCount, acceptedJobsCount);
if(noOfJobsToLaunch > 0) {
logger.info("Submitting {} jobs for job name {} as active count is {} and accepted count is {}", noOfJobsToLaunch, name, activeJobsCount, acceptedJobsCount);
String user = MANTIS_MASTER_USER;
if(request.jobDefinitionOp.isPresent()) {
user = request.jobDefinitionOp.get().getUser();
}
for(int i=0; i< noOfJobsToLaunch; i++) {
getSelf().tell(new SubmitJobRequest(name, user, true,request.jobDefinitionOp), getSelf());
}
// enforce max.
} else {
List<JobInfo> listOfJobs = new ArrayList<>(activeJobsCount + acceptedJobsCount);
listOfJobs.addAll(jobManager.getActiveJobsList());
listOfJobs.addAll(jobManager.getAcceptedJobsList());
List<JobId> jobsToKill = slaEnforcer.enforceSLAMax(Collections.unmodifiableList(listOfJobs));
for (JobId jobId : jobsToKill) {
logger.info("Request termination for job {}", jobId);
getSelf().tell(
new KillJobRequest(
jobId, "SLA enforcement", JobCompletedReason.Killed, MANTIS_MASTER_USER, ActorRef.noSender()), getSelf());
}
}
if(logger.isTraceEnabled()) { logger.trace("Exit onEnforceSLA for JobCluster {}", name); }
}
private long getExpireAcceptedDelayMs() {
// stuck in accepted for more than 10mins
// TODO make part of config
return 10*60*1000;
}
/**
* Create a new JobDefinition using the given job definition. Inherit everything except the artifact name and version.
* @param jobDefinition
* @return Optional JobDefinition
*/
private Optional<JobDefinition> cloneToNewJobDefinitionWithoutArtifactNameAndVersion(JobDefinition jobDefinition) {
try {
JobDefinition clonedJobDefn = new JobDefinition.Builder().withJobSla(jobDefinition.getJobSla())
.withLabels(jobDefinition.getLabels())
.withName(jobDefinition.getName())
.withParameters(jobDefinition.getParameters())
.withSchedulingInfo(jobDefinition.getSchedulingInfo())
.withNumberOfStages(jobDefinition.getNumberOfStages())
.withSubscriptionTimeoutSecs(jobDefinition.getSubscriptionTimeoutSecs())
.withUser(jobDefinition.getUser())
.build();
return of(clonedJobDefn);
} catch (Exception e) {
logger.warn("Could not clone JobDefinition {} due to {}", jobDefinition, e.getMessage(), e);
e.printStackTrace();
}
// should not get here
return empty();
}
/**
* Fetch JobDefn of last job and clone it to a create a new one. Inherit the schedulingInfo and parameters
* @param completedJobs
* @param jobDefinitionOp
* @param store
* @return
*/
private Optional<JobDefinition> cloneJobDefinitionForQuickSubmitFromArchivedJobs(final List<CompletedJob> completedJobs,
Optional<JobDefinition> jobDefinitionOp,
MantisJobStore store) {
if(logger.isTraceEnabled()) { logger.trace("Enter createNewJobDefinitionFromLastSubmittedInheritSchedInfoAndParameters"); }
Optional<JobDefinition> lastSubmittedJobDefn = getLastSubmittedJobDefinition(completedJobs, jobDefinitionOp, store);
if(lastSubmittedJobDefn.isPresent()) {
if(logger.isTraceEnabled()) { logger.trace("Exit createNewJobDefinitionFromLastSubmittedInheritSchedInfoAndParameters"); }
return cloneToNewJobDefinitionWithoutArtifactNameAndVersion(lastSubmittedJobDefn.get());
}
if(logger.isTraceEnabled()) { logger.trace("Exit createNewJobDefinitionFromLastSubmittedInheritSchedInfoAndParameters empty"); }
return empty();
}
@Override
public void onExpireOldJobs(JobClusterProto.ExpireOldJobsRequest request) {
final long tooOldCutOff = System.currentTimeMillis() - (getTerminatedJobToDeleteDelayHours()*3600000L);
jobManager.purgeOldCompletedJobs(tooOldCutOff);
}
private long getExpirePendingInitializeDelayMs() {
// jobs older than 60 secs
return 60*1000;
}
/**
* When cron fires
* if a cron policy is keep_new then submit a new job
* else skip if a job is running at the moment, if not then submit a new job
* @param request Cron fired event
*/
@Override
public void onTriggerCron(JobClusterProto.TriggerCronRequest request) {
if(logger.isTraceEnabled()) { logger.trace("Enter onTriggerCron for Job Cluster {}", this.name);}
if(jobClusterMetadata.getJobClusterDefinition().getSLA().getCronPolicy() != null) {
if(jobClusterMetadata.getJobClusterDefinition().getSLA().getCronPolicy() == CronPolicy.KEEP_NEW ||
this.jobManager.getAllNonTerminalJobsList().size() == 0) {
getSelf().tell(new SubmitJobRequest(name, MANTIS_MASTER_USER, empty(), false), getSelf());
} else {
// A job is already running skip resubmiting
logger.info(name + ": Skipping submitting new job upon cron trigger, one exists already");
}
}
if(logger.isTraceEnabled()) { logger.trace("Exit onTriggerCron Triggered for Job Cluster {}", this.name);}
}
    /** Retention window, in hours, for terminated jobs before purging (from master config). */
    private long getTerminatedJobToDeleteDelayHours() {
        return ConfigurationProvider.getConfig().getTerminatedJobToDeleteDelayHours();
    }
@Override
public void onJobClusterUpdateSLA(UpdateJobClusterSLARequest slaRequest) {
if(logger.isTraceEnabled()) { logger.trace("Enter onJobClusterUpdateSLA {}", slaRequest); }
ActorRef sender = getSender();
try {
SLA newSla = new SLA(slaRequest.getMin(), slaRequest.getMax(), slaRequest.getCronSpec(), slaRequest.getCronPolicy());
JobClusterDefinitionImpl updatedDefn = new JobClusterDefinitionImpl.Builder().from(jobClusterMetadata.getJobClusterDefinition())
.withSla(newSla)
.build();
boolean isDisabled = jobClusterMetadata.isDisabled();
if(slaRequest.isForceEnable() && jobClusterMetadata.isDisabled()) {
isDisabled = false;
}
IJobClusterMetadata jobCluster = new JobClusterMetadataImpl.Builder()
.withIsDisabled(isDisabled)
.withLastJobCount(jobClusterMetadata.getLastJobCount())
.withJobClusterDefinition(updatedDefn)
.build();
updateAndSaveJobCluster(jobCluster);
if(cronManager != null)
cronManager.destroyCron();
this.cronManager = new CronManager(name, getSelf(), newSla);
sender.tell(new UpdateJobClusterSLAResponse(slaRequest.requestId, SUCCESS, name + " SLA updated"), getSelf());
eventPublisher.publishAuditEvent(
new LifecycleEventsProto.AuditEvent(LifecycleEventsProto.AuditEvent.AuditEventType.JOB_CLUSTER_UPDATE,
jobClusterMetadata.getJobClusterDefinition().getName(), name+" SLA update")
);
} catch(IllegalArgumentException e) {
logger.error("Invalid arguement job cluster not updated ", e);
sender.tell(new UpdateJobClusterSLAResponse(slaRequest.requestId, CLIENT_ERROR, name + " Job cluster SLA updation failed " + e.getMessage()), getSelf());
} catch(Exception e) {
logger.error("job cluster not updated ", e);
sender.tell(new UpdateJobClusterSLAResponse(slaRequest.requestId, SERVER_ERROR, name + " Job cluster SLA updation failed " + e.getMessage()), getSelf());
}
if(logger.isTraceEnabled()) { logger.trace("Exit onJobClusterUpdateSLA {}", slaRequest); }
}
@Override
public void onJobClusterUpdateLabels(UpdateJobClusterLabelsRequest labelRequest) {
if(logger.isTraceEnabled()) { logger.trace("Enter onJobClusterUpdateLabels {}", labelRequest); }
ActorRef sender = getSender();
try {
JobClusterConfig newConfig = new JobClusterConfig.Builder().from(jobClusterMetadata.getJobClusterDefinition().getJobClusterConfig())
.build();
JobClusterDefinitionImpl updatedDefn = new JobClusterDefinitionImpl.Builder().from(jobClusterMetadata.getJobClusterDefinition())
.withJobClusterConfig(newConfig)
.withLabels(labelRequest.getLabels())
.build();
IJobClusterMetadata jobCluster = new JobClusterMetadataImpl.Builder()
.withIsDisabled(jobClusterMetadata.isDisabled())
.withLastJobCount(jobClusterMetadata.getLastJobCount())
.withJobClusterDefinition(updatedDefn)
.build();
updateAndSaveJobCluster(jobCluster);
sender.tell(new UpdateJobClusterLabelsResponse(labelRequest.requestId, SUCCESS, name + " labels updated"), getSelf());
eventPublisher.publishAuditEvent(
new LifecycleEventsProto.AuditEvent(LifecycleEventsProto.AuditEvent.AuditEventType.JOB_CLUSTER_UPDATE,
jobClusterMetadata.getJobClusterDefinition().getName(),
name + " update labels")
);
} catch(Exception e) {
logger.error("job cluster labels not updated ", e);
sender.tell(new UpdateJobClusterLabelsResponse(labelRequest.requestId, SERVER_ERROR, name + " labels updation failed " + e.getMessage()), getSelf());
}
if(logger.isTraceEnabled()) { logger.trace("Exit onJobClusterUpdateLabels {}", labelRequest); }
}
@Override
public void onJobClusterUpdateArtifact(UpdateJobClusterArtifactRequest artifactReq) {
if(logger.isTraceEnabled()) { logger.trace("Entering JobClusterActor:onJobClusterUpdateArtifact"); }
ActorRef sender = getSender();
try {
if(!isVersionUnique(artifactReq.getVersion(), jobClusterMetadata.getJobClusterDefinition().getJobClusterConfigs())) {
String msg = String.format("job cluster %s not updated as the version %s is not unique", name,artifactReq.getVersion());
logger.error(msg);
sender.tell(new UpdateJobClusterArtifactResponse(artifactReq.requestId, CLIENT_ERROR, msg), getSelf());
return;
}
JobClusterConfig newConfig = new JobClusterConfig.Builder().from(jobClusterMetadata.getJobClusterDefinition().getJobClusterConfig())
.withArtifactName(artifactReq.getArtifactName())
.withVersion(artifactReq.getVersion())
.withUploadedAt(System.currentTimeMillis())
.build();
updateJobClusterConfig(newConfig);
if(!artifactReq.isSkipSubmit()) {
getSelf().tell(new SubmitJobRequest(name,artifactReq.getUser(), (empty()), false), getSelf());
}
sender.tell(new UpdateJobClusterArtifactResponse(artifactReq.requestId, SUCCESS, name + " artifact updated"), getSelf());
} catch(Exception e) {
logger.error("job cluster not updated ", e);
sender.tell(new UpdateJobClusterArtifactResponse(artifactReq.requestId, SERVER_ERROR, name + " Job cluster artifact updation failed " + e.getMessage()), getSelf());
}
if(logger.isTraceEnabled()) { logger.trace("Exit JobClusterActor:onJobClusterUpdateArtifact"); }
}
private void updateJobClusterConfig(JobClusterConfig newConfig) throws Exception {
JobClusterDefinitionImpl updatedDefn = new JobClusterDefinitionImpl.Builder().from(jobClusterMetadata.getJobClusterDefinition())
.withJobClusterConfig(newConfig)
.build();
IJobClusterMetadata jobCluster = new JobClusterMetadataImpl.Builder()
.withIsDisabled(jobClusterMetadata.isDisabled())
.withLastJobCount(jobClusterMetadata.getLastJobCount())
.withJobClusterDefinition(updatedDefn)
.build();
updateAndSaveJobCluster(jobCluster);
eventPublisher.publishAuditEvent(
new LifecycleEventsProto.AuditEvent(LifecycleEventsProto.AuditEvent.AuditEventType.JOB_CLUSTER_UPDATE,
jobClusterMetadata.getJobClusterDefinition().getName(),
name + " artifact update")
);
}
@Override
public void onJobClusterUpdateSchedulingInfo(UpdateSchedulingInfo request) {
ActorRef sender = getSender();
try {
if (!isVersionUnique(request.getVersion(), jobClusterMetadata.getJobClusterDefinition().getJobClusterConfigs())) {
String msg = String.format(
"job cluster %s not updated as the version %s is not unique", name,
request.getVersion());
logger.error(msg);
sender.tell(
new UpdateSchedulingInfoResponse(request.getRequestId(), CLIENT_ERROR, msg),
getSelf());
return;
}
JobClusterConfig newConfig = new JobClusterConfig.Builder().from(
jobClusterMetadata.getJobClusterDefinition().getJobClusterConfig())
.withVersion(request.getVersion())
.withSchedulingInfo(request.getSchedulingInfo())
.withUploadedAt(System.currentTimeMillis())
.build();
updateJobClusterConfig(newConfig);
sender.tell(
new UpdateSchedulingInfoResponse(request.getRequestId(), SUCCESS,
name + " schedulingInfo updated"), getSelf());
} catch (Exception e) {
logger.error("job cluster not updated ", e);
sender.tell(new UpdateSchedulingInfoResponse(request.getRequestId(), SERVER_ERROR, name + " Job cluster schedulingInfo update failed " + e.getMessage()), getSelf());
}
}
boolean isVersionUnique(String artifactVersion, List<JobClusterConfig> existingConfigs) {
if(logger.isTraceEnabled()) { logger.trace("Enter JobClusterActor {} isVersionnique {} existing versions {}",name,artifactVersion,existingConfigs);}
for(JobClusterConfig config : existingConfigs) {
if(config.getVersion().equals(artifactVersion)) {
logger.info("Given Version {} is not unique during UpdateJobCluster {}",artifactVersion, name);
return false;
}
}
return true;
}
//TODO validate the migration config json
@Override
public void onJobClusterUpdateWorkerMigrationConfig(UpdateJobClusterWorkerMigrationStrategyRequest req) {
if(logger.isTraceEnabled()) { logger.trace("Entering JobClusterActor:onJobClusterUpdateWorkerMigrationConfig {}", req); }
ActorRef sender = getSender();
try {
JobClusterDefinitionImpl updatedDefn = new JobClusterDefinitionImpl.Builder().from(jobClusterMetadata.getJobClusterDefinition())
.withMigrationConfig(req.getMigrationConfig())
.build();
IJobClusterMetadata jobCluster = new JobClusterMetadataImpl.Builder()
.withIsDisabled(jobClusterMetadata.isDisabled())
.withLastJobCount(jobClusterMetadata.getLastJobCount())
.withJobClusterDefinition(updatedDefn)
.build();
updateAndSaveJobCluster(jobCluster);
sender.tell(new UpdateJobClusterWorkerMigrationStrategyResponse(req.requestId, SUCCESS, name + " worker migration config updated"), getSelf());
eventPublisher.publishAuditEvent(
new LifecycleEventsProto.AuditEvent(LifecycleEventsProto.AuditEvent.AuditEventType.JOB_CLUSTER_UPDATE,
jobClusterMetadata.getJobClusterDefinition().getName(),
name + " worker migration config update")
);
} catch(Exception e) {
logger.error("job cluster migration config not updated ", e);
sender.tell(new UpdateJobClusterWorkerMigrationStrategyResponse(req.requestId, SERVER_ERROR, name + " Job cluster worker migration config updation failed " + e.getMessage()), getSelf());
}
if(logger.isTraceEnabled()) { logger.trace("Exit JobClusterActor:onJobClusterUpdateWorkerMigrationConfig {}", req); }
}
    /**
     * Persists the given cluster metadata to the store and, on success, swaps it in
     * as the actor's current metadata. Also refreshes the SLA enforcer from the new
     * definition. Note the store write happens first: if it throws, in-memory state
     * is left untouched.
     *
     * @param jobCluster the new cluster metadata to persist and adopt
     * @throws Exception if the store update fails
     */
    private void updateAndSaveJobCluster(IJobClusterMetadata jobCluster) throws Exception {
        if(logger.isTraceEnabled()) { logger.trace("Entering JobClusterActor:updateAndSaveJobCluster {}", jobCluster.getJobClusterDefinition().getName()); }
        jobStore.updateJobCluster(jobCluster);
        jobClusterMetadata = jobCluster;
        // switch back to the initialized (enabled) behavior unless the cluster is disabled
        if(!jobClusterMetadata.isDisabled()) {
            getContext().become(initializedBehavior);
        }
        slaEnforcer = new SLAEnforcer(jobClusterMetadata.getJobClusterDefinition().getSLA());
        logger.info("successfully saved job cluster");
        if(logger.isTraceEnabled()) { logger.trace("Exit JobClusterActor:updateAndSaveJobCluster {}", jobCluster.getJobClusterDefinition().getName()); }
    }
/**
* Fetch job definition for quick submit mode.
* If a job definition is passed return it immediately
* Else find the last submitted job and look in completed job.
* (For quick submit with active job, the request is passed to the active job actor to process instead).
* @param completedJobs completed job list
* @param jobDefinitionOp optional job definition
* @param store store reference if required to load from store
* @return JobDefinition of last submitted job if found
*/
/*package protected*/
private Optional<JobDefinition> getLastSubmittedJobDefinition(final List<CompletedJob> completedJobs,
Optional<JobDefinition> jobDefinitionOp,
MantisJobStore store) {
if(logger.isTraceEnabled()) { logger.trace("Entering getLastSubmittedJobDefinition"); }
if(jobDefinitionOp.isPresent()) {
return jobDefinitionOp;
}
Optional<JobId> lastJobId = JobListHelper.getLastSubmittedJobId(Collections.emptyList(), completedJobs);
if(lastJobId.isPresent()) {
Optional<CompletedJob> completedJob = jobManager.getCompletedJob(lastJobId.get());
if (completedJob.isPresent()) {
try {
Optional<IMantisJobMetadata> archivedJob = store.getArchivedJob(completedJob.get().getJobId());
if(archivedJob.isPresent()) {
if(logger.isTraceEnabled()) { logger.trace("Exit getLastSubmittedJobDefinition returning job {} with defn {}", archivedJob.get().getJobId(), archivedJob.get().getJobDefinition()); }
return of(archivedJob.get().getJobDefinition());
} else {
logger.warn("Could not find load archived Job {} for cluster {}", completedJob.get().getJobId(), name);
}
} catch (Exception e) {
logger.warn("Archived Job {} could not be loaded from the store due to {} ", completedJob.get().getJobId(), e.getMessage());
}
} else {
logger.warn("Could not find any previous submitted/completed Job for cluster {}", name);
}
} else {
logger.warn("Could not find any previous submitted Job for cluster {}", name);
}
if(logger.isTraceEnabled()) { logger.trace("Exit getLastSubmittedJobDefinition empty"); }
return empty();
}
    /**
     * 2 cases this can occur
     * 1. Graceful shutdown : Where the job cluster actor requests the job actor to terminate. In this case we simply clear the pending
     * delete jobs map
     *
     * 2. Unexpected shutdown : The job actor terminated unexpectedly in which case we need to relaunch the actor.
     * @param terminatedEvent Event describing a job actor was terminated
     */
    private void onTerminated(Terminated terminatedEvent) {
        if(logger.isDebugEnabled()) { logger.debug("onTerminatedEvent {} ", terminatedEvent); }
        // NOTE(review): currently a no-op beyond logging — the unexpected-shutdown
        // relaunch described above is not implemented yet.
        // TODO relaunch actor ?
    }
@Override
public void onScaleStage(ScaleStageRequest req) {
if(logger.isTraceEnabled()) { logger.trace("Exit onScaleStage {}", req); }
Optional<JobInfo> jobInfo = jobManager.getJobInfoForNonTerminalJob(req.getJobId());
ActorRef sender = getSender();
if(jobInfo.isPresent()) {
jobInfo.get().jobActor.forward(req, getContext());
} else {
sender.tell(new ScaleStageResponse(req.requestId, CLIENT_ERROR, "Job " + req.getJobId() + " not found. Could not scale stage to " + req.getNumWorkers(), 0), getSelf());
}
if(logger.isTraceEnabled()) { logger.trace("Exit onScaleStage {}", req); }
}
@Override
public void onResubmitWorker(ResubmitWorkerRequest req) {
if(logger.isTraceEnabled()) { logger.trace("Exit JCA:onResubmitWorker {}", req); }
Optional<JobInfo> jobInfo = jobManager.getJobInfoForNonTerminalJob(req.getJobId());
ActorRef sender = getSender();
if(jobInfo.isPresent()) {
jobInfo.get().jobActor.forward(req, getContext());
} else {
sender.tell(new ResubmitWorkerResponse(req.requestId, CLIENT_ERROR, "Job " + req.getJobId() + " not found. Could not resubmit worker"), getSelf());
}
if(logger.isTraceEnabled()) { logger.trace("Exit JCA:onResubmitWorker {}", req); }
}
    /**
     * In-memory record of a single job tracked by this cluster actor: its identity,
     * definition, owning actor and lifecycle timestamps. Timestamp/state fields are
     * volatile; everything else is immutable after construction.
     */
    static final class JobInfo {
        // submission time, millis since epoch (immutable)
        final long submittedAt;
        // NOTE(review): public and mutable, and never written within this class — confirm
        // whether any external writer still uses it before relying on its value
        public String version;
        // lifecycle timestamps in millis since epoch; -1 means "not reached yet"
        volatile long initializeInitiatedAt = -1;
        volatile long initializedAt = -1;
        volatile long terminationInitiatedAt = -1;
        volatile long terminatedAt = -1;
        final JobId jobId;
        // actor that owns and manages this job's lifecycle
        final ActorRef jobActor;
        // current lifecycle state; mutated via setState as the job progresses
        volatile JobState state;
        final String user;
        final JobDefinition jobDefinition;
        /**
         * Full constructor including initialization timestamps (used by the Builder).
         */
        JobInfo(JobId jobId, JobDefinition jobDefinition, long submittedAt, ActorRef jobActor, JobState state, String user, long initializeInitiatedAt, long initedAt) {
            this.submittedAt = submittedAt;
            this.jobActor = jobActor;
            this.jobId = jobId;
            this.state = state;
            this.user = user;
            this.jobDefinition = jobDefinition;
            this.initializeInitiatedAt = initializeInitiatedAt;
            this.initializedAt = initedAt;
        }
        @Override
        public String toString() {
            return "JobInfo{" +
                    "submittedAt=" + submittedAt +
                    ", initializeInitiatedAt=" + initializeInitiatedAt +
                    ", initializedAt=" + initializedAt +
                    ", terminationInitiatedAt=" + terminationInitiatedAt +
                    ", terminatedAt=" + terminatedAt +
                    ", jobId=" + jobId +
                    ", jobActor=" + jobActor +
                    ", state=" + state +
                    ", user='" + user + '\'' +
                    ", jobDefinition=" + jobDefinition +
                    '}';
        }
        // records when actor initialization was requested
        void setInitializeInitiatedAt(long t) {
            this.initializeInitiatedAt = t;
        }
        // records when actor initialization completed
        void setInitializedAt(long t) {
            this.initializedAt = t;
        }
        void setState(JobState state) {
            this.state = state;
        }
        void setTerminationInitiatedAt(long terminationInitiatedAt) {
            this.terminationInitiatedAt = terminationInitiatedAt;
        }
        public void setTerminatedAt(long terminatedAt) {
            this.terminatedAt = terminatedAt;
        }
        /**
         * Convenience constructor; initialization timestamps default to -1 (not started).
         */
        JobInfo(JobId jobId, JobDefinition jobDefinition, long submittedAt, ActorRef jobActor, JobState state, String user) {
            this(jobId, jobDefinition, submittedAt, jobActor, state, user, -1, -1);
        }
        /** Builder for JobInfo; jobId, jobDefinition, state and jobActor are mandatory. */
        static class Builder {
            long submittedAt = -1;
            long initializeInitiatedAt = -1;
            long initializedAt = -1;
            JobId jobId = null;
            ActorRef jobActor = null;
            JobState state = null;
            String user = "";
            JobDefinition jobDefinition = null;
            Builder withSubmittedAt(long submittedAt) {
                this.submittedAt = submittedAt;
                return this;
            }
            Builder withInitializeInitiatedAt(long t) {
                this.initializeInitiatedAt = t;
                return this;
            }
            Builder withInitializedAt(long t) {
                this.initializedAt = t;
                return this;
            }
            Builder withJobId(JobId jId) {
                this.jobId = jId;
                return this;
            }
            Builder withJobActor(ActorRef actor) {
                this.jobActor = actor;
                return this;
            }
            Builder withJobDefinition(JobDefinition jd) {
                this.jobDefinition = jd;
                return this;
            }
            Builder withUser(String user) {
                this.user = user;
                return this;
            }
            Builder withState(JobState state) {
                this.state = state;
                return this;
            }
            // populates identity, definition, submission time, state and user from persisted metadata
            Builder usingJobMetadata(MantisJobMetadataImpl jobMeta, ActorRef actor) {
                this.jobId = jobMeta.getJobId();
                this.jobDefinition = jobMeta.getJobDefinition();
                this.submittedAt = jobMeta.getSubmittedAtInstant().toEpochMilli();
                this.state = jobMeta.getState();
                this.user = jobMeta.getUser();
                this.jobActor = actor;
                return this;
            }
            JobInfo build() {
                Preconditions.checkNotNull(jobId, "JobId cannot be null");
                Preconditions.checkNotNull(jobDefinition, "JobDefinition cannot be null");
                Preconditions.checkNotNull(state, "state cannot be null");
                Preconditions.checkNotNull(jobActor, "Job Actor cannot be null");
                return new JobInfo(jobId,jobDefinition,submittedAt,jobActor,state,user,initializeInitiatedAt,initializedAt);
            }
        }
    }
/**
* Responsible of keeping track of Jobs Belonging to this cluster.
* As a job moves from Accepted -> Launched -> Terminating -> Completed states it is moved between
* the corresponding maps.
* This class is NOT ThreadSafe the caller should ensure it is not accessed concurrently
* @author njoshi
*
*/
final static class JobManager {
private final Logger logger = LoggerFactory.getLogger(JobManager.class);
private final String name;
// Map of Actor ref to JobId
private final Map<ActorRef, JobId> actorToJobIdMap = new HashMap<>();
// Map of Job Actors pending initialization
private final ConcurrentMap<JobId, JobInfo> pendingInitializationJobsMap = new ConcurrentHashMap<>();
// Map of Jobs in Launched state
private final ConcurrentMap<JobId, JobInfo> activeJobsMap = new ConcurrentHashMap<>();
// Map of Jobs in accepted state
private final ConcurrentMap<JobId, JobInfo> acceptedJobsMap = new ConcurrentHashMap<>();
private final Set<JobInfo> nonTerminalSortedJobSet = new TreeSet<>((o1, o2) -> {
if (o1.submittedAt < o2.submittedAt) {
return 1;
} else if (o1.submittedAt > o2.submittedAt) {
return -1;
} else {
return 0;
}
});
// Cache that deals with completed job
private final CompletedJobCache completedJobsCache;
// Map of Jobs in terminating state
private final Map<JobId, JobInfo> terminatingJobsMap = new HashMap<>();
private final ActorContext context;
private final MantisSchedulerFactory scheduler;
private final LifecycleEventPublisher publisher;
private final MantisJobStore jobStore;
private final CostsCalculator costsCalculator;
private final LabelCache labelCache = new LabelCache();
JobManager(String clusterName, ActorContext context, MantisSchedulerFactory schedulerFactory, LifecycleEventPublisher publisher, MantisJobStore jobStore, CostsCalculator costsCalculator) {
this.name = clusterName;
this.jobStore = jobStore;
this.context = context;
this.scheduler = schedulerFactory;
this.publisher = publisher;
this.completedJobsCache = new CompletedJobCache(name, labelCache);
this.costsCalculator = costsCalculator;
}
/**
* Invoked in a scheduled timer on the JobClusterActor to purge expired jobs
*
* @param tooOldCutOff Current cut off delta
*/
public void purgeOldCompletedJobs(long tooOldCutOff) {
completedJobsCache.purgeOldCompletedJobs(tooOldCutOff, jobStore);
}
public void cleanupAllCompletedJobs() {
completedJobsCache.forcePurgeCompletedJobs(jobStore);
}
Observable<JobProto.JobInitialized> bootstrapJob(MantisJobMetadataImpl jobMeta, IJobClusterMetadata jobClusterMetadata) {
// create jobInfo
JobInfo jobInfo = createJobInfoAndActorAndWatchActor(jobMeta, jobClusterMetadata);
// add to appropriate map
actorToJobIdMap.put(jobInfo.jobActor, jobInfo.jobId);
if (jobInfo.state.equals(JobState.Accepted)) {
acceptedJobsMap.put(jobInfo.jobId, jobInfo);
nonTerminalSortedJobSet.add(jobInfo);
} else if (jobInfo.state.equals(JobState.Launched)) {
activeJobsMap.put(jobInfo.jobId, jobInfo);
nonTerminalSortedJobSet.add(jobInfo);
} else if (jobInfo.state.equals(JobState.Terminating_abnormal) || jobInfo.state.equals(JobState.Terminating_normal)) {
terminatingJobsMap.put(jobInfo.jobId, jobInfo);
nonTerminalSortedJobSet.add(jobInfo);
} else {
logger.warn("Unexpected job state {}", jobInfo.state);
}
long masterInitTimeoutSecs = ConfigurationProvider.getConfig().getMasterInitTimeoutSecs();
long timeout = ((masterInitTimeoutSecs - 60)) > 0 ? (masterInitTimeoutSecs - 60) : masterInitTimeoutSecs;
Duration t = Duration.ofSeconds(timeout);
// mark it as pending actor init
markJobInitializeInitiated(jobInfo, System.currentTimeMillis());
CompletionStage<JobProto.JobInitialized> respCS = ask(jobInfo.jobActor, new JobProto.InitJob(ActorRef.noSender(), false), t)
.thenApply(JobProto.JobInitialized.class::cast);
return Observable.from(respCS.toCompletableFuture(), Schedulers.io())
.onErrorResumeNext(ex -> {
logger.warn("caught exception {}", ex.getMessage(), ex);
return Observable.just(new JobProto.JobInitialized(1, SERVER_ERROR, "Timeout initializing Job " + jobInfo.jobId + " exception -> " + ex.getMessage(), jobInfo.jobId, ActorRef.noSender()));
})
.map((jobInited) -> {
// once init response received remove from pending init map.
markJobInitialized(jobInited.jobId, System.currentTimeMillis());
return jobInited;
})
;
}
JobInfo initJob(MantisJobMetadataImpl jobMeta, IJobClusterMetadata jobClusterMetadata, ActorRef sender) {
JobInfo jobInfo = createJobInfoAndActorAndWatchActor(jobMeta, jobClusterMetadata);
markJobAccepted(jobInfo);
jobInfo.jobActor.tell(new JobProto.InitJob(sender, true), context.self());
markJobInitializeInitiated(jobInfo, System.currentTimeMillis());
return jobInfo;
}
JobInfo createJobInfoAndActorAndWatchActor(MantisJobMetadataImpl jobMeta, IJobClusterMetadata jobClusterMetadata) {
MantisScheduler scheduler1 = scheduler.forJob(jobMeta.getJobDefinition());
ActorRef jobActor = context.actorOf(JobActor.props(jobClusterMetadata.getJobClusterDefinition(),
jobMeta, jobStore, scheduler1, publisher, costsCalculator), "JobActor-" + jobMeta.getJobId().getId());
context.watch(jobActor);
// Add to label cache
labelCache.addJobIdToLabelCache(jobMeta.getJobId(), jobMeta.getLabels());
return new JobInfo.Builder()
.usingJobMetadata(jobMeta, jobActor)
.build();
}
void markJobInitialized(JobId jobId, long ts) {
JobInfo removed = this.pendingInitializationJobsMap.remove(jobId);
if (removed != null) {
removed.setInitializedAt(ts);
}
}
void markJobInitializeInitiated(JobInfo jobInfo, long ts) {
jobInfo.setInitializeInitiatedAt(ts);
// mark it as pending actor init
pendingInitializationJobsMap.put(jobInfo.jobId, jobInfo);
}
/**
* During startup if a job is in terminal state then directly mark it as completed
*
* @param jobMeta job metadata of completed job
*/
void persistToCompletedJobAndArchiveJobTables(IMantisJobMetadata jobMeta) {
completedJobsCache.persistToCompletedJobAndArchiveJobTables(jobMeta, jobStore);
}
/**
* Used during bootstrap to add the list of completedJobs to cache
*
* @param completedJobsList
*/
void addCompletedJobsToCache(List<CompletedJob> completedJobsList) {
completedJobsCache.addCompletedJobsToCache(completedJobsList);
}
/**
* Called on Job Submit. Updates the acceptedJobsMap & actorMap
*
* @param jobInfo job info of accepted job
* @return true if successful
*/
boolean markJobAccepted(JobInfo jobInfo) {
boolean isSuccess = false;
if (!jobInfo.state.isValidStateChgTo(JobState.Accepted) || activeJobsMap.containsKey(jobInfo.jobId) || terminatingJobsMap.containsKey(jobInfo.jobId) || completedJobsCache.containsKey(jobInfo.jobId)) {
String warn = String.format("Job %s already exists", jobInfo.jobId);
logger.warn(warn);
} else {
this.acceptedJobsMap.put(jobInfo.jobId, jobInfo);
this.actorToJobIdMap.put(jobInfo.jobActor, jobInfo.jobId);
nonTerminalSortedJobSet.add(jobInfo);
isSuccess = true;
}
return isSuccess;
}
List<JobInfo> getPendingInitializationJobsPriorToCutoff(long ts) {
return this.pendingInitializationJobsMap.values().stream().filter((jInfo) -> {
if (jInfo.initializedAt == -1 && jInfo.initializeInitiatedAt < ts) {
return true;
}
return false;
})
.collect(Collectors.toList());
}
/**
* Transition job to terminating state.
*
* @param jobInfo For the job which is terminating
* @param newState whether it is normal or abnormal termination
* @return true if successful
*/
boolean markJobTerminating(JobInfo jobInfo, JobState newState) {
boolean isSuccess = false;
if (JobState.isTerminalState(newState) && jobInfo.state.isValidStateChgTo(newState)) {
this.activeJobsMap.remove(jobInfo.jobId);
this.acceptedJobsMap.remove(jobInfo.jobId);
nonTerminalSortedJobSet.add(jobInfo);
jobInfo.setState(newState);
this.terminatingJobsMap.put(jobInfo.jobId, jobInfo);
jobInfo.setTerminationInitiatedAt(System.currentTimeMillis());
isSuccess = true;
} else {
String warn = "Unexpected job terminating event " + jobInfo.jobId + " Invalid transition from state " + jobInfo.state + " to state " + newState + " ";
logger.warn(warn);
}
return isSuccess;
}
/**
* Marks the job as started by putting it into the activejobsmap
* in case of a valid transition
*
* @param jobInfo job info for the job that just started
* @return true if successful and false if failed due to an invalid transition
*/
boolean markJobStarted(JobInfo jobInfo) {
boolean success = false;
if (jobInfo.state.isValidStateChgTo(JobState.Launched)) {
jobInfo.setState(JobState.Launched);
// remove from accepted jobs map
this.acceptedJobsMap.remove(jobInfo.jobId);
// add to active jobs map
this.activeJobsMap.put(jobInfo.jobId, jobInfo);
nonTerminalSortedJobSet.add(jobInfo);
success = true;
} else {
String warn = String.format("Unexpected job started event %s Invalid transition from state %s to state %s", jobInfo.jobId, jobInfo.state, JobState.Launched);
logger.warn(warn);
}
return success;
}
Optional<CompletedJob> markCompleted(JobId jId, Optional<IMantisJobMetadata> jobMetadata, JobState state) {
return markCompleted(jId, System.currentTimeMillis(), jobMetadata, state);
}
/**
* Invoked during clean up phase when the Job Actor has informed the Cluster that all workers have been terminated
*
* @param jId job id of the job that completed
* @return An instance of CompletedJob that would be used to persist to storage.
*/
Optional<CompletedJob> markCompleted(JobId jId, long completionTime, Optional<IMantisJobMetadata> jobMetadata, JobState state) {
if (logger.isTraceEnabled()) {
logger.trace("Enter markCompleted job {}", jId);
}
Optional<JobInfo> jobInfoOp = getJobInfoForNonTerminalJob(jId);
if (jobInfoOp.isPresent()) {
JobInfo jInfo = jobInfoOp.get();
jInfo.state = state;
jInfo.setTerminatedAt(completionTime);
this.acceptedJobsMap.remove(jId);
this.terminatingJobsMap.remove(jId);
this.activeJobsMap.remove(jId);
this.actorToJobIdMap.remove(jobInfoOp.get().jobActor);
this.nonTerminalSortedJobSet.remove(jInfo);
if (logger.isTraceEnabled()) {
logger.trace("Exit markCompleted job {}", jId);
}
JobState finalState = JobState.Completed;
String version = null;
if(jobMetadata.isPresent()) {
finalState = jobMetadata.get().getState();
version = jobMetadata.get().getJobDefinition().getVersion();
}
return this.completedJobsCache.markCompleted(jId, jobMetadata, jInfo.submittedAt, completionTime, jInfo.user, version, finalState, jobStore);
} else {
logger.warn("No such job {}", jId);
return empty();
}
}
void markCompletedDuringStartup(JobId jId, long completionTime, IMantisJobMetadata jobMetadata, JobState state) {
if(logger.isTraceEnabled()) { logger.trace("Enter markCompletedDuringStartup job {}", jId);}
JobState finalState = JobState.isTerminalState(jobMetadata.getState()) ? jobMetadata.getState() : JobState.Completed;
String version = jobMetadata.getJobDefinition().getVersion();
this.completedJobsCache.markCompleted(jId,of(jobMetadata), jobMetadata.getSubmittedAtInstant().toEpochMilli(), completionTime, jobMetadata.getUser(), version, finalState, jobStore);
}
List<JobInfo> getAllNonTerminalJobsList() {
List<JobInfo> allJobsList = new ArrayList<>(this.nonTerminalSortedJobSet);
if(logger.isTraceEnabled()) { logger.trace("Exiting JobClusterActor:getAllNonTerminatlJobsList {}", allJobsList); }
return allJobsList;
}
/**
* List of Jobs in accepted state.
* @return list of accepted job info
*/
List<JobInfo> getAcceptedJobsList() {
List<JobInfo> acceptedJobsList = Lists.newArrayListWithExpectedSize(this.acceptedJobsCount());
acceptedJobsList.addAll(this.acceptedJobsMap.values());
return Collections.unmodifiableList(acceptedJobsList);
}
/**
* List of Jobs in active state
* @return list of active job info
*/
List<JobInfo> getActiveJobsList() {
List<JobInfo> activeJobList = Lists.newArrayListWithExpectedSize(activeJobsMap.size());
activeJobList.addAll(this.activeJobsMap.values());
return Collections.unmodifiableList(activeJobList);
}
/**
* List of jobs in completed state
* @return list of completed jobs
*/
List<CompletedJob> getCompletedJobsList() {
return new ArrayList<>(completedJobsCache.getCompletedJobSortedSet());
}
List<JobInfo> getTerminatingJobsList() {
List<JobInfo> terminatingJobsList = Lists.newArrayListWithExpectedSize(terminatingJobsMap.size());
terminatingJobsList.addAll(this.terminatingJobsMap.values());
return Collections.unmodifiableList(terminatingJobsList);
}
/**
* No. of jobs in accepted state
* @return no of accepted jobs
*/
int acceptedJobsCount() {
return this.acceptedJobsMap.size();
}
/**
* No. of jobs in running state
* @return no of active jobs
*/
int activeJobsCount() {
return this.activeJobsMap.size();
}
Optional<CompletedJob> getCompletedJob(JobId jId) {
return completedJobsCache.getCompletedJob(jId);
}
Optional<IMantisJobMetadata> getJobDataForCompletedJob(String jId) {
Optional<JobId> jobId = JobId.fromId(jId);
if(jobId.isPresent()) {
return completedJobsCache.getJobDataForCompletedJob(jobId.get(), jobStore);
} else {
logger.warn("Invalid Job Id {} in getJobDataForCompletedJob", jId);
return empty();
}
}
/**
* Returns the JobInfo associated with the JobId. The Job could be in Accepted, Launched or Terminating states
* But not terminated state.
* @param jId JobId whose JobInfo is being lookedup
* @return JobInfo corresponding to the jobId, empty if not found
*/
Optional<JobInfo> getJobInfoForNonTerminalJob(JobId jId) {
if(logger.isTraceEnabled() ) { logger.trace("In getJobInfo {}", jId); }
if(acceptedJobsMap.containsKey(jId)) {
if(logger.isDebugEnabled() ) { logger.debug("Found {} in accepted state", jId); }
return of(acceptedJobsMap.get(jId));
} else if(activeJobsMap.containsKey(jId)) {
if(logger.isDebugEnabled() ) { logger.debug("Found {} in active state", jId); }
return of(activeJobsMap.get(jId));
} else if(this.terminatingJobsMap.containsKey(jId)) {
if(logger.isDebugEnabled() ) { logger.debug("Found {} in terminating state", jId); }
return of(terminatingJobsMap.get(jId));
}
return empty();
}
Optional<JobInfo> getJobInfoForNonTerminalJob(String jobId) {
Optional<JobId> jId = JobId.fromId(jobId);
if(jId.isPresent()) {
return getJobInfoForNonTerminalJob(jId.get());
}
return empty();
}
Optional<JobInfo> getJobInfoByUniqueId(final String uniqueId) {
return this.getAllNonTerminalJobsList().stream().filter((jobInfo) -> {
String unq = jobInfo.jobDefinition.getJobSla().getUserProvidedType();
return unq != null && !unq.isEmpty() && unq.equals(uniqueId);
}).findFirst();
}
private List<JobInfo> getJobActorsStuckInInit(long now, long allowedDelay) {
return getPendingInitializationJobsPriorToCutoff(now - allowedDelay)
.stream()
.peek((jobInfo) -> logger.warn("Job {} waiting for initialization since {}", jobInfo.jobId, jobInfo.initializeInitiatedAt))
.collect(Collectors.toList());
}
private List<JobInfo> getJobsStuckInAccepted(long now, long allowedDelay) {
return getAcceptedJobsList().stream()
.filter((jobInfo -> jobInfo.submittedAt < now - allowedDelay))
.peek((jobInfo) -> logger.warn("Job {} stuck in accepted since {}", jobInfo.jobId, Instant.ofEpochMilli(jobInfo.submittedAt)))
.collect(Collectors.toList());
}
private List<JobInfo> getJobsStuckInTerminating(long now, long allowedDelay) {
return getTerminatingJobsList().stream()
.filter((jobInfo -> jobInfo.terminationInitiatedAt < now - allowedDelay))
.peek((jobInfo) -> logger.warn("Job {} stuck in terminating since {}", jobInfo.jobId, Instant.ofEpochMilli(jobInfo.terminationInitiatedAt)))
.collect(Collectors.toList());
}
boolean isJobListEmpty() {
return activeJobsMap.isEmpty() && acceptedJobsMap.isEmpty();
}
public Set<JobId> getJobsMatchingLabels(List<Label> labels, Optional<String> labelsOp) {
boolean isAnd = false;
if(labelsOp.isPresent()) {
if(labelsOp.get().equalsIgnoreCase(LabelUtils.AND_OPERAND)) {
isAnd = true;
}
}
return labelCache.getJobIdsMatchingLabels(labels, isAnd);
}
}
/**
* Maintains a map of label to JobbId. Note the map is Label to Job Id and not
* Label.key to JobId.
*
*/
final static class LabelCache {
final Map<Label, Set<JobId>> labelJobIdMap = new HashMap<>();
final Map<JobId, List<Label>> jobIdToLabelMap = new HashMap<>();
private final Logger logger = LoggerFactory.getLogger(LabelCache.class);
/**
* Invoked in the following ways
* 1. During bootstrap of Job cluster when a Job Actor is created for an existing running job
* 2. When a new Job Actor is created during job submission
* 3. When the completed jobs list is being populated at bootstrap
* @param jobId
* @param labelList
*/
void addJobIdToLabelCache(JobId jobId,List<Label> labelList) {
if(logger.isTraceEnabled()) { logger.trace("addJobIdToLabelCache " + jobId + " labelList " + labelList + " current map " + labelJobIdMap); }
if(labelList == null) {
return;
}
for(Label label : labelList) {
Set<JobId> jobIds = labelJobIdMap.get(label);
if(jobIds != null) {
jobIds.add(jobId);
} else {
Set<JobId> jobIdList = new HashSet<>();
jobIdList.add(jobId);
labelJobIdMap.put(label, jobIdList);
}
}
jobIdToLabelMap.put(jobId, labelList);
if(logger.isTraceEnabled()) { logger.trace("Exit addJobIdToLabelCache " + jobId + " labelList " + labelList + " new map " + labelJobIdMap); }
}
/**
* Invoked when a job is completely purged from the system.
* This happens after a completed job hits its expiry time.
* @param jobId
*/
void removeJobIdFromLabelCache(JobId jobId) {
if(logger.isTraceEnabled()) { logger.trace("removeJobIdFromLabelCache " + jobId + " current map " + labelJobIdMap);}
List<Label> labels = jobIdToLabelMap.get(jobId);
if(labels != null) {
for(Label label : labels) {
Set<JobId> jobIds = labelJobIdMap.get(label);
jobIds.remove(jobId);
if(jobIds.isEmpty()) {
labelJobIdMap.remove(label);
}
}
}
jobIdToLabelMap.remove(jobId);
if(logger.isTraceEnabled()) { logger.trace("Exit removeJobIdFromLabelCache " + jobId + " current map " + labelJobIdMap); }
}
/**
* Invoked during jobList and jobIdList api calls.
* 1. For each label find the Set of JobIds that have this label
* 2. Then based on whether the query is an AND or OR perform a set
* intersection or union and return the result.
* @param labelList
* @param isAnd
* @return
*/
Set<JobId> getJobIdsMatchingLabels(List<Label> labelList, boolean isAnd) {
if(logger.isTraceEnabled()) { logger.trace("Entering getJobidsMatchingLabels " + labelList + " is and ? " + isAnd + " with map " + labelJobIdMap); }
Set<JobId> matchingJobIds = new HashSet<>();
List<Set<JobId>> matchingSubsets = new ArrayList<>();
if(labelList == null) {
return matchingJobIds;
}
for(Label label : labelList) {
if(labelJobIdMap.containsKey(label)) {
Set<JobId> st = new HashSet<>();
st.addAll(labelJobIdMap.get(label));
matchingSubsets.add(st);
} else {
// label not present add empty set
matchingSubsets.add(new HashSet<>());
}
}
Set<JobId> resu = (isAnd) ? getSetIntersection(matchingSubsets) : getSetUnion(matchingSubsets);
if(logger.isTraceEnabled()) { logger.trace("Exiting getJobidsMatchingLabels " + resu); }
return resu;
}
/**
* Uses the built in feature of Set API to perform a union of 'n' sets
* @param listOfSets
* @return
*/
private Set<JobId> getSetUnion(List<Set<JobId>> listOfSets) {
if(logger.isTraceEnabled()) { logger.trace("In getSetUnion " + listOfSets); }
Set<JobId> unionSet = new HashSet<>();
if(listOfSets == null || listOfSets.isEmpty()) return unionSet;
int i=0;
unionSet = listOfSets.get(i);
i++;
while(i < listOfSets.size()) {
Set<JobId> jobIds = listOfSets.get(i);
unionSet.addAll(jobIds);
i++;
}
if(logger.isTraceEnabled()) { logger.trace("Exit getSetUnion " + unionSet); }
return unionSet;
}
/**
* Uses the built in retainAll method to perform an intersection across
* 'n' sets.
* @param listOfSets
* @return
*/
private Set<JobId> getSetIntersection(List<Set<JobId>> listOfSets) {
if(logger.isTraceEnabled()) { logger.trace("In getSetIntersection " + listOfSets); }
Set<JobId> intersectionSet = new HashSet<>();
if(listOfSets == null || listOfSets.isEmpty()) return intersectionSet;
int i=0;
intersectionSet = listOfSets.get(i);
i++;
while(i < listOfSets.size()) {
Set<JobId> jobIds = listOfSets.get(i);
intersectionSet.retainAll(jobIds);
i++;
}
if(logger.isTraceEnabled()) { logger.trace("Return getSetIntersection " + intersectionSet); }
return intersectionSet;
}
}
/**
* Consolidates all processing of completed jobs
*/
static class CompletedJobCache {
private final Logger logger = LoggerFactory.getLogger(CompletedJobCache.class);
// Set of sorted terminal jobs
private final Set<CompletedJob> terminalSortedJobSet = new TreeSet<>((o1, o2) -> {
if(o1.getTerminatedAt() < o2.getTerminatedAt()) {
return 1;
} else if(o1.getTerminatedAt() > o2.getTerminatedAt()) {
return -1;
} else {
return 0;
}
});
// cluster name
private final String name;
// Map of completed jobs
private final Map<JobId, CompletedJob> completedJobs = new HashMap<>();
// Labels lookup map
private final LabelCache labelsCache;
// Map of jobmetadata
private final Map<JobId, IMantisJobMetadata> jobIdToMetadataMap = new HashMap<>();
public CompletedJobCache(String clusterName, LabelCache labelsCache) {
this.name = clusterName;
this.labelsCache = labelsCache;
}
        /**
         * Returns the live sorted set of completed jobs (most recently terminated first).
         * NOTE(review): this exposes the cache's internal mutable set; callers must treat
         * it as read-only or cache consistency is broken.
         */
        public Set<CompletedJob> getCompletedJobSortedSet() {
            return terminalSortedJobSet;
        }
public Optional<CompletedJob> getCompletedJob(JobId jId) {
return ofNullable(completedJobs.getOrDefault(jId, null));
}
/**
* If job data exists in cache return it else call getArchiveJob
* @param jId
* @param jobStore
* @return
*/
public Optional<IMantisJobMetadata> getJobDataForCompletedJob(JobId jId, MantisJobStore jobStore) {
if(this.jobIdToMetadataMap.containsKey(jId)) {
return of(jobIdToMetadataMap.get(jId));
} else {
return jobStore.getArchivedJob(jId.getId());
}
}
        /** Delegates to the shared label index (isAnd => intersection, otherwise union). */
        public Set<JobId> getJobIdsMatchingLabels(List<Label> labelList, boolean isAnd) {
            return labelsCache.getJobIdsMatchingLabels(labelList, isAnd);
        }
        /**
         * Records a job as completed: builds a CompletedJob record, adds it to the
         * sorted set and the cache, and persists it via the store. Idempotent: if the
         * job is already recorded, the existing record is returned and a warning logged.
         *
         * @param jId id of the completed job
         * @param jobMetadata full metadata when available; supplies the labels
         * @param submittedAt submission time (millis since epoch)
         * @param completionTime termination time (millis since epoch)
         * @param user submitting user
         * @param version artifact version of the job (may be null)
         * @param finalState terminal state to record
         * @param jobStore store used to persist the completed-job record
         * @return the new or previously recorded CompletedJob
         */
        public Optional<CompletedJob> markCompleted(JobId jId, Optional<IMantisJobMetadata> jobMetadata, long submittedAt, long completionTime, String user, String version, JobState finalState, MantisJobStore jobStore) {
            // make sure its not already marked completed
            if(!completedJobs.containsKey(jId)) {
                // create completed job; labels come from the metadata when present
                List<Label> labels = new ArrayList<>();
                if(jobMetadata.isPresent()) {
                    labels = jobMetadata.get().getLabels();
                }
                final CompletedJob completedJob = new CompletedJob(name, jId.getId(), version, finalState, submittedAt, completionTime, user, labels);
                // add to sorted set
                terminalSortedJobSet.add(completedJob);
                try {
                    // add to local cache and store table (also registers labels and metadata)
                    addToCacheAndSaveCompletedJobToStore(completedJob, jobMetadata, jobStore);
                } catch (Exception e) {
                    // store failure is tolerated: the in-memory record still marks the job completed
                    logger.warn("Unable to save {} to completed jobs table due to {}", completedJob, e.getMessage(), e);
                }
                return of(completedJob);
            } else {
                logger.warn("Job {} already marked completed", jId);
                return of(completedJobs.get(jId));
            }
        }
        /**
         * Completely delete jobs that are older than cut off
         * @param tooOldCutOff timestamp, all jobs having an older timestamp should be deleted
         * @param jobStore store used to delete the job and completed-job records
         */
        public void purgeOldCompletedJobs(long tooOldCutOff, MantisJobStore jobStore) {
            long numDeleted = 0;
            // per-invocation cap so a huge backlog cannot stall the caller
            int maxJobsToPurge = ConfigurationProvider.getConfig().getMaxJobsToPurge();
            final long startNanos = System.nanoTime();
            // iterate the cache map (hash order, not termination order), removing via the iterator
            for(Iterator<CompletedJob> it = completedJobs.values().iterator(); it.hasNext();) {
                if(numDeleted == maxJobsToPurge) {
                    logger.info("{} Max clean up limit of {} reached. Stop clean up", name, maxJobsToPurge);
                    break;
                }
                CompletedJob completedJob = it.next();
                if(completedJob.getTerminatedAt() < tooOldCutOff) {
                    try {
                        logger.info("Purging Job {} as it was terminated at {} which is older than cutoff {}", completedJob, completedJob.getTerminatedAt(), tooOldCutOff);
                        terminalSortedJobSet.remove(completedJob);
                        jobStore.deleteJob(completedJob.getJobId());
                        jobStore.deleteCompletedJob(name, completedJob.getJobId());
                        it.remove();
                        Optional<JobId> jobId = JobId.fromId(completedJob.getJobId());
                        if(jobId.isPresent()) {
                            // drop metadata and label-index entries for the purged job
                            this.jobIdToMetadataMap.remove(jobId.get());
                            labelsCache.removeJobIdFromLabelCache(jobId.get());
                        }
                    } catch (Exception e) {
                        // best effort: a failed delete leaves the record for the next purge cycle
                        logger.warn("Unable to purge job {} due to {}", completedJob, e);
                    }
                    // NOTE(review): incremented even when the delete above threw, so the cap
                    // bounds attempts, not successful deletions.
                    numDeleted++;
                } else {
                    if(logger.isDebugEnabled()) { logger.debug("Job {} was terminated at {} which is not older than cutoff {}",completedJob, completedJob.getTerminatedAt(), tooOldCutOff);}
                }
            }
            if (numDeleted > 0) {
                final long endNanos = System.nanoTime();
                logger.info("Took {} micros to clean up {} jobs in cluster {} ", (endNanos - startNanos) / 1000, numDeleted, this.name);
            }
        }
        /**
         * During Job Cluster delete, purge all records of completed jobs
         * (no age cutoff and no per-invocation cap, unlike purgeOldCompletedJobs).
         * @param jobStore store used to delete the job and completed-job records
         */
        void forcePurgeCompletedJobs(MantisJobStore jobStore) {
            for(Iterator<CompletedJob> it = completedJobs.values().iterator(); it.hasNext();) {
                CompletedJob completedJob = it.next();
                try {
                    logger.info("Purging Job {} during job cluster cleanup", completedJob);
                    terminalSortedJobSet.remove(completedJob);
                    jobStore.deleteJob(completedJob.getJobId());
                    jobStore.deleteCompletedJob(name, completedJob.getJobId());
                    it.remove();
                    Optional<JobId> jobId = JobId.fromId(completedJob.getJobId());
                    if(jobId.isPresent()) {
                        // drop metadata and label-index entries for the purged job
                        this.jobIdToMetadataMap.remove(jobId.get());
                        labelsCache.removeJobIdFromLabelCache(jobId.get());
                    }
                } catch (Exception e) {
                    // best effort: continue purging the remaining jobs
                    logger.warn("Unable to purge job {} due to {}", completedJob, e);
                }
            }
        }
/**
 * During startup, if a job is found in a terminal state then directly mark it as completed:
 * build a CompletedJob record, add it to the caches/store, and archive the job metadata
 * (normally the job actor archives, but these jobs were left in the active table unarchived).
 * Failures are logged and swallowed so startup can proceed.
 * @param jobMeta job metadata of the already-terminated job
 * @param jobStore persistent store to write the completed-job record and archive into
 */
public void persistToCompletedJobAndArchiveJobTables(IMantisJobMetadata jobMeta, MantisJobStore jobStore) {
    try {
        // a job may lack an end timestamp if it terminated abnormally; fall back to "now"
        Instant endedAt = jobMeta.getEndedAtInstant().orElse(Instant.now());
        final CompletedJob completedJob = new CompletedJob(name, jobMeta.getJobId().getId(), null, jobMeta.getState(), jobMeta.getSubmittedAtInstant().toEpochMilli(), endedAt.toEpochMilli(), jobMeta.getUser(), jobMeta.getLabels());
        addToCacheAndSaveCompletedJobToStore(completedJob, of(jobMeta), jobStore);
        // normally archiving is done by job actor, but these are jobs in active table that weren't archived
        jobStore.archiveJob(jobMeta);
    } catch (Exception e) {
        // pass the exception as the trailing argument (beyond the placeholders) so SLF4J
        // logs the full stack trace, matching the convention used elsewhere in this class
        logger.warn("Unable to save completed job {} to store due to {}", jobMeta, e.getMessage(), e);
    }
}
/**
 * Adds the given completed job to the in-memory caches (label cache, completedJobs map,
 * terminal sorted set, and — when metadata is available — the jobId-to-metadata map) and
 * then persists the completed-job record to the store.
 * @param completedJob the terminal job record to cache and persist
 * @param jobMetaData full job metadata, if available, to retain in the metadata cache
 * @param jobStore persistent store for the completed-job record
 * @throws Exception if the store write fails
 */
private void addToCacheAndSaveCompletedJobToStore(CompletedJob completedJob, Optional<IMantisJobMetadata> jobMetaData, MantisJobStore jobStore) throws Exception {
    Optional<JobId> jId = JobId.fromId(completedJob.getJobId());
    if (jId.isPresent()) {
        labelsCache.addJobIdToLabelCache(jId.get(), completedJob.getLabelList());
        completedJobs.put(jId.get(), completedJob);
        terminalSortedJobSet.add(completedJob);
        if (jobMetaData.isPresent()) {
            jobIdToMetadataMap.put(jId.get(), jobMetaData.get());
        }
        jobStore.storeCompletedJobForCluster(name, completedJob);
    } else {
        // unparseable job id: nothing cached or persisted
        logger.warn("Invalid job id {} in addToCacheAndSaveCompletedJobToStore ", completedJob);
    }
}
/**
 * Bulk add completed jobs to the in-memory caches: the terminal sorted set, the
 * completedJobs map, and the label cache. Entries whose job id cannot be parsed
 * are logged and skipped. A null input list is tolerated (logged, no-op).
 * @param completedJobsList completed-job records to cache; may be null
 */
public void addCompletedJobsToCache(List<CompletedJob> completedJobsList) {
    if (completedJobsList == null) {
        logger.warn("addCompletedJobsToCache called with null completedJobsList");
        return;
    }
    this.terminalSortedJobSet.addAll(completedJobsList);
    for (CompletedJob completedJob : completedJobsList) {
        Optional<JobId> parsedId = JobId.fromId(completedJob.getJobId());
        if (parsedId.isPresent()) {
            completedJobs.put(parsedId.get(), completedJob);
            labelsCache.addJobIdToLabelCache(parsedId.get(), completedJob.getLabelList());
        } else {
            logger.warn("Invalid job Id {}", completedJob.getJobId());
        }
    }
}
/** Returns true if a completed job with the given id is present in the cache. */
public boolean containsKey(JobId jobId) {
    return this.completedJobs.containsKey(jobId);
}
}
/**
 * Manages the lifecycle of a cron trigger for a job cluster's SLA: registers a
 * {@link CronTrigger} against the shared {@link TriggerOperator} on construction
 * (when a cron spec is configured) and deletes it on {@link #destroyCron()}.
 */
static class CronManager {
    // shared, process-wide scheduler; initialized once for all clusters
    private static final TriggerOperator triggerOperator;
    private static final Logger logger = LoggerFactory.getLogger(CronManager.class);
    static {
        triggerOperator = new TriggerOperator(1);
        try {
            triggerOperator.initialize();
        } catch (SchedulerException e) {
            logger.error("Unexpected: {}", e.getMessage(), e);
            throw new RuntimeException(e);
        }
    }
    private final String cronSpec;
    private final CronPolicy policy;
    private final ActorRef clusterActor;
    // non-null only while a trigger is registered
    private String triggerId;
    private final String jobClusterName;
    private String triggerGroup = null;
    private CronTrigger<ActorRef> scheduledTrigger;
    private boolean isCronActive = false;
    CronManager(String jobClusterName, ActorRef clusterActor, SLA sla) throws Exception {
        this.jobClusterName = jobClusterName;
        cronSpec = sla.getCronSpec();
        policy = sla.getCronPolicy();
        this.clusterActor = clusterActor;
        if (cronSpec != null) {
            initCron();
        }
    }
    private void initCron() throws Exception {
        // no-op if no cron spec configured or a trigger is already registered
        if (cronSpec == null || triggerId != null) {
            return;
        }
        logger.info("Init'ing cron for " + jobClusterName);
        triggerGroup = jobClusterName + "-" + this;
        try {
            scheduledTrigger = new CronTrigger<>(cronSpec, jobClusterName, clusterActor, ActorRef.class, CronTriggerAction.class);
            triggerId = triggerOperator.registerTrigger(triggerGroup, scheduledTrigger);
            isCronActive = true;
        } catch (IllegalArgumentException e) {
            destroyCron();
            // exception passed as the trailing argument so the full stack trace is logged
            logger.error("Failed to start cron for {}", jobClusterName, e);
            throw new SchedulerException(e.getMessage(), e);
        }
    }
    private void destroyCron() {
        // Capture the id before clearing state. The previous implementation nulled
        // triggerId first and then passed the (now null) id to deleteTrigger, so the
        // trigger was never actually removed from the scheduler.
        final String idToDelete = triggerId;
        if (idToDelete == null) {
            return;
        }
        logger.info("Destroying cron " + idToDelete);
        triggerId = null;
        isCronActive = false;
        try {
            triggerOperator.deleteTrigger(triggerGroup, idToDelete);
        } catch (TriggerNotFoundException | SchedulerException e) {
            logger.warn("Couldn't delete trigger group " + triggerGroup + ", id " + idToDelete);
        }
    }
    boolean isCronActive() {
        return isCronActive;
    }
}
/**
 * Action invoked by the scheduler when a cluster's cron trigger fires; notifies the
 * owning job cluster actor via a {@code TriggerCronRequest} so it can run its cron handling.
 */
public static class CronTriggerAction implements Action1<ActorRef> {
    @Override
    public void call(ActorRef jobClusterActor) {
        // fire-and-forget: no sender; the cluster actor processes the request asynchronously
        jobClusterActor.tell(new JobClusterProto.TriggerCronRequest(), ActorRef.noSender());
    }
}
}
| 8,040 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/JobClusterMetadataImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster;
import io.mantisrx.server.master.domain.IJobClusterDefinition;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
/**
* JobCluster
-------------
String name,
JobOwner owner,
SLA {
int slaMin,
int slaMax,
String cronSpec,
CronPolicy policy,
}
WorkerMigrationConfig config,
boolea readyForJobMaster,
long lastJobCount
boolean isdisabled
jobDefinitions [{
String artifactName,
String version,
long uploadedAt,
SchedulingInfo: [{
"1": {
"numberOfInstances": 15,
"machineDefinition": {
"cpuCores": 4,
"memoryMB": 12024,
"networkMbps": 512,
"diskMB": 10024,
"numPorts": 1
},
"hardConstraints": [],
"softConstraints": [],
"scalingPolicy": null,
"scalable": false
}, {
"2" : {
"numberOfInstances": 15,
"machineDefinition": {
"cpuCores": 4,
"memoryMB": 12024,
"networkMbps": 512,
"diskMB": 10024,
"numPorts": 1
},
"hardConstraints": [],
"softConstraints": [],
"scalingPolicy": null,
"scalable": false
}
}
}],
"parameters": [
{
"name": "enableCompressedBinaryInput",
"value": "True"
},
{
"name": "targetApp",
"value": "^apiproxy.*"
}
],
}]
}
* @author njoshi
*
*/
public class JobClusterMetadataImpl implements IJobClusterMetadata {
    private final IJobClusterDefinition jobClusterDefinition;
    private final long lastJobCount;
    private final boolean disabled;

    /**
     * @param jcDefn the cluster definition (configs, SLA, owner etc.)
     * @param lastJobCount last job count recorded for this cluster
     * @param disabled whether the cluster is currently disabled
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown=true)
    public JobClusterMetadataImpl(@JsonProperty("jobClusterDefinition") JobClusterDefinitionImpl jcDefn,
                                  @JsonProperty("lastJobCount") long lastJobCount,
                                  @JsonProperty("disabled") boolean disabled) {
        this.jobClusterDefinition = jcDefn;
        this.lastJobCount = lastJobCount;
        this.disabled = disabled;
    }

    /* (non-Javadoc)
     * @see io.mantisrx.master.jobcluster.IJobClusterMetadata#getJobClusterDefinition()
     */
    @Override
    public IJobClusterDefinition getJobClusterDefinition() {
        return jobClusterDefinition;
    }

    /* (non-Javadoc)
     * @see io.mantisrx.master.jobcluster.IJobClusterMetadata#getLastJobCount()
     */
    @Override
    public long getLastJobCount() {
        return lastJobCount;
    }

    /* (non-Javadoc)
     * @see io.mantisrx.master.jobcluster.IJobClusterMetadata#isDisabled()
     */
    @Override
    public boolean isDisabled() {
        return disabled;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        JobClusterMetadataImpl that = (JobClusterMetadataImpl) o;
        return lastJobCount == that.lastJobCount &&
                disabled == that.disabled &&
                Objects.equals(jobClusterDefinition, that.jobClusterDefinition);
    }

    @Override
    public int hashCode() {
        return Objects.hash(jobClusterDefinition, lastJobCount, disabled);
    }

    @Override
    public String toString() {
        return "JobClusterMetadataImpl [jobClusterDefinition=" + jobClusterDefinition + ", lastJobCount=" + lastJobCount
                + ", disabled=" + disabled + "]";
    }

    /** Fluent builder for {@link JobClusterMetadataImpl}. */
    public static class Builder {
        private JobClusterDefinitionImpl jobClusterDefinition;
        private long lastJobCount = 0;
        private boolean disabled;

        public Builder() {}

        public Builder withJobClusterDefinition(JobClusterDefinitionImpl jobClusterDef) {
            this.jobClusterDefinition = jobClusterDef;
            return this;
        }

        public Builder withLastJobCount(long lastJobCnt) {
            this.lastJobCount = lastJobCnt;
            return this;
        }

        public Builder withIsDisabled(boolean disabled) {
            this.disabled = disabled;
            return this;
        }

        public IJobClusterMetadata build() {
            return new JobClusterMetadataImpl(this.jobClusterDefinition, this.lastJobCount, this.disabled);
        }

        public IJobClusterMetadata build(JobClusterDefinitionImpl def, long lastJobCnt, boolean isDisabled) {
            // bug fix: previously ignored the lastJobCnt argument and used the builder's
            // lastJobCount field instead, silently dropping the caller-supplied count
            return new JobClusterMetadataImpl(def, lastJobCnt, isDisabled);
        }
    }
}
| 8,041 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/JobDefinitionResolver.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster;
import static java.util.Optional.empty;
import static java.util.Optional.of;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.server.master.domain.JobClusterConfig;
import io.mantisrx.server.master.domain.JobDefinition;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class is responsible for 'filling the blanks' in the provided JobDefinition during a Job Submit.
*/
public class JobDefinitionResolver {

    private final Logger logger = LoggerFactory.getLogger(JobDefinitionResolver.class);

    /**
     *
     * Encodes the logic of how to resolve the relevant fields of the submitted JobDefinition.
     * Artifact | Version | SchedulingInfo | Resolution
     * -------------------------------------------------
     *    Y     |    Y    |      Y         | Use given scheduling info
     * ------------------------------------------------------------
     *    Y     |    Y    |      N         | INVALID (new artifact with no sched info)
     * -------------------------------------------------------------
     *    Y     |    N    |      Y         | Generate version and use given sched info
     * --------------------------------------------------------------
     *    Y     |    N    |      N         | INVALID (new artifact with no sched info)
     * ---------------------------------------------------------------
     *    N     |    Y    |      Y         | Lookup Cluster Config for given Version, get the SchedInfo from it and ensure given SchedInfo is compatible
     * ----------------------------------------------------------------
     *    N     |    Y    \      N         | Lookup Cluster config for given version and use it
     * -----------------------------------------------------------------
     *    N     |    N    |      Y         | Get latest cluster config, get the SchedInfo from it and ensure given SchedInfo is compatible
     * -----------------------------------------------------------------
     *    N     |    N    |      N         | Get latest cluster config, get the SchedInfo from it
     * -------------------------------------------------------------------
     * @param user user submitting the job
     * @param givenJobDefnOp the (possibly partial) JobDefinition supplied at submit time
     * @param jobClusterMetadata metadata of the target job cluster, used to fill in the blanks
     * @return a fully resolved JobDefinition (artifact, version, scheduling info, params, labels)
     * @throws Exception if the combination of given fields is invalid or cannot be resolved
     */
    JobDefinition getResolvedJobDefinition(final String user, final JobDefinition givenJobDefnOp, final IJobClusterMetadata jobClusterMetadata) throws Exception {
        Preconditions.checkNotNull(givenJobDefnOp, "JobDefinition cannot be null");
        Preconditions.checkNotNull(jobClusterMetadata, "JobClusterMetadata cannot be null");

        JobDefinition resolvedJobDefn = givenJobDefnOp;
        logger.info("Given JobDefn {}", resolvedJobDefn);

        // inherit params from cluster if not specified
        List<Parameter> parameters = (resolvedJobDefn.getParameters() != null && !resolvedJobDefn.getParameters().isEmpty()) ? resolvedJobDefn.getParameters() : jobClusterMetadata.getJobClusterDefinition().getParameters();

        // inherit labels from cluster if not specified
        List<Label> labels = (resolvedJobDefn.getLabels() != null && !resolvedJobDefn.getLabels().isEmpty()) ? resolvedJobDefn.getLabels() : jobClusterMetadata.getJobClusterDefinition().getLabels();

        String artifactName = resolvedJobDefn.getArtifactName();
        SchedulingInfo schedulingInfo = resolvedJobDefn.getSchedulingInfo();
        String version = resolvedJobDefn.getVersion();
        JobClusterConfig jobClusterConfig = null;

        if(!isNull(artifactName) && !isNull(version) && !schedulingInfoNotValid(schedulingInfo)) {
            // everything given: use as-is
            // update cluster ?
        } else if(!isNull(artifactName) && !isNull(version) && schedulingInfoNotValid(schedulingInfo)) { // scheduling Info is not given while new artifact is specified
            String msg = String.format("Scheduling info is not specified during Job Submit for cluster %s while new artifact is specified %s. Job Submit fails", jobClusterMetadata.getJobClusterDefinition().getName(), artifactName);
            logger.warn(msg);
            throw new Exception(msg);
        } else if(!isNull(artifactName) && isNull(version) && !schedulingInfoNotValid(schedulingInfo)) { // artifact & schedulingInfo are given
            // generate new version and use the given scheduling info
            version = String.valueOf(System.currentTimeMillis());
            // update cluster ?
        } else if(!isNull(artifactName) && isNull(version) && schedulingInfoNotValid(schedulingInfo)) { // scheduling info not given while new artifact is specified
            String msg = String.format("Scheduling info is not specified during Job Submit for cluster %s while new artifact %s is specified. Job Submit fails", jobClusterMetadata.getJobClusterDefinition().getName(), artifactName);
            logger.warn(msg);
            throw new Exception(msg);
        } else if(isNull(artifactName) && !isNull(version) && !schedulingInfoNotValid(schedulingInfo)) { // version is given & scheduling info is given
            // fetch JobCluster config for version and validate the given schedulingInfo is compatible
            Optional<JobClusterConfig> clusterConfigForVersion = getJobClusterConfigForVersion(jobClusterMetadata, version);
            if(!clusterConfigForVersion.isPresent()) {
                String msg = String.format("No Job Cluster config could be found for version %s in JobCluster %s. Job Submit fails", version, jobClusterMetadata.getJobClusterDefinition().getName());
                logger.warn(msg);
                throw new Exception(msg);
            }
            jobClusterConfig = clusterConfigForVersion.get();
            if(!validateSchedulingInfo(schedulingInfo, jobClusterConfig.getSchedulingInfo(), jobClusterMetadata)) {
                String msg = String.format("Given SchedulingInfo %s is incompatible with that associated with the given version %s in JobCluster %s. Job Submit fails", schedulingInfo, version, jobClusterMetadata.getJobClusterDefinition().getName());
                logger.warn(msg);
                throw new Exception(msg);
            }
            artifactName = jobClusterConfig.getArtifactName();
        } else if(isNull(artifactName) && !isNull(version) && schedulingInfoNotValid(schedulingInfo)) { // Only version is given
            // fetch JobCluster config for version and take both scheduling info and artifact from it
            Optional<JobClusterConfig> clusterConfigForVersion = getJobClusterConfigForVersion(jobClusterMetadata, version);
            if(!clusterConfigForVersion.isPresent()) {
                String msg = String.format("No Job Cluster config could be found for version %s in JobCluster %s. Job Submit fails", version, jobClusterMetadata.getJobClusterDefinition().getName());
                logger.warn(msg);
                throw new Exception(msg);
            }
            jobClusterConfig = clusterConfigForVersion.get();
            schedulingInfo = jobClusterConfig.getSchedulingInfo();
            artifactName = jobClusterConfig.getArtifactName();
        } else if(isNull(artifactName) && isNull(version) && !schedulingInfoNotValid(schedulingInfo)) { // only scheduling info is given
            // fetch latest Job Cluster config, set version/artifact to it,
            // and validate the given scheduling info is compatible
            jobClusterConfig = jobClusterMetadata.getJobClusterDefinition().getJobClusterConfig();
            version = jobClusterConfig.getVersion();
            artifactName = jobClusterConfig.getArtifactName();
            if(!validateSchedulingInfo(schedulingInfo, jobClusterConfig.getSchedulingInfo(), jobClusterMetadata)) {
                String msg = String.format("Given SchedulingInfo %s is incompatible with that associated with the given version %s in JobCluster %s which is %s. Job Submit fails", schedulingInfo, version, jobClusterMetadata.getJobClusterDefinition().getName(), jobClusterMetadata.getJobClusterDefinition().getJobClusterConfig().getSchedulingInfo());
                logger.warn(msg);
                throw new Exception(msg);
            }
        } else if(isNull(artifactName) && isNull(version) && schedulingInfoNotValid(schedulingInfo)){ // Nothing is given. Use the latest on the cluster
            jobClusterConfig = jobClusterMetadata.getJobClusterDefinition().getJobClusterConfig();
            version = jobClusterConfig.getVersion();
            schedulingInfo = jobClusterConfig.getSchedulingInfo();
            artifactName = jobClusterConfig.getArtifactName();
        } else {
            // exception should never get here.
            throw new Exception(String.format("Invalid case for resolveJobDefinition artifactName %s version %s schedulingInfo %s", artifactName, version, schedulingInfo));
        }

        logger.info("Resolved version {}, schedulingInfo {}, artifactName {}", version, schedulingInfo, artifactName);

        // final sanity check: all three must have been resolved by now
        if(isNull(artifactName) || isNull(version) || schedulingInfoNotValid(schedulingInfo)) {
            String msg = String.format(" SchedulingInfo %s or artifact %s or version %s could not be resolved in JobCluster %s. Job Submit fails", schedulingInfo, artifactName, version, jobClusterMetadata.getJobClusterDefinition().getName());
            logger.warn(msg);
            throw new Exception(msg);
        }

        return new JobDefinition.Builder()
                .from(resolvedJobDefn)
                .withParameters(parameters)
                .withLabels(labels)
                .withSchedulingInfo(schedulingInfo)
                .withUser(user)
                .withVersion(version)
                .withArtifactName(artifactName)
                .build();
    }

    /** Returns true if the scheduling info is absent or has no stages defined. */
    private static boolean schedulingInfoNotValid(SchedulingInfo schedulingInfo) {
        return schedulingInfo == null || schedulingInfo.getStages().isEmpty();
    }

    /** Returns true if the value is null, empty, or the literal string "null". */
    private static boolean isNull(String key) {
        return key == null || key.isEmpty() || key.equals("null");
    }

    /**
     * Lookup the job cluster config for the given version in the list of job cluster configs.
     * @param jobClusterMetadata cluster metadata holding the list of configs
     * @param version version string to match exactly
     * @return the first matching config, or empty (with a warning logged) if none exists
     */
    Optional<JobClusterConfig> getJobClusterConfigForVersion(final IJobClusterMetadata jobClusterMetadata, final String version) {
        Preconditions.checkNotNull(jobClusterMetadata, "JobClusterMetadata cannot be null");
        Preconditions.checkNotNull(version, "Version cannot be null");
        Optional<JobClusterConfig> config = jobClusterMetadata.getJobClusterDefinition()
                .getJobClusterConfigs()
                .stream()
                .filter((cfg) -> cfg.getVersion().equals(version))
                .findFirst();
        if(!config.isPresent()) {
            // unknown version
            logger.warn("No config with version {} found for Job Cluster {}. Job Submit fails", version, jobClusterMetadata.getJobClusterDefinition().getName());
        }
        return config;
    }

    /**
     * Compare given scheduling info with that configured for this artifact to make sure it is compatible
     * - Ensure number of stages match (stage 0, the job master stage, is excluded from the count)
     * @param givenSchedulingInfo scheduling info supplied at submit
     * @param configuredSchedulingInfo scheduling info stored with the cluster config
     * @param jobClusterMetadata cluster metadata (currently unused; see commented check below)
     * @return true if the stage counts are compatible
     * @throws Exception never thrown directly; declared for signature compatibility
     */
    private boolean validateSchedulingInfo(final SchedulingInfo givenSchedulingInfo, final SchedulingInfo configuredSchedulingInfo, final IJobClusterMetadata jobClusterMetadata) throws Exception {
        int givenNumStages = givenSchedulingInfo.getStages().size();
        int existingNumStages = configuredSchedulingInfo.getStages().size();
        // isReadyForJobMaster is not reliable, just check if stage 0 is defined and decrement overall count
        //if (jobClusterMetadata.getJobClusterDefinition().getIsReadyForJobMaster()) {
        if (givenSchedulingInfo.forStage(0) != null)
            givenNumStages--; // decrement to get net numStages without job master
        if (configuredSchedulingInfo.forStage(0) != null)
            existingNumStages--;
        //}
        if(givenNumStages != existingNumStages) {
            logger.warn("Mismatched scheduling info: expecting #stages=" +
                    existingNumStages + " for given jar version [" + " " +
                    "], where as, given scheduling info has #stages=" + givenNumStages);
            return false;
        }
        return true;
    }
}
| 8,042 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/IJobClusterMetadata.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster;
import io.mantisrx.server.master.domain.IJobClusterDefinition;
/**
 * Read-only view of a job cluster's stored metadata: its definition, the last
 * job count recorded for it, and whether it is disabled.
 */
public interface IJobClusterMetadata {

    /** Returns the cluster definition (configs, SLA, owner, parameters, labels). */
    IJobClusterDefinition getJobClusterDefinition();

    /** Returns the last job count recorded for this cluster. */
    long getLastJobCount();

    /** Returns true if the cluster is disabled. */
    boolean isDisabled();
}
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/PersistException.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster;
/**
 * Thrown when persisting job-cluster or job state to the underlying store fails.
 */
public class PersistException extends Exception {

    /** @param msg context describing what failed to persist */
    public PersistException(String msg) {
        super(msg);
    }

    /** @param e the underlying storage error */
    public PersistException(Throwable e) {
        super(e);
    }

    /**
     * Preferred constructor: keeps both a descriptive message and the root cause
     * so callers don't have to choose between context and the original stack trace.
     * @param msg context describing what failed to persist
     * @param cause the underlying storage error
     */
    public PersistException(String msg, Throwable cause) {
        super(msg, cause);
    }
}
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/SLAEnforcer.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster;
import io.mantisrx.master.jobcluster.JobClusterActor.JobInfo;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.domain.SLA;
import io.mantisrx.shaded.com.google.common.base.Preconditions;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.util.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Enforces a job cluster's SLA: computes how many jobs must be launched to meet the
 * minimum, and which running jobs should be terminated to respect the maximum.
 */
public class SLAEnforcer {
    private static final Logger logger = LoggerFactory.getLogger(SLAEnforcer.class);
    private final Optional<SLA> sla;
    // orders jobs by ascending job number; a null entry sorts after non-null ones
    private final Comparator<JobInfo> comparator = (o1, o2) -> {
        if (o2 == null)
            return -1;
        if (o1 == null)
            return 1;
        return Long.compare(o1.jobId.getJobNum(), o2.jobId.getJobNum());
    };

    public SLAEnforcer(SLA sla) {
        this.sla = Optional.ofNullable(sla);
    }

    /**
     * Computes how many additional jobs must be submitted to satisfy the SLA minimum.
     * @param activeJobsCount number of jobs currently active
     * @param acceptedJobsCount number of jobs accepted but not yet active
     * @return number of jobs to launch (0 if no min SLA is set or it is already satisfied)
     */
    public int enforceSLAMin(int activeJobsCount, int acceptedJobsCount) {
        Preconditions.checkArgument(activeJobsCount >= 0, "Invalid activeJobsCount " + activeJobsCount);
        // bug fix: the message previously printed activeJobsCount for an invalid acceptedJobsCount
        Preconditions.checkArgument(acceptedJobsCount >= 0, "Invalid acceptedJobsCount " + acceptedJobsCount);
        // if no min sla defined
        if (!sla.isPresent() || sla.get().getMin() == 0) {
            logger.debug("SLA min not set nothing to enforce");
            return 0;
        }
        int jobsInActiveOrSubmittedState = activeJobsCount + acceptedJobsCount;
        if (jobsInActiveOrSubmittedState < sla.get().getMin()) {
            int jobsToLaunch = sla.get().getMin() - jobsInActiveOrSubmittedState;
            logger.info("Submit {} jobs per sla min of {}", jobsToLaunch, sla.get().getMin());
            return jobsToLaunch;
        }
        logger.debug("SLA min already satisfied");
        return 0;
    }

    /**
     * Walk the set of jobs in descending order (newest jobs first) and track the number of
     * running jobs. Once this count equals slaMax, mark all remaining (older) jobs for deletion.
     *
     * @param list jobs in either running or accepted state (sorted internally by job number)
     * @return ids of the jobs to terminate (empty if no max SLA is set)
     */
    public List<JobId> enforceSLAMax(List<JobInfo> list) {
        Preconditions.checkNotNull(list, "runningOrAcceptedJobSet is null");
        List<JobId> jobsToDelete = Lists.newArrayList();
        // if no max sla defined;
        if (!sla.isPresent() || sla.get().getMax() == 0) {
            return jobsToDelete;
        }
        SortedSet<JobInfo> sortedJobSet = new TreeSet<>(comparator);
        sortedJobSet.addAll(list);
        // bug fix: sizing the array by list.size() could leave null padding (and NPEs below)
        // whenever the sorted set deduplicated entries; new JobInfo[0] sizes it exactly
        JobInfo[] jobIdArray = sortedJobSet.toArray(new JobInfo[0]);
        int activeJobCount = 0;
        int slaMax = sla.get().getMax();
        boolean addToDeleteList = false;
        for (int i = jobIdArray.length - 1; i >= 0; i--) {
            JobInfo jInfo = jobIdArray[i];
            if (addToDeleteList) {
                jobsToDelete.add(jInfo.jobId);
            } else {
                if (jInfo.state.equals(JobState.Launched)) {
                    activeJobCount++;
                    if (activeJobCount == slaMax) {
                        // slaMax newest launched jobs retained; everything older goes
                        addToDeleteList = true;
                    }
                }
            }
        }
        return jobsToDelete;
    }

    /** Returns true if a meaningful SLA (non-zero min or max) is configured. */
    public boolean hasSLA() {
        // note: the field is always non-null (Optional.ofNullable in the constructor),
        // so the old "sla == null" check after dereferencing was dead code
        return sla.isPresent() && (sla.get().getMin() != 0 || sla.get().getMax() != 0);
    }

    /**
     * For Testing
     * @param list jobs to sort
     * @return the jobs sorted by job number (ascending, per the comparator)
     */
    Set<JobInfo> sortJobsByIdDesc(List<JobInfo> list) {
        SortedSet<JobInfo> sortedJobSet = new TreeSet<>(comparator);
        sortedJobSet.addAll(list);
        return sortedJobSet;
    }
}
| 8,045 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/MantisJobClusterMetadataView.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.server.master.domain.DataFormatAdapter;
import io.mantisrx.server.master.domain.JobClusterConfig;
import io.mantisrx.server.master.domain.SLA;
import io.mantisrx.server.master.store.NamedJob;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonFilter;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.util.List;
import java.util.Objects;
@JsonFilter("topLevelFilter")
public class MantisJobClusterMetadataView {
private final String name;
private final List<NamedJob.Jar> jars;
private final NamedJob.SLA sla;
private final List<Parameter> parameters;
private final JobOwner owner;
private final long lastJobCount;
private final boolean disabled;
private final boolean isReadyForJobMaster;
private final WorkerMigrationConfig migrationConfig;
private final List<Label> labels;
private final boolean cronActive;
@JsonIgnore
private final String latestVersion;
/**
 * Creates the legacy-format view of a job cluster's metadata as exposed by the API.
 * All fields are taken verbatim from the JSON properties / caller-supplied values.
 * latestVersion is carried for internal use but excluded from serialization (@JsonIgnore).
 */
@JsonCreator
public MantisJobClusterMetadataView(@JsonProperty("name") String name, @JsonProperty("jars") List<NamedJob.Jar> jars, @JsonProperty("sla") NamedJob.SLA sla,
                                    @JsonProperty("parameters") List<Parameter> parameters, @JsonProperty("owner") JobOwner owner, @JsonProperty("lastJobCount") long lastJobCount,
                                    @JsonProperty("disabled") boolean disabled, @JsonProperty("isReadyForJobMaster") boolean isReadyForJobMaster, @JsonProperty("migrationConfig") WorkerMigrationConfig migrationConfig,
                                    @JsonProperty("labels") List<Label> labels, @JsonProperty("cronActive") boolean cronActive, @JsonProperty("latestVersion") String latestVersion) {
    this.name = name;
    this.jars = jars;
    this.sla = sla;
    this.parameters = parameters;
    this.owner = owner;
    this.lastJobCount = lastJobCount;
    this.disabled = disabled;
    this.isReadyForJobMaster = isReadyForJobMaster;
    this.migrationConfig = migrationConfig;
    this.labels = labels;
    this.cronActive = cronActive;
    this.latestVersion = latestVersion;
}
public String getName() {
return name;
}
public List<NamedJob.Jar> getJars() {
return jars;
}
public NamedJob.SLA getSla() {
return sla;
}
public List<Parameter> getParameters() {
return parameters;
}
public JobOwner getOwner() {
return owner;
}
public long getLastJobCount() {
return lastJobCount;
}
public boolean isDisabled() {
return disabled;
}
public boolean getIsReadyForJobMaster() {
return isReadyForJobMaster;
}
public WorkerMigrationConfig getMigrationConfig() {
return migrationConfig;
}
public List<Label> getLabels() {
return labels;
}
public boolean isCronActive() {
return cronActive;
}
public String getLatestVersion() {
return latestVersion;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
MantisJobClusterMetadataView that = (MantisJobClusterMetadataView) o;
return lastJobCount == that.lastJobCount &&
disabled == that.disabled &&
isReadyForJobMaster == that.isReadyForJobMaster &&
cronActive == that.cronActive &&
Objects.equals(name, that.name) &&
Objects.equals(jars, that.jars) &&
Objects.equals(sla, that.sla) &&
Objects.equals(parameters, that.parameters) &&
Objects.equals(owner, that.owner) &&
Objects.equals(migrationConfig, that.migrationConfig) &&
Objects.equals(labels, that.labels) &&
Objects.equals(latestVersion, that.latestVersion);
}
@Override
public int hashCode() {
return Objects.hash(name, jars, sla, parameters, owner, lastJobCount, disabled, isReadyForJobMaster, migrationConfig, labels, cronActive, latestVersion);
}
@Override
public String toString() {
return "MantisJobClusterMetadataView{" +
"name='" + name + '\'' +
", jars=" + jars +
", sla=" + sla +
", parameters=" + parameters +
", owner=" + owner +
", lastJobCount=" + lastJobCount +
", disabled=" + disabled +
", isReadyForJobMaster=" + isReadyForJobMaster +
", migrationConfig=" + migrationConfig +
", labels=" + labels +
", cronActive=" + cronActive +
", latestVersion='" + latestVersion + '\'' +
'}';
}
public static class Builder {
private String name;
private List<NamedJob.Jar> jars = Lists.newArrayList();
private NamedJob.SLA sla;
private List<Parameter> parameters = Lists.newArrayList();
private JobOwner owner;
private long lastJobCount;
private boolean disabled = false;
private boolean isReadyForJobMaster = true;
private WorkerMigrationConfig migrationConfig;
private List<Label> labels = Lists.newArrayList();
private boolean cronActive = false;
private String latestVersion;
public Builder() {
}
public Builder withName(String name) {
this.name = name;
return this;
}
public Builder withJars(List<JobClusterConfig> jars) {
this.jars = DataFormatAdapter.convertJobClusterConfigsToJars(jars);
return this;
}
public Builder withSla(SLA sla) {
this.sla = DataFormatAdapter.convertSLAToNamedJobSLA(sla);
return this;
}
public Builder withParameters(List<Parameter> params) {
this.parameters = params;
return this;
}
public Builder withJobOwner(JobOwner owner) {
this.owner = owner;
return this;
}
public Builder withLastJobCount(long cnt) {
this.lastJobCount = cnt;
return this;
}
public Builder withDisabled(boolean disabled) {
this.disabled = disabled;
return this;
}
public Builder withIsReadyForJobMaster(boolean isReadyForJobMaster) {
this.isReadyForJobMaster = isReadyForJobMaster;
return this;
}
public Builder withMigrationConfig(WorkerMigrationConfig config) {
this.migrationConfig = config;
return this;
}
public Builder withLabels(List<Label> labels) {
this.labels = labels;
return this;
}
public Builder isCronActive(boolean cronActive) {
this.cronActive = cronActive;
return this;
}
public Builder withLatestVersion(String version) {
this.latestVersion = version;
return this;
}
public MantisJobClusterMetadataView build() {
Preconditions.checkNotNull(name, "name cannot be null");
Preconditions.checkNotNull(jars, "Jars cannot be null");
Preconditions.checkArg(!jars.isEmpty(),"Jars cannot be empty");
Preconditions.checkNotNull(latestVersion, "version cannot be null");
return new MantisJobClusterMetadataView(name,jars,sla,parameters,owner,lastJobCount,disabled,isReadyForJobMaster,migrationConfig,labels,cronActive,latestVersion);
}
}
}
| 8,046 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/WorkerInfoListHolder.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.server.master.domain.JobId;
import java.util.List;
/**
 * Immutable pairing of a job id with the metadata of that job's workers.
 */
public class WorkerInfoListHolder {
    private final JobId jobId;
    private final List<IMantisWorkerMetadata> workerMetadataList;

    public WorkerInfoListHolder(JobId jobId, List<IMantisWorkerMetadata> workerMetadataList) {
        this.jobId = jobId;
        this.workerMetadataList = workerMetadataList;
    }

    /** @return the job these workers belong to */
    public JobId getJobId() {
        return jobId;
    }

    /** @return metadata for the job's workers (as supplied at construction) */
    public List<IMantisWorkerMetadata> getWorkerMetadataList() {
        return workerMetadataList;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("WorkerInfoListHolder{");
        sb.append(" jobId=").append(jobId);
        sb.append(", workerMetadataList=").append(workerMetadataList);
        sb.append('}');
        return sb.toString();
    }
}
| 8,047 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/JobListHelper.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster;
import static java.util.Optional.empty;
import static java.util.Optional.ofNullable;
import io.mantisrx.master.jobcluster.JobClusterActor.JobInfo;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl.CompletedJob;
import io.mantisrx.server.master.domain.JobId;
import java.util.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Helper for job-list related queries on a job cluster.
 */
public class JobListHelper {

    private static final Logger logger = LoggerFactory.getLogger(JobListHelper.class);

    /**
     * Finds the id of the most recently submitted job: the JobInfo with the highest job number
     * among the active jobs, or — only when there are no active jobs — the CompletedJob with the
     * highest job number.
     *
     * @param existingJobsList active (non-terminal) jobs of the cluster
     * @param completedJobs    completed jobs of the cluster
     * @return the last submitted JobId, or {@code Optional.empty()} if both lists are empty
     */
    public static Optional<JobId> getLastSubmittedJobId(final List<JobInfo> existingJobsList,
                                                        final List<CompletedJob> completedJobs) {
        if (logger.isTraceEnabled()) {
            logger.trace("Entering getLastSubmittedJobDefinition existing jobs {} completedJobs {}",
                    existingJobsList.size(), completedJobs.size());
        }
        long highestJobNumber = -1;
        JobInfo latestActiveJob = null;
        for (JobInfo jInfo : existingJobsList) {
            if (jInfo.jobId.getJobNum() > highestJobNumber) {
                highestJobNumber = jInfo.jobId.getJobNum();
                latestActiveJob = jInfo;
            }
        }
        if (latestActiveJob != null) {
            if (logger.isDebugEnabled()) {
                logger.debug("Highest Active job number: {}", highestJobNumber);
            }
            return ofNullable(latestActiveJob.jobId);
        }
        // No active jobs; fall back to searching the completed jobs.
        CompletedJob latestCompletedJob = null;
        for (CompletedJob cJob : completedJobs) {
            // Completed jobs store the id as a String; skip entries that don't parse.
            Optional<JobId> completedJobId = JobId.fromId(cJob.getJobId());
            if (completedJobId.isPresent() && completedJobId.get().getJobNum() > highestJobNumber) {
                highestJobNumber = completedJobId.get().getJobNum();
                latestCompletedJob = cJob;
            }
        }
        if (latestCompletedJob != null) {
            if (logger.isDebugEnabled()) {
                logger.debug("Highest completed job number: {}", highestJobNumber);
            }
            return JobId.fromId(latestCompletedJob.getJobId());
        }
        return empty();
    }
}
| 8,048 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/proto/JobArtifactProto.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.proto;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.server.core.domain.ArtifactID;
import io.mantisrx.server.core.domain.JobArtifact;
import java.util.List;
import lombok.EqualsAndHashCode;
import lombok.Value;
/**
 * Actor request/response messages for querying and registering job artifacts.
 * All messages are immutable Lombok {@code @Value} types extending the common
 * BaseRequest/BaseResponse protocol classes.
 */
public class JobArtifactProto {
    /**
     * Request to search job artifacts by name and (optionally) version.
     */
    @EqualsAndHashCode(callSuper = true)
    @Value
    public static class SearchJobArtifactsRequest extends BaseRequest {
        String name;
        // May be null; semantics of a null version are decided by the handler.
        String version;
        public SearchJobArtifactsRequest(String name, String version) {
            super();
            Preconditions.checkNotNull(name, "JobArtifact name cannot be null");
            this.name = name;
            this.version = version;
        }
    }
    /**
     * Response carrying the artifacts matched by a SearchJobArtifactsRequest.
     */
    @EqualsAndHashCode(callSuper = true)
    @Value
    public static class SearchJobArtifactsResponse extends BaseResponse {
        List<JobArtifact> jobArtifacts;
        // TODO(fdichiara): add paginated list.
        public SearchJobArtifactsResponse(
            long requestId,
            ResponseCode responseCode,
            String message,
            List<JobArtifact> jobArtifacts) {
            super(requestId, responseCode, message);
            this.jobArtifacts = jobArtifacts;
        }
    }
    /**
     * Request to list artifact names filtered by prefix and/or substring.
     */
    @EqualsAndHashCode(callSuper = true)
    @Value
    public static class ListJobArtifactsByNameRequest extends BaseRequest {
        String prefix;
        String contains;
        public ListJobArtifactsByNameRequest(String prefix, String contains) {
            super();
            this.prefix = prefix;
            this.contains = contains;
        }
    }
    /**
     * Response carrying the artifact names matched by a ListJobArtifactsByNameRequest.
     */
    @EqualsAndHashCode(callSuper = true)
    @Value
    public static class ListJobArtifactsByNameResponse extends BaseResponse {
        List<String> names;
        // TODO(fdichiara): add paginated list.
        public ListJobArtifactsByNameResponse(
            long requestId,
            ResponseCode responseCode,
            String message,
            List<String> names) {
            super(requestId, responseCode, message);
            this.names = names;
        }
    }
    /**
     * Request to insert or update a job artifact record.
     */
    @EqualsAndHashCode(callSuper = true)
    @Value
    public static class UpsertJobArtifactRequest extends BaseRequest {
        JobArtifact jobArtifact;
        public UpsertJobArtifactRequest(final JobArtifact jobArtifact) {
            super();
            Preconditions.checkNotNull(jobArtifact, "JobArtifact cannot be null");
            this.jobArtifact = jobArtifact;
        }
    }
    /**
     * Response to an UpsertJobArtifactRequest carrying the id of the upserted artifact.
     */
    @EqualsAndHashCode(callSuper = true)
    @Value
    public static class UpsertJobArtifactResponse extends BaseResponse {
        ArtifactID artifactID;
        public UpsertJobArtifactResponse(
            final long requestId,
            final ResponseCode responseCode,
            final String message,
            final ArtifactID artifactID
        ) {
            super(requestId, responseCode, message);
            this.artifactID = artifactID;
        }
    }
}
| 8,049 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/proto/JobClusterProto.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.proto;
import static java.util.Optional.ofNullable;
import akka.actor.ActorRef;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl.CompletedJob;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.time.Instant;
import java.util.List;
import java.util.Optional;
/**
 * Messages exchanged between the JobClustersManagerActor and JobCluster actors
 * (cluster initialization/deletion, job kill, SLA enforcement, periodic bookkeeping).
 */
public class JobClusterProto {
    /**
     * This message is sent to a JobCluster Actor from
     * 1. JobClustersManagerActor during bootstrap - in which case the Job Cluster Actor will create and initialize the list of jobs passed in this message
     * 2. JobClustersManagerActor on receiving a CreateJobClusterRequest from the user - in which case the job cluster actor will persist to storage
     * @author njoshi
     *
     */
    public static final class InitializeJobClusterRequest extends BaseRequest {
        public final JobClusterDefinitionImpl jobClusterDefinition;
        // Actor to which the InitializeJobClusterResponse should be sent.
        public final ActorRef requestor;
        public final String user;
        public final boolean isDisabled;
        public final long lastJobNumber;
        // true when the cluster must be persisted to storage (user creation);
        // false during bootstrap, when it already exists in the store.
        public final boolean createInStore;
        public final List<IMantisJobMetadata> jobList;
        public final List<CompletedJob> completedJobsList;

        /**
         * Invoked directly during bootstrap with the previously persisted state.
         *
         * @param jobClusterDefinition cluster definition; must not be null
         * @param isDisabled           whether the cluster is currently disabled
         * @param lastJobNumber        highest job number submitted so far
         * @param jobList              active jobs to re-initialize
         * @param completedJobsList    previously completed jobs
         * @param user                 user on whose behalf the request is made
         * @param requestor            actor to reply to
         * @param createInStore        whether to persist the cluster to storage
         */
        public InitializeJobClusterRequest(final JobClusterDefinitionImpl jobClusterDefinition, boolean isDisabled, long lastJobNumber,
                List<IMantisJobMetadata> jobList, List<CompletedJob> completedJobsList, String user, ActorRef requestor, boolean createInStore) {
            super();
            Preconditions.checkNotNull(jobClusterDefinition, "JobClusterDefn cannot be null");
            this.jobClusterDefinition = jobClusterDefinition;
            this.user = user;
            this.requestor = requestor;
            this.createInStore = createInStore;
            this.isDisabled = isDisabled;
            this.lastJobNumber = lastJobNumber;
            this.jobList = jobList;
            this.completedJobsList = completedJobsList;
        }

        /**
         * Invoked during job cluster creation: enabled, no prior jobs, persisted to store.
         */
        public InitializeJobClusterRequest(final JobClusterDefinitionImpl jobClusterDefinition, String user, ActorRef requestor) {
            this(jobClusterDefinition, false, 0, Lists.newArrayList(), Lists.newArrayList(), user, requestor, true);
        }

        @Override
        public String toString() {
            return "InitializeJobClusterRequest{" +
                    "jobClusterDefinition=" + jobClusterDefinition +
                    ", requestor=" + requestor +
                    ", user='" + user + '\'' +
                    ", isDisabled=" + isDisabled +
                    ", lastJobNumber=" + lastJobNumber +
                    ", createInStore=" + createInStore +
                    ", jobList=" + jobList +
                    ", completedJobsList=" + completedJobsList +
                    '}';
        }
    }

    /**
     * Indicates whether a job cluster was initialized successfully.
     * Typical failures include being unable to write to the store.
     * @author njoshi
     */
    public static final class InitializeJobClusterResponse extends BaseResponse {
        public final ActorRef requestor;
        public final String jobClusterName;

        public InitializeJobClusterResponse(final long requestId,
                                            final ResponseCode responseCode,
                                            final String message,
                                            final String jobClusterName,
                                            final ActorRef requestor) {
            super(requestId, responseCode, message);
            this.requestor = requestor;
            this.jobClusterName = jobClusterName;
        }
    }

    /**
     * Deletes all records associated with this job cluster in the store and terminates the
     * cluster actor. Only allowed if there are no jobs currently running.
     * @author njoshi
     */
    public static final class DeleteJobClusterRequest extends BaseRequest {
        public final String jobClusterName;
        public final String user;
        // Actor to which the DeleteJobClusterResponse should be sent.
        public final ActorRef requestingActor;

        public DeleteJobClusterRequest(final String user, final String name, final ActorRef requestor) {
            super();
            this.jobClusterName = name;
            this.user = user;
            this.requestingActor = requestor;
        }
    }

    /**
     * Indicates whether the delete was successful.
     * @author njoshi
     */
    public static final class DeleteJobClusterResponse extends BaseResponse {
        public final ActorRef requestingActor;
        public final String clusterName;

        public DeleteJobClusterResponse(long requestId, ResponseCode responseCode, String message, ActorRef requestingActor, String clusterName) {
            super(requestId, responseCode, message);
            this.requestingActor = requestingActor;
            this.clusterName = clusterName;
        }

        public ActorRef getRequestingActor() {
            return requestingActor;
        }

        public String getClusterName() {
            return clusterName;
        }
    }

    /**
     * Request to kill a running job of this cluster.
     */
    public static final class KillJobRequest extends BaseRequest {
        public final JobId jobId;
        // Free-form, user-supplied reason for the kill.
        public final String reason;
        public final JobCompletedReason jobCompletedReason;
        public final String user;
        public final ActorRef requestor;

        public KillJobRequest(final JobId jobId,
                              final String reason,
                              final JobCompletedReason jobCompletedReason,
                              final String user,
                              final ActorRef requestor) {
            super();
            this.jobId = jobId;
            this.reason = reason;
            this.jobCompletedReason = jobCompletedReason;
            this.user = user;
            this.requestor = requestor;
        }

        @Override
        public String toString() {
            return "KillJobRequest [jobId=" + jobId + ", reason=" + reason + ", user=" + user + ", requestor="
                    + requestor + "]";
        }
    }

    /**
     * Response to a KillJobRequest carrying the job's state and (when available) its metadata.
     */
    public static final class KillJobResponse extends BaseResponse {
        public final JobId jobId;
        public final ActorRef requestor;
        public final JobState state;
        public final String user;
        // Present when the job's metadata was available at kill time.
        public final Optional<IMantisJobMetadata> jobMetadata;

        public KillJobResponse(long requestId, ResponseCode responseCode, JobState state, String message, JobId jobId, IMantisJobMetadata jobMeta, String user,
                               final ActorRef requestor) {
            super(requestId, responseCode, message);
            this.jobId = jobId;
            this.requestor = requestor;
            this.state = state;
            this.user = user;
            this.jobMetadata = ofNullable(jobMeta);
        }

        @Override
        public String toString() {
            return "KillJobResponse{" +
                    "jobId=" + jobId +
                    ", requestor=" + requestor +
                    ", state=" + state +
                    ", user='" + user + '\'' +
                    ", jobMetadata=" + jobMetadata +
                    '}';
        }
    }

    /** Event signalling that the given job has transitioned to started. */
    public static final class JobStartedEvent {
        public final JobId jobid;

        public JobStartedEvent(JobId jobId) {
            this.jobid = jobId;
        }

        @Override
        public String toString() {
            return "JobStartedEvent [jobid=" + jobid + "]";
        }
    }

    /**
     * Asks the cluster actor to enforce its SLA at the given instant, optionally using the
     * supplied job definition for any jobs that need to be launched.
     */
    public static final class EnforceSLARequest {
        public final Instant timeOfEnforcement;
        public final Optional<JobDefinition> jobDefinitionOp;

        public EnforceSLARequest() {
            this(Instant.now(), Optional.empty());
        }

        public EnforceSLARequest(Instant now) {
            this(now, Optional.empty());
        }

        public EnforceSLARequest(Instant now, Optional<JobDefinition> jobDefnOp) {
            this.timeOfEnforcement = now;
            this.jobDefinitionOp = jobDefnOp;
        }
    }

    /** Asks the cluster actor to expire old (completed) jobs as of the given instant. */
    public static final class ExpireOldJobsRequest {
        public final Instant timeOfEnforcement;

        public ExpireOldJobsRequest() {
            this(Instant.now());
        }

        public ExpireOldJobsRequest(Instant now) {
            this.timeOfEnforcement = now;
        }
    }

    /** Periodic bookkeeping tick for the cluster actor. */
    public static final class BookkeepingRequest {
        public final Instant time;

        public BookkeepingRequest(Instant time) {
            this.time = time;
        }

        public BookkeepingRequest() {
            this(Instant.now());
        }
    }

    /** Tick asking the cluster actor to trigger its cron schedule. */
    public static final class TriggerCronRequest {
        public final Instant time;

        public TriggerCronRequest(Instant time) {
            this.time = time;
        }

        public TriggerCronRequest() {
            this(Instant.now());
        }
    }
}
| 8,050 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/proto/JobProto.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.proto;
import akka.actor.ActorRef;
import io.mantisrx.server.master.domain.JobId;
import java.time.Instant;
/**
 * Messages handled by the JobActor: initialization, heartbeat checks, runtime limits and
 * disabled-VM worker migration.
 */
public class JobProto {

    /** Marker interface for named job lifecycle events. */
    public interface JobEvent {
        public String getName();
    }

    /** Asks a newly created JobActor to initialize itself. */
    public static final class InitJob extends BaseRequest {
        // Actor to which the JobInitialized response should be sent.
        public final ActorRef requstor;
        // true for a fresh submission; false when re-initializing from the store.
        public final boolean isSubmit;

        public InitJob(ActorRef requestor) {
            this(requestor, true);
        }

        public InitJob(ActorRef requestor, boolean isSubmit) {
            this.requstor = requestor;
            this.isSubmit = isSubmit;
        }

        @Override
        public String toString() {
            return "InitJob{" +
                    "requstor=" + requstor +
                    ", isSubmit=" + isSubmit +
                    ", requestId=" + requestId +
                    '}';
        }
    }

    /** Response to InitJob indicating whether the job actor initialized successfully. */
    public static final class JobInitialized extends BaseResponse {
        public final JobId jobId;
        public final ActorRef requestor;

        public JobInitialized(final long requestId,
                              final ResponseCode responseCode,
                              final String message, JobId jobId, ActorRef requestor) {
            super(requestId, responseCode, message);
            this.jobId = jobId;
            this.requestor = requestor;
        }

        @Override
        public String toString() {
            return "JobInitialized{" +
                    "jobId=" + jobId +
                    ", requestor=" + requestor +
                    ", requestId=" + requestId +
                    ", responseCode=" + responseCode +
                    ", message='" + message + '\'' +
                    '}';
        }
    }

    /////////////////////////////////// JOB Related Messages ///////////////////////////////////////////////

    /** Signals that the job has hit its configured runtime limit. */
    public static final class RuntimeLimitReached {
    }

    /** Periodic tick asking the JobActor to check worker heartbeats. */
    public static final class CheckHeartBeat {
        // Optional fixed timestamp; when unset, getTime() falls back to Instant.now().
        Instant n = null;

        public CheckHeartBeat() {
        }

        public CheckHeartBeat(Instant now) {
            n = now;
        }

        public Instant getTime() {
            if (n == null) {
                return Instant.now();
            } else {
                return n;
            }
        }
    }

    /** Asks the JobActor to publish worker assignments if they changed since the last publish. */
    public static final class SendWorkerAssignementsIfChanged {
    }

    /** Periodic tick asking the JobActor to migrate workers off disabled VMs. */
    public static final class MigrateDisabledVmWorkersRequest {
        public final Instant time;

        public MigrateDisabledVmWorkersRequest(Instant time) {
            this.time = time;
        }

        public MigrateDisabledVmWorkersRequest() {
            this(Instant.now());
        }
    }

    /** Asks the JobActor to terminate itself. */
    public static class SelfDestructRequest {
    }
}
| 8,051 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/proto/BaseResponse.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.proto;
import com.netflix.spectator.impl.Preconditions;
/**
 * Base class for all actor response messages; pairs the originating request id with an
 * HTTP-status-like response code and a human-readable message.
 */
public class BaseResponse {

    /** HTTP-status-like outcome codes for actor responses. */
    public enum ResponseCode {
        SUCCESS(200),
        SUCCESS_CREATED(201),
        CLIENT_ERROR(400),
        CLIENT_ERROR_NOT_FOUND(404),
        OPERATION_NOT_ALLOWED(405),
        CLIENT_ERROR_CONFLICT(409),
        SERVER_ERROR(500);

        private final int value;

        ResponseCode(int code) {
            this.value = code;
        }

        /** @return the numeric (HTTP-style) code for this outcome */
        public int getValue() {
            return value;
        }
    }

    // Correlates this response with the request that produced it.
    public final long requestId;
    public final ResponseCode responseCode;
    public final String message;

    /**
     * @param requestId    id of the originating request
     * @param responseCode outcome code; must not be null
     * @param message      human-readable detail
     */
    public BaseResponse(
        final long requestId,
        final ResponseCode responseCode,
        final String message) {
        Preconditions.checkNotNull(responseCode, "Response code cannot be null");
        this.requestId = requestId;
        this.responseCode = responseCode;
        this.message = message;
    }
}
| 8,052 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/proto/JobClusterManagerProto.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.proto;
import akka.actor.ActorRef;
import akka.http.javadsl.model.Uri;
import com.mantisrx.common.utils.LabelUtils;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.common.Label;
import io.mantisrx.master.api.akka.route.pagination.ListObject;
import io.mantisrx.master.api.akka.route.proto.JobClusterProtoAdapter.JobIdInfo;
import io.mantisrx.master.jobcluster.MantisJobClusterMetadataView;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.master.jobcluster.job.MantisJobMetadataView;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.master.domain.IJobClusterDefinition;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl.CompletedJob;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.scheduler.MantisSchedulerFactory;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.google.common.base.Strings;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.time.Instant;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Function;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
import rx.subjects.BehaviorSubject;
public class JobClusterManagerProto {
    /**
     * Request to create a new job cluster from the given definition on behalf of {@code user}.
     */
    public static final class CreateJobClusterRequest extends BaseRequest {
        private final JobClusterDefinitionImpl jobClusterDefinition;
        private final String user;
        public CreateJobClusterRequest(
            final JobClusterDefinitionImpl jobClusterDefinition,
            String user) {
            super();
            Preconditions.checkNotNull(jobClusterDefinition, "JobClusterDefn cannot be null");
            this.jobClusterDefinition = jobClusterDefinition;
            this.user = user;
        }
        public JobClusterDefinitionImpl getJobClusterDefinition() {
            return jobClusterDefinition;
        }
        public String getUser() {
            return user;
        }
    }
public static final class ReconcileJobCluster {
public final Instant timeOfEnforcement;
public ReconcileJobCluster(Instant now) {
timeOfEnforcement = now;
}
public ReconcileJobCluster() {
timeOfEnforcement = Instant.now();
}
}
public static final class CreateJobClusterResponse extends BaseResponse {
private final String jobClusterName;
public CreateJobClusterResponse(
final long requestId,
final ResponseCode responseCode,
final String message,
final String jobClusterName
) {
super(requestId, responseCode, message);
this.jobClusterName = jobClusterName;
}
public String getJobClusterName() {
return jobClusterName;
}
@Override
public String toString() {
return "CreateJobClusterResponse{" +
"jobClusterName='" + jobClusterName + '\'' +
", requestId=" + requestId +
", responseCode=" + responseCode +
", message='" + message + '\'' +
'}';
}
}
public static final class DeleteJobClusterRequest extends BaseRequest {
private final String name;
private final String user;
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public DeleteJobClusterRequest(
@JsonProperty("user") final String user,
@JsonProperty("name") final String name) {
super();
Preconditions.checkArg(user != null & !user.isEmpty(), "Must provide user in request");
Preconditions.checkArg(
name != null & !name.isEmpty(),
"Must provide job cluster name in request");
this.user = user;
this.name = name;
}
public String getName() {
return name;
}
public String getUser() {
return user;
}
}
    /** Response indicating the outcome of a DeleteJobClusterRequest. */
    public static final class DeleteJobClusterResponse extends BaseResponse {
        public DeleteJobClusterResponse(long requestId, ResponseCode responseCode, String message) {
            super(requestId, responseCode, message);
        }
    }
    /**
     * Asks the JobClustersManagerActor to initialize itself with the given scheduler factory,
     * optionally reloading existing job clusters and jobs from the store.
     */
    public static final class JobClustersManagerInitialize extends BaseRequest {
        private final MantisSchedulerFactory schedulerFactory;
        private final boolean loadJobsFromStore;
        public JobClustersManagerInitialize(
            final MantisSchedulerFactory schedulerFactory,
            final boolean loadJobsFromStore) {
            Preconditions.checkNotNull(schedulerFactory, "MantisScheduler cannot be null");
            this.schedulerFactory = schedulerFactory;
            this.loadJobsFromStore = loadJobsFromStore;
        }
        // NOTE(review): despite the name, this returns the scheduler *factory*, not a scheduler.
        public MantisSchedulerFactory getScheduler() {
            return schedulerFactory;
        }
        public boolean isLoadJobsFromStore() {
            return loadJobsFromStore;
        }
    }
public static final class JobClustersManagerInitializeResponse extends BaseResponse {
public JobClustersManagerInitializeResponse(
long requestId,
ResponseCode responseCode,
String message) {
super(requestId, responseCode, message);
}
@Override
public String toString() {
return "JobClustersManagerInitializeResponse{" +
"requestId=" + requestId +
", responseCode=" + responseCode +
", message='" + message + '\'' +
'}';
}
}
/**
* Get a list of all job clusters in the system
*
* @author njoshi
*/
    public static final class ListJobClustersRequest extends BaseRequest {
        /** Parameterless marker request; the receiver returns all known job clusters. */
        public ListJobClustersRequest() {
            super();
        }
    }
public static final class ListJobClustersResponse extends BaseResponse {
private final List<MantisJobClusterMetadataView> jobClusters;
public ListJobClustersResponse(
long requestId,
ResponseCode responseCode,
String message,
List<MantisJobClusterMetadataView> jobClusters) {
super(requestId, responseCode, message);
this.jobClusters = jobClusters;
}
public List<MantisJobClusterMetadataView> getJobClusters() {
return jobClusters;
}
public ListObject<MantisJobClusterMetadataView> getJobClusters(
String regexMatcher,
Integer limit,
Integer offset,
String sortField,
Boolean sortAscending,
Uri uri) {
List<MantisJobClusterMetadataView> targetJobClusters = jobClusters;
if (!Strings.isNullOrEmpty(regexMatcher)) {
Pattern matcher = Pattern.compile(regexMatcher, Pattern.CASE_INSENSITIVE);
targetJobClusters = targetJobClusters.stream()
.filter(jobCluster -> matcher.matcher(jobCluster.getName()).find())
.collect(Collectors.toList());
}
ListObject.Builder<MantisJobClusterMetadataView> builder =
new ListObject.Builder<MantisJobClusterMetadataView>()
.withObjects(targetJobClusters, MantisJobClusterMetadataView.class);
if (limit != null) {
builder = builder.withLimit(limit);
}
if (offset != null) {
builder = builder.withOffset(offset);
}
if (sortField != null) {
builder = builder.withSortField(sortField);
}
if (sortAscending != null) {
builder = builder.withSortAscending(sortAscending);
}
if (uri != null) {
builder = builder.withUri(uri);
}
return builder.build();
}
@Override
public String toString() {
return "ListJobClustersResponse{" +
"jobClusters=" + jobClusters +
", requestId=" + requestId +
", responseCode=" + responseCode +
", message='" + message + '\'' +
'}';
}
}
/**
* Invoked by user to update a job cluster
*
* @author njoshi
*/
public static final class UpdateJobClusterRequest extends BaseRequest {
private final JobClusterDefinitionImpl jobClusterDefinition;
private final String user;
public UpdateJobClusterRequest(
final JobClusterDefinitionImpl jobClusterDefinition,
String user) {
Preconditions.checkNotNull(jobClusterDefinition, "JobClusterDefinition cannot be null");
Preconditions.checkArg(user != null & !user.isEmpty(), "Must provide user in request");
this.jobClusterDefinition = jobClusterDefinition;
this.user = user;
}
public JobClusterDefinitionImpl getJobClusterDefinition() {
return jobClusterDefinition;
}
public String getUser() {
return user;
}
@Override
public String toString() {
return "UpdateJobClusterRequest{" +
"jobClusterDefinition=" + jobClusterDefinition +
", user='" + user + '\'' +
", requestId=" + requestId +
'}';
}
}
/**
* Indicates whether an update was successful
*
* @author njoshi
*/
public static final class UpdateJobClusterResponse extends BaseResponse {
public UpdateJobClusterResponse(
final long requestId,
final ResponseCode responseCode,
final String message) {
super(requestId, responseCode, message);
}
@Override
public String toString() {
return "UpdateJobClusterResponse{" +
"requestId=" + requestId +
", responseCode=" + responseCode +
", message='" + message + '\'' +
'}';
}
}
/**
* Updates the SLA for the job cluster with an optional force enable option if cluster is disabled
*
* @author njoshi
*/
public static final class UpdateJobClusterSLARequest extends BaseRequest {
private final String clusterName;
private final int min;
private final int max;
private final String cronSpec;
private final IJobClusterDefinition.CronPolicy cronPolicy;
private final boolean forceEnable;
private final String user;
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public UpdateJobClusterSLARequest(
@JsonProperty("name") final String name,
@JsonProperty("min") final Integer min,
@JsonProperty("max") final Integer max,
@JsonProperty("cronspec") final String cronSpec,
@JsonProperty("cronpolicy") final IJobClusterDefinition.CronPolicy cronPolicy,
@JsonProperty(value = "forceenable", defaultValue = "false") boolean forceEnable,
@JsonProperty("user") final String user) {
Preconditions.checkNotNull(min, "min");
Preconditions.checkNotNull(max, "max");
Preconditions.checkArg(user != null & !user.isEmpty(), "Must provide user in request");
Preconditions.checkArg(
name != null & !name.isEmpty(),
"Must provide job cluster name in request");
this.clusterName = name;
this.max = max;
this.min = min;
this.cronSpec = cronSpec;
this.cronPolicy = cronPolicy;
this.forceEnable = forceEnable;
this.user = user;
}
public UpdateJobClusterSLARequest(
final String name,
final int min,
final int max,
final String user) {
this(name, min, max, null, null, false, user);
}
public String getClusterName() {
return clusterName;
}
public int getMin() {
return min;
}
public int getMax() {
return max;
}
public String getCronSpec() {
return cronSpec;
}
public IJobClusterDefinition.CronPolicy getCronPolicy() {
return cronPolicy;
}
public boolean isForceEnable() {
return forceEnable;
}
public String getUser() {
return user;
}
@Override
public String toString() {
return "UpdateJobClusterSLARequest{" +
"clusterName='" + clusterName + '\'' +
", min=" + min +
", max=" + max +
", cronSpec='" + cronSpec + '\'' +
", cronPolicy=" + cronPolicy +
", forceEnable=" + forceEnable +
", user='" + user + '\'' +
", requestId=" + requestId +
'}';
}
}
    /** Acknowledges an {@code UpdateJobClusterSLARequest}. */
    public static final class UpdateJobClusterSLAResponse extends BaseResponse {
        public UpdateJobClusterSLAResponse(
            final long requestId,
            final ResponseCode responseCode,
            final String message) {
            super(requestId, responseCode, message);
        }
        @Override
        public String toString() {
            return "UpdateJobClusterSLAResponse{" +
                "requestId=" + requestId +
                ", responseCode=" + responseCode +
                ", message='" + message + '\'' +
                '}';
        }
    }
public static final class UpdateJobClusterLabelsRequest extends BaseRequest {
private final List<Label> labels;
private final String user;
private final String clusterName;
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public UpdateJobClusterLabelsRequest(
@JsonProperty("name") final String clusterName,
@JsonProperty("labels") final List<Label> labels,
@JsonProperty("user") final String user) {
Preconditions.checkNotNull(labels, "labels");
Preconditions.checkArg(user != null & !user.isEmpty(), "Must provide user in request");
Preconditions.checkArg(
clusterName != null & !clusterName.isEmpty(),
"Must provide job cluster name in request");
this.labels = labels;
this.user = user;
this.clusterName = clusterName;
}
public List<Label> getLabels() {
return labels;
}
public String getUser() {
return user;
}
public String getClusterName() {
return clusterName;
}
@Override
public String toString() {
return "UpdateJobClusterLabelsRequest{" +
"labels=" + labels +
", user='" + user + '\'' +
", clusterName='" + clusterName + '\'' +
", requestId=" + requestId +
'}';
}
}
    /** Acknowledges an {@code UpdateJobClusterLabelsRequest}. */
    public static final class UpdateJobClusterLabelsResponse extends BaseResponse {
        public UpdateJobClusterLabelsResponse(
            final long requestId,
            final ResponseCode responseCode,
            final String message) {
            super(requestId, responseCode, message);
        }
        @Override
        public String toString() {
            return "UpdateJobClusterLabelsResponse{" +
                "requestId=" + requestId +
                ", responseCode=" + responseCode +
                ", message='" + message + '\'' +
                '}';
        }
    }
@ToString
@EqualsAndHashCode
@Getter
public static final class UpdateSchedulingInfoRequest extends BaseRequest {
private final SchedulingInfo schedulingInfo;
private final String version;
public UpdateSchedulingInfoRequest(
@JsonProperty("schedulingInfo") SchedulingInfo schedulingInfo,
@JsonProperty("version") final String version) {
this.schedulingInfo = schedulingInfo;
this.version = version;
}
}
public static final class UpdateJobClusterArtifactRequest extends BaseRequest {
private final String artifactName;
private final String version;
private final boolean skipSubmit;
private final String user;
private final String clusterName;
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public UpdateJobClusterArtifactRequest(
@JsonProperty("name") final String clusterName,
@JsonProperty("url") final String artifact,
@JsonProperty("version") final String version,
@JsonProperty("skipsubmit") final boolean skipSubmit,
@JsonProperty("user") final String user) {
Preconditions.checkArg(user != null & !user.isEmpty(), "Must provide user in request");
Preconditions.checkArg(
clusterName != null & !clusterName.isEmpty(),
"Must provide job cluster name in request");
Preconditions.checkArg(
artifact != null && !artifact.isEmpty(),
"Artifact cannot be null or empty");
Preconditions.checkArg(
version != null && !version.isEmpty(),
"version cannot be null or empty");
this.clusterName = clusterName;
this.artifactName = artifact;
this.version = version;
this.skipSubmit = skipSubmit;
this.user = user;
}
public String getArtifactName() {
return artifactName;
}
public String getVersion() {
return version;
}
public boolean isSkipSubmit() {
return skipSubmit;
}
public String getUser() {
return user;
}
public String getClusterName() {
return clusterName;
}
@Override
public String toString() {
return "UpdateJobClusterArtifactRequest{" +
"artifactName='" + artifactName + '\'' +
", version='" + version + '\'' +
", skipSubmit=" + skipSubmit +
", user='" + user + '\'' +
", clusterName='" + clusterName + '\'' +
", requestId=" + requestId +
'}';
}
}
    /** Acknowledges an {@code UpdateSchedulingInfoRequest}. */
    @EqualsAndHashCode
    @ToString
    public static final class UpdateSchedulingInfoResponse extends BaseResponse {
        public UpdateSchedulingInfoResponse(
            final long requestId,
            final ResponseCode responseCode,
            final String message) {
            super(requestId, responseCode, message);
        }
    }
    /** Acknowledges an {@code UpdateJobClusterArtifactRequest}. */
    public static final class UpdateJobClusterArtifactResponse extends BaseResponse {
        public UpdateJobClusterArtifactResponse(
            final long requestId,
            final ResponseCode responseCode,
            final String message) {
            super(requestId, responseCode, message);
        }
        @Override
        public String toString() {
            return "UpdateJobClusterArtifactResponse{" +
                "requestId=" + requestId +
                ", responseCode=" + responseCode +
                ", message='" + message + '\'' +
                '}';
        }
    }
public static final class UpdateJobClusterWorkerMigrationStrategyRequest extends BaseRequest {
private final WorkerMigrationConfig migrationConfig;
private final String clusterName;
private final String user;
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public UpdateJobClusterWorkerMigrationStrategyRequest(
@JsonProperty("name") final String clusterName,
@JsonProperty("migrationConfig") final WorkerMigrationConfig config,
@JsonProperty("user") final String user) {
Preconditions.checkArg(user != null & !user.isEmpty(), "Must provide user in request");
Preconditions.checkArg(
clusterName != null & !clusterName.isEmpty(),
"Must provide job cluster name in request");
Preconditions.checkNotNull(config, "migrationConfig");
this.migrationConfig = config;
this.clusterName = clusterName;
this.user = user;
}
public WorkerMigrationConfig getMigrationConfig() {
return migrationConfig;
}
public String getClusterName() {
return clusterName;
}
public String getUser() {
return user;
}
@Override
public String toString() {
return "UpdateJobClusterWorkerMigrationStrategyRequest{" +
"migrationConfig=" + migrationConfig +
", clusterName='" + clusterName + '\'' +
", user='" + user + '\'' +
", requestId=" + requestId +
'}';
}
}
    /** Acknowledges an {@code UpdateJobClusterWorkerMigrationStrategyRequest}. */
    public static final class UpdateJobClusterWorkerMigrationStrategyResponse extends BaseResponse {
        public UpdateJobClusterWorkerMigrationStrategyResponse(
            final long requestId,
            final ResponseCode responseCode,
            final String message) {
            super(requestId, responseCode, message);
        }
        @Override
        public String toString() {
            return "UpdateJobClusterWorkerMigrationStrategyResponse{" +
                "requestId=" + requestId +
                ", responseCode=" + responseCode +
                ", message='" + message + '\'' +
                '}';
        }
    }
/**
* Invoked by user.
* Kills all currently running jobs and puts itself in disabled state (also updates store)
* Any SLA enforcement is disabled
*
* @author njoshi
*/
public static final class DisableJobClusterRequest extends BaseRequest {
private final String user;
private final String clusterName;
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public DisableJobClusterRequest(
@JsonProperty("name") String clusterName,
@JsonProperty("user") String user) {
Preconditions.checkArg(user != null & !user.isEmpty(), "Must provide user in request");
Preconditions.checkArg(
clusterName != null & !clusterName.isEmpty(),
"Must provide job cluster name in request");
this.user = user;
this.clusterName = clusterName;
}
public String getUser() {
return user;
}
public String getClusterName() {
return clusterName;
}
@Override
public String toString() {
return "DisableJobClusterRequest{" +
"user='" + user + '\'' +
", clusterName='" + clusterName + '\'' +
", requestId=" + requestId +
'}';
}
}
/**
* Whether a disable request was successful
*
* @author njoshi
*/
    /** Acknowledges a {@code DisableJobClusterRequest}. */
    public static final class DisableJobClusterResponse extends BaseResponse {
        public DisableJobClusterResponse(
            final long requestId,
            final ResponseCode responseCode,
            final String message) {
            super(requestId, responseCode, message);
        }
        @Override
        public String toString() {
            return "DisableJobClusterResponse{" +
                "requestId=" + requestId +
                ", responseCode=" + responseCode +
                ", message='" + message + '\'' +
                '}';
        }
    }
/**
* Enables the job cluster. Restarts SLA enforcement logic and updates store.
*
* @author njoshi
*/
public static final class EnableJobClusterRequest extends BaseRequest {
private final String user;
private final String clusterName;
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public EnableJobClusterRequest(
@JsonProperty("name") final String clusterName,
@JsonProperty("user") final String user) {
Preconditions.checkArg(user != null & !user.isEmpty(), "Must provide user in request");
Preconditions.checkArg(
clusterName != null & !clusterName.isEmpty(),
"Must provide job cluster name in request");
this.user = user;
this.clusterName = clusterName;
}
public String getUser() {
return user;
}
public String getClusterName() {
return clusterName;
}
@Override
public String toString() {
return "EnableJobClusterRequest{" +
"user='" + user + '\'' +
", clusterName='" + clusterName + '\'' +
", requestId=" + requestId +
'}';
}
}
    /**
     * Whether the enable operation was successful
     *
     * @author njoshi
     */
    /** Acknowledges an {@code EnableJobClusterRequest}. */
    public static final class EnableJobClusterResponse extends BaseResponse {
        public EnableJobClusterResponse(
            final long requestId,
            final ResponseCode responseCode,
            final String message) {
            super(requestId, responseCode, message);
        }
        @Override
        public String toString() {
            return "EnableJobClusterResponse{" +
                "requestId=" + requestId +
                ", responseCode=" + responseCode +
                ", message='" + message + '\'' +
                '}';
        }
    }
/**
* Request the job cluster definition
*
* @author njoshi
*/
public static final class GetJobClusterRequest extends BaseRequest {
private final String jobClusterName;
public GetJobClusterRequest(final String name) {
super();
Preconditions.checkArg(
name != null && !name.isEmpty(),
"Jobcluster name cannot be null or empty");
this.jobClusterName = name;
}
public String getJobClusterName() {
return jobClusterName;
}
@Override
public String toString() {
return "GetJobClusterRequest{" +
"jobClusterName='" + jobClusterName + '\'' +
", requestId=" + requestId +
'}';
}
}
/**
* Response to the getJobClusterRequest with the actual job cluster definition.
*
* @author njoshi
*/
    public static final class GetJobClusterResponse extends BaseResponse {
        // Present when the cluster exists, empty otherwise.
        // NOTE(review): Optional as a field is discouraged (Effective Java), but
        // changing it would break existing callers of getJobCluster().
        private final Optional<MantisJobClusterMetadataView> jobClusterOp;
        /**
         * @param jobClusterOp the cluster view, or {@code Optional.empty()} if not found;
         *                     the Optional wrapper itself must be non-null
         */
        public GetJobClusterResponse(
            long requestId,
            ResponseCode responseCode,
            String message,
            Optional<MantisJobClusterMetadataView> jobClusterOp) {
            super(requestId, responseCode, message);
            Preconditions.checkNotNull(jobClusterOp, "Job cluster cannot be null");
            this.jobClusterOp = jobClusterOp;
        }
        public Optional<MantisJobClusterMetadataView> getJobCluster() {
            return jobClusterOp;
        }
        @Override
        public String toString() {
            return "GetJobClusterResponse{" +
                "jobClusterOp=" + jobClusterOp +
                ", requestId=" + requestId +
                ", responseCode=" + responseCode +
                ", message='" + message + '\'' +
                '}';
        }
    }
    /**
     * Filter criteria shared by job listing requests. All criteria are optional;
     * an empty criteria object (the no-arg constructor) matches everything.
     */
    public static final class ListJobCriteria {
        // Maximum number of results to return, if bounded.
        private final Optional<Integer> limit;
        // Restrict to jobs in this meta state.
        private final Optional<JobState.MetaState> jobState;
        // Restrict workers reported per job to these stage numbers.
        private final List<Integer> stageNumberList;
        // Restrict to these worker indices.
        private final List<Integer> workerIndexList;
        // Restrict to these worker numbers.
        private final List<Integer> workerNumberList;
        // Restrict to workers in these meta states.
        private final List<WorkerState.MetaState> workerStateList;
        // When present and true, only active (non-terminal) jobs are returned.
        private final Optional<Boolean> activeOnly;
        // Regex matched against job/cluster identifiers.
        private final Optional<String> matchingRegex;
        // Label pairs parsed from the raw label query string.
        private final List<Label> matchingLabels;
        // "and"/"or" combinator applied across matchingLabels.
        private final Optional<String> labelsOperand;
        /**
         * @param matchingLabels raw label query string; parsed into {@link Label}
         *                       pairs via {@code LabelUtils.generatePairs}, empty list if absent
         */
        public ListJobCriteria(
            final Optional<Integer> limit,
            final Optional<JobState.MetaState> jobState,
            final List<Integer> stageNumber,
            final List<Integer> workerIndex,
            final List<Integer> workerNumber,
            final List<WorkerState.MetaState> workerState,
            final Optional<Boolean> activeOnly,
            final Optional<String> matchingRegex,
            final Optional<String> matchingLabels,
            final Optional<String> labelsOperand) {
            this.limit = limit;
            this.jobState = jobState;
            this.stageNumberList = stageNumber;
            this.workerIndexList = workerIndex;
            this.workerNumberList = workerNumber;
            this.workerStateList = workerState;
            this.activeOnly = activeOnly;
            this.matchingRegex = matchingRegex;
            this.matchingLabels = matchingLabels.map(query -> LabelUtils.generatePairs(query))
                .orElse(Collections.emptyList());
            this.labelsOperand = labelsOperand;
        }
        /** Creates criteria that match every job (no filters). */
        public ListJobCriteria() {
            this(
                Optional.empty(),
                Optional.empty(),
                Lists.newArrayList(),
                Lists.newArrayList(),
                Lists.newArrayList(),
                Lists.newArrayList(),
                Optional.empty(),
                Optional.empty(),
                Optional.empty(),
                Optional.empty());
        }
        public Optional<Integer> getLimit() {
            return limit;
        }
        public Optional<JobState.MetaState> getJobState() {
            return jobState;
        }
        public List<Integer> getStageNumberList() {
            return stageNumberList;
        }
        public List<Integer> getWorkerIndexList() {
            return workerIndexList;
        }
        public List<Integer> getWorkerNumberList() {
            return workerNumberList;
        }
        public List<WorkerState.MetaState> getWorkerStateList() {
            return workerStateList;
        }
        public Optional<Boolean> getActiveOnly() {
            return activeOnly;
        }
        public Optional<String> getMatchingRegex() {
            return matchingRegex;
        }
        public List<Label> getMatchingLabels() {
            return matchingLabels;
        }
        public Optional<String> getLabelsOperand() {
            return labelsOperand;
        }
        @Override
        public boolean equals(final Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            final ListJobCriteria that = (ListJobCriteria) o;
            return Objects.equals(limit, that.limit) &&
                Objects.equals(jobState, that.jobState) &&
                Objects.equals(stageNumberList, that.stageNumberList) &&
                Objects.equals(workerIndexList, that.workerIndexList) &&
                Objects.equals(workerNumberList, that.workerNumberList) &&
                Objects.equals(workerStateList, that.workerStateList) &&
                Objects.equals(activeOnly, that.activeOnly) &&
                Objects.equals(matchingRegex, that.matchingRegex) &&
                Objects.equals(matchingLabels, that.matchingLabels) &&
                Objects.equals(labelsOperand, that.labelsOperand);
        }
        @Override
        public int hashCode() {
            return Objects.hash(
                limit,
                jobState,
                stageNumberList,
                workerIndexList,
                workerNumberList,
                workerStateList,
                activeOnly,
                matchingRegex,
                matchingLabels,
                labelsOperand);
        }
        @Override
        public String toString() {
            return "ListJobCriteria{" +
                "limit=" + limit +
                ", jobState=" + jobState +
                ", stageNumberList=" + stageNumberList +
                ", workerIndexList=" + workerIndexList +
                ", workerNumberList=" + workerNumberList +
                ", workerStateList=" + workerStateList +
                ", activeOnly=" + activeOnly +
                ", matchingRegex=" + matchingRegex +
                ", matchingLabels=" + matchingLabels +
                ", labelsOperand=" + labelsOperand +
                '}';
        }
    }
/**
* Request a list of job metadata based on different criteria
*/
    public static class ListJobsRequest extends BaseRequest {
        // Criteria used to filter the returned jobs; never null.
        private final ListJobCriteria filters;
        public ListJobsRequest(final ListJobCriteria filters) {
            this.filters = filters;
        }
        /** Lists all jobs (empty criteria). */
        public ListJobsRequest() {
            this(new ListJobCriteria());
        }
        /**
         * Lists jobs of a single cluster.
         *
         * @param clusterName cluster name, applied as the matching-regex criterion
         */
        public ListJobsRequest(final String clusterName) {
            this(new ListJobCriteria(
                Optional.empty(),
                Optional.empty(),
                Lists.newArrayList(),
                Lists.newArrayList(),
                Lists.newArrayList(),
                Lists.newArrayList(),
                Optional.empty(),
                Optional.ofNullable(clusterName),
                Optional.empty(),
                Optional.empty()));
        }
        public ListJobCriteria getCriteria() {
            return filters;
        }
        @Override
        public boolean equals(final Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            final ListJobsRequest that = (ListJobsRequest) o;
            return Objects.equals(filters, that.filters);
        }
        @Override
        public int hashCode() {
            return Objects.hash(filters);
        }
        @Override
        public String toString() {
            return "ListJobsRequest{" +
                "filters=" + filters +
                ", requestId=" + requestId +
                '}';
        }
    }
    /** Carries the job metadata views matching a {@code ListJobsRequest}. */
    public static final class ListJobsResponse extends BaseResponse {
        // Matched jobs; never null (enforced in the constructor).
        private final List<MantisJobMetadataView> jobs;
        public ListJobsResponse(
            long requestId,
            ResponseCode responseCode,
            String message,
            List<MantisJobMetadataView> list) {
            super(requestId, responseCode, message);
            Preconditions.checkNotNull(list, "job ids list cannot be null");
            this.jobs = list;
        }
        /** @return the raw, unpaginated job list. */
        public List<MantisJobMetadataView> getJobList() {
            return jobs;
        }
        /**
         * Maps each job through {@code func} and returns a paginated/sorted view.
         * Null pagination parameters fall back to the builder defaults.
         */
        public <R> ListObject<R> getJobList(Function<MantisJobMetadataView, R> func,
                                            Class<R> classType,
                                            Integer pageSize,
                                            Integer offset,
                                            String sortField,
                                            Boolean sortAscending,
                                            Uri uri) {
            List<R> mappedList = jobs.stream().map(func).collect(Collectors.toList());
            return getTransformedJobList(mappedList,
                classType,
                pageSize,
                offset,
                sortField,
                sortAscending,
                uri);
        }
        /** Returns a paginated/sorted view of the jobs without mapping. */
        public ListObject<MantisJobMetadataView> getJobList(
            Integer pageSize,
            Integer offset,
            String sortField,
            Boolean sortAscending,
            Uri uri) {
            return getTransformedJobList(jobs,
                MantisJobMetadataView.class,
                pageSize,
                offset,
                sortField,
                sortAscending,
                uri);
        }
        // Applies only the non-null pagination/sort parameters to the list builder.
        private <T> ListObject<T> getTransformedJobList(
            List<T> list,
            Class<T> classType,
            Integer pageSize,
            Integer offset,
            String sortField,
            Boolean sortAscending,
            Uri uri) {
            ListObject.Builder<T> builder = new ListObject.Builder<T>().withObjects(list, classType);
            if (uri != null) {
                builder = builder.withUri(uri);
            }
            if (pageSize != null) {
                builder = builder.withLimit(pageSize);
            }
            if (offset != null) {
                builder = builder.withOffset(offset);
            }
            if (sortAscending != null) {
                builder = builder.withSortAscending(sortAscending);
            }
            if (sortField != null) {
                builder = builder.withSortField(sortField);
            }
            return builder.build();
        }
        @Override
        public String toString() {
            return "ListJobsResponse{" +
                "jobs=" + jobs +
                ", requestId=" + requestId +
                ", responseCode=" + responseCode +
                ", message='" + message + '\'' +
                '}';
        }
    }
/**
* Request a list of job IDs based on different criteria
*/
    public static final class ListJobIdsRequest extends BaseRequest {
        // Filter criteria; worker-level filters are always empty for an ids-only listing.
        public final ListJobCriteria filters;
        public ListJobIdsRequest(
            final Optional<Integer> limit,
            final Optional<JobState.MetaState> jobState,
            final Optional<Boolean> activeOnly,
            final Optional<String> matchingRegex,
            final Optional<String> matchingLabels,
            final Optional<String> labelsOperand) {
            super();
            filters = new ListJobCriteria(
                limit,
                jobState,
                Collections.emptyList(),
                Collections.emptyList(),
                Collections.emptyList(),
                Collections.emptyList(),
                activeOnly,
                matchingRegex,
                matchingLabels,
                labelsOperand);
        }
        /** Lists all job ids (no filters). */
        public ListJobIdsRequest() {
            this(
                Optional.empty(),
                Optional.empty(),
                Optional.empty(),
                Optional.empty(),
                Optional.empty(),
                Optional.empty());
        }
        public ListJobCriteria getCriteria() {
            return this.filters;
        }
        @Override
        public boolean equals(final Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            final ListJobIdsRequest that = (ListJobIdsRequest) o;
            return Objects.equals(filters, that.filters);
        }
        @Override
        public int hashCode() {
            return Objects.hash(filters);
        }
        @Override
        public String toString() {
            return "ListJobIdsRequest{" +
                "filters=" + filters +
                ", requestId=" + requestId +
                '}';
        }
    }
    /** Carries the job id summaries matching a {@code ListJobIdsRequest}. */
    public static final class ListJobIdsResponse extends BaseResponse {
        // Matched job ids; never null (enforced in the constructor).
        private final List<JobIdInfo> jobIds;
        public ListJobIdsResponse(
            long requestId,
            ResponseCode responseCode,
            String message,
            List<JobIdInfo> list) {
            super(requestId, responseCode, message);
            Preconditions.checkNotNull(list, "job ids list cannot be null");
            this.jobIds = list;
        }
        public List<JobIdInfo> getJobIds() {
            return jobIds;
        }
        @Override
        public String toString() {
            return "ListJobIdsResponse{" +
                "jobIds=" + jobIds +
                ", requestId=" + requestId +
                ", responseCode=" + responseCode +
                ", message='" + message + '\'' +
                '}';
        }
    }
/**
* Request a list of archived workers for the given job ID
*/
    /** Requests the archived (no longer running) workers of a job, newest first up to {@code limit}. */
    public static final class ListArchivedWorkersRequest extends BaseRequest {
        public static final int DEFAULT_LIST_ARCHIVED_WORKERS_LIMIT = 100;
        private final JobId jobId;
        private final int limit;
        /** Uses {@link #DEFAULT_LIST_ARCHIVED_WORKERS_LIMIT}. */
        public ListArchivedWorkersRequest(final JobId jobId) {
            this(jobId, DEFAULT_LIST_ARCHIVED_WORKERS_LIMIT);
        }
        /**
         * @param jobId job whose archived workers are requested; non-null
         * @param limit maximum number of workers to return
         */
        public ListArchivedWorkersRequest(final JobId jobId, int limit) {
            Preconditions.checkNotNull(jobId, "JobId");
            this.jobId = jobId;
            this.limit = limit;
        }
        public JobId getJobId() {
            return jobId;
        }
        public int getLimit() {
            return limit;
        }
        @Override
        public boolean equals(final Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            final ListArchivedWorkersRequest that = (ListArchivedWorkersRequest) o;
            return limit == that.limit &&
                Objects.equals(jobId, that.jobId);
        }
        @Override
        public int hashCode() {
            return Objects.hash(jobId, limit);
        }
        @Override
        public String toString() {
            return "ListArchivedWorkersRequest{" +
                "jobId=" + jobId +
                ", limit=" + limit +
                ", requestId=" + requestId +
                '}';
        }
    }
    /** Carries the archived worker metadata matching a {@code ListArchivedWorkersRequest}. */
    public static final class ListArchivedWorkersResponse extends BaseResponse {
        // Archived workers; never null (enforced in the constructor).
        private final List<IMantisWorkerMetadata> mantisWorkerMetadata;
        public ListArchivedWorkersResponse(
            long requestId,
            ResponseCode responseCode,
            String message,
            List<IMantisWorkerMetadata> list) {
            super(requestId, responseCode, message);
            Preconditions.checkNotNull(list, "worker metadata list cannot be null");
            this.mantisWorkerMetadata = list;
        }
        /** @return the raw, unpaginated worker metadata list. */
        public List<IMantisWorkerMetadata> getWorkerMetadata() {
            return mantisWorkerMetadata;
        }
        /** Returns a paginated/sorted view of the worker metadata. */
        public ListObject<IMantisWorkerMetadata> getWorkerMetadata(
            Integer pageSize,
            Integer offset,
            String sortField,
            Boolean sortAscending,
            Uri uri) {
            return getTransformedWorkerMetadata(mantisWorkerMetadata,
                IMantisWorkerMetadata.class,
                pageSize,
                offset,
                sortField,
                sortAscending,
                uri);
        }
        /** Maps each worker through {@code func}, then returns a paginated/sorted view. */
        public <R> ListObject<R> getWorkerMetadata(
            Function<IMantisWorkerMetadata, R> func,
            Class<R> classType,
            Integer pageSize,
            Integer offset,
            String sortField,
            Boolean sortAscending,
            Uri uri) {
            List<R> mappedList = mantisWorkerMetadata.stream().map(func).collect(Collectors.toList());
            return getTransformedWorkerMetadata(mappedList,
                classType,
                pageSize,
                offset,
                sortField,
                sortAscending,
                uri);
        }
        // Applies only the non-null pagination/sort parameters to the list builder.
        private <T> ListObject<T> getTransformedWorkerMetadata(
            List<T> list,
            Class<T> classType,
            Integer pageSize,
            Integer offset,
            String sortField,
            Boolean sortAscending,
            Uri uri) {
            ListObject.Builder<T> builder = new ListObject.Builder<T>()
                .withObjects(list, classType);
            if (pageSize != null) {
                builder = builder.withLimit(pageSize);
            }
            if (offset != null) {
                builder = builder.withOffset(offset);
            }
            if (sortField != null) {
                builder = builder.withSortField(sortField);
            }
            if (sortAscending != null) {
                builder = builder.withSortAscending(sortAscending);
            }
            if (uri != null) {
                builder = builder.withUri(uri);
            }
            return builder.build();
        }
        @Override
        public String toString() {
            return "ListArchivedWorkersResponse{" +
                "mantisWorkerMetadata=" + mantisWorkerMetadata +
                ", requestId=" + requestId +
                ", responseCode=" + responseCode +
                ", message='" + message + '\'' +
                '}';
        }
    }
public static final class ListWorkersRequest extends BaseRequest {
public static final int DEFAULT_LIST_WORKERS_LIMIT = 100;
private final JobId jobId;
private final int limit;
public ListWorkersRequest(final JobId jobId) {
this(jobId, DEFAULT_LIST_WORKERS_LIMIT);
}
public ListWorkersRequest(final JobId jobId, int limit) {
this.jobId = jobId;
this.limit = limit;
}
public JobId getJobId() {
return jobId;
}
public int getLimit() {
return limit;
}
@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final ListWorkersRequest that = (ListWorkersRequest) o;
return limit == that.limit &&
Objects.equals(jobId, that.jobId);
}
@Override
public int hashCode() {
return Objects.hash(jobId, limit);
}
@Override
public String toString() {
return "ListWorkersRequest{" +
"jobId=" + jobId +
", limit=" + limit +
", requestId=" + requestId +
'}';
}
}
    /** Carries the worker metadata matching a {@code ListWorkersRequest}. */
    public static final class ListWorkersResponse extends BaseResponse {
        // Current workers; never null (enforced in the constructor).
        private final List<IMantisWorkerMetadata> mantisWorkerMetadata;
        public ListWorkersResponse(
            long requestId,
            ResponseCode responseCode,
            String message,
            List<IMantisWorkerMetadata> list) {
            super(requestId, responseCode, message);
            Preconditions.checkNotNull(list, "worker metadata list cannot be null");
            this.mantisWorkerMetadata = list;
        }
        public List<IMantisWorkerMetadata> getWorkerMetadata() {
            return mantisWorkerMetadata;
        }
        @Override
        public String toString() {
            return "ListWorkersResponse{" +
                "mantisWorkerMetadata=" + mantisWorkerMetadata +
                ", requestId=" + requestId +
                ", responseCode=" + responseCode +
                ", message='" + message + '\'' +
                '}';
        }
    }
/**
* Request a list of completed job Ids in this cluster
*
* @author njoshi
*/
    public static final class ListCompletedJobsInClusterRequest extends BaseRequest {
        private final String clusterName;
        // Maximum number of completed jobs to return.
        private final int limit;
        /** Uses a default limit of 100. */
        public ListCompletedJobsInClusterRequest(final String name) {
            this(name, 100);
        }
        /**
         * @param name  job cluster name; non-null, non-empty
         * @param limit maximum number of completed jobs to return
         */
        public ListCompletedJobsInClusterRequest(final String name, final int limit) {
            super();
            Preconditions.checkArg(
                name != null && !name.isEmpty(),
                "Jobcluster name cannot be null or empty");
            this.clusterName = name;
            this.limit = limit;
        }
        public int getLimit() {
            return this.limit;
        }
        public String getClusterName() {
            return clusterName;
        }
        @Override
        public String toString() {
            return "ListCompletedJobsInClusterRequest{" +
                "clusterName='" + clusterName + '\'' +
                ", limit=" + limit +
                ", requestId=" + requestId +
                '}';
        }
    }
    /** Carries the completed jobs matching a {@code ListCompletedJobsInClusterRequest}. */
    public static final class ListCompletedJobsInClusterResponse extends BaseResponse {
        private final List<CompletedJob> completedJobs;
        public ListCompletedJobsInClusterResponse(
            long requestId,
            ResponseCode responseCode,
            String message,
            List<CompletedJob> completedJobs) {
            super(requestId, responseCode, message);
            this.completedJobs = completedJobs;
        }
        public List<CompletedJob> getCompletedJobs() {
            return completedJobs;
        }
        @Override
        public String toString() {
            return "ListCompletedJobsInClusterResponse [completedJobs=" + completedJobs + "]";
        }
    }
    /**
     * Requests submission of a job to a cluster. When {@code jobDefinition} is
     * empty the cluster's stored definition is used ("quick submit").
     */
    @ToString
    public static final class SubmitJobRequest extends BaseRequest {
        // Empty means quick submit using the cluster's last/latest definition.
        private final Optional<JobDefinition> jobDefinition;
        private final String submitter;
        private final String clusterName;
        // True when generated internally by SLA enforcement rather than by a user.
        private final boolean isAutoResubmit;
        // True to resolve the job definition from the latest cluster config.
        private final boolean submitLatest;
        /**
         * JSON-facing constructor for user-initiated submissions.
         *
         * @param clusterName   target job cluster; non-null, non-empty
         * @param user          submitter; non-null, non-empty
         * @param jobDefinition optional explicit job definition; the Optional wrapper must be non-null
         * @param submitLatest  whether to use the latest cluster configuration
         */
        @JsonCreator
        @JsonIgnoreProperties(ignoreUnknown = true)
        public SubmitJobRequest(
            @JsonProperty("name") final String clusterName,
            @JsonProperty("user") final String user,
            @JsonProperty(value = "jobDefinition") final Optional<JobDefinition> jobDefinition,
            @JsonProperty("submitLatestJobCluster") final boolean submitLatest) {
            super();
            Preconditions.checkArg(user != null && !user.isEmpty(), "Must provide user in request");
            Preconditions.checkArg(
                clusterName != null && !clusterName.isEmpty(),
                "Must provide job cluster name in request");
            Preconditions.checkNotNull(jobDefinition, "jobDefinition");
            this.jobDefinition = jobDefinition;
            this.submitter = user;
            this.clusterName = clusterName;
            this.isAutoResubmit = false;
            this.submitLatest = submitLatest;
        }
        /** Submission with an explicit job definition. */
        public SubmitJobRequest(String clusterName, String submitter, JobDefinition jobDefinition) {
            this(clusterName, submitter, false, Optional.of(jobDefinition));
        }
        // Quick submit: reuse the cluster's stored job definition.
        public SubmitJobRequest(final String clusterName, final String user) {
            this(clusterName, user, false, Optional.empty());
        }
        // Used during SLA enforcement to auto-resubmit jobs.
        public SubmitJobRequest(
            final String clusterName,
            final String user,
            boolean isAutoResubmit,
            final Optional<JobDefinition> jobDefinition) {
            super();
            Preconditions.checkArg(user != null && !user.isEmpty(), "Must provide user in request");
            Preconditions.checkArg(clusterName != null && !clusterName.isEmpty(),
                "Must provide job cluster name in request");
            this.jobDefinition = jobDefinition;
            this.submitter = user;
            this.clusterName = clusterName;
            this.isAutoResubmit = isAutoResubmit;
            this.submitLatest = false;
        }
        public Optional<JobDefinition> getJobDefinition() {
            return jobDefinition;
        }
        public String getSubmitter() {
            return submitter;
        }
        public String getClusterName() {
            return clusterName;
        }
        public boolean isAutoResubmit() {
            return isAutoResubmit;
        }
        public boolean isSubmitLatest() {
            return submitLatest;
        }
    }
public static final class SubmitJobResponse extends BaseResponse {
private final Optional<JobId> jobId;
public SubmitJobResponse(
final long requestId,
final ResponseCode responseCode,
final String message,
final Optional<JobId> jobId) {
super(requestId, responseCode, message);
this.jobId = jobId;
}
public Optional<JobId> getJobId() {
return jobId;
}
@Override
public String toString() {
return "SubmitJobResponse{" +
"jobId=" + jobId +
", requestId=" + requestId +
", responseCode=" + responseCode +
", message='" + message + '\'' +
'}';
}
}
public static final class GetJobDetailsRequest extends BaseRequest {
private final String user;
private final JobId jobId;
public GetJobDetailsRequest(final String user, final JobId jobId) {
super();
this.jobId = jobId;
this.user = user;
}
public GetJobDetailsRequest(final String user, final String jobId) {
super();
Preconditions.checkNotNull(user, "user");
Preconditions.checkArg(
jobId != null && !jobId.isEmpty(),
"Must provide job ID in request");
Optional<JobId> jOp = JobId.fromId(jobId);
if (jOp.isPresent()) {
this.jobId = jOp.get();
} else {
throw new IllegalArgumentException(
String.format("Invalid jobId %s. JobId must be in the format [JobCLusterName-NumericID]", jobId));
}
this.user = user;
}
public JobId getJobId() {
return jobId;
}
public String getUser() {
return user;
}
@Override
public String toString() {
return "GetJobDetailsRequest [jobId=" + jobId + ", user=" + user + "]";
}
}
    /**
     * This request is sent to a job actor asking the job actor to use its current state to merge live metadata e.g. stage
     * instance count to the given job definition object, so the job submission process can inherit from the target job actor.
     */
    @Getter
    @ToString
    public static final class GetJobDefinitionUpdatedFromJobActorRequest extends BaseRequest {
        private final String user;
        private final JobId jobId;
        // Definition to be enriched with the job actor's live state.
        private final JobDefinition jobDefinition;
        // True when this originated from a quick submit (no explicit definition given).
        private final boolean isQuickSubmit;
        // True when triggered internally (e.g. SLA enforcement) rather than by a user.
        private final boolean isAutoResubmit;
        // Original requester; the eventual response is routed back to this actor.
        private final ActorRef originalSender;
        /**
         * @param user           caller making the request; must not be null
         * @param jobId          id of the job actor to inherit from; must not be null
         * @param jobDefinition  definition to merge live metadata into; must not be null
         * @param isQuickSubmit  whether this is a quick-submit flow
         * @param isAutoResubmit whether this was triggered internally
         * @param originalSender actor awaiting the final submit response; must not be null
         */
        public GetJobDefinitionUpdatedFromJobActorRequest(final String user,
                                                          final JobId jobId,
                                                          final JobDefinition jobDefinition,
                                                          final boolean isQuickSubmit,
                                                          final boolean isAutoResubmit,
                                                          final ActorRef originalSender) {
            super();
            Preconditions.checkNotNull(user, "user");
            Preconditions.checkNotNull(jobId, "jobId");
            Preconditions.checkNotNull(originalSender, "originalSender");
            Preconditions.checkNotNull(jobDefinition, "jobDefinition");
            this.jobId = jobId;
            this.user = user;
            this.jobDefinition = jobDefinition;
            this.isQuickSubmit = isQuickSubmit;
            this.isAutoResubmit = isAutoResubmit;
            this.originalSender = originalSender;
        }
    }
public static final class GetJobDetailsResponse extends BaseResponse {
private final Optional<IMantisJobMetadata> jobMetadata;
public GetJobDetailsResponse(
final long requestId,
final ResponseCode responseCode,
final String message,
final Optional<IMantisJobMetadata> jobMetadata) {
super(requestId, responseCode, message);
this.jobMetadata = jobMetadata;
}
public Optional<IMantisJobMetadata> getJobMetadata() {
return jobMetadata;
}
@Override
public String toString() {
return "GetJobDetailsResponse [jobMetadata=" + jobMetadata + "]";
}
}
    /**
     * Response from a job actor carrying the job definition enriched with the
     * actor's live state; routed back to {@code originalSender}.
     */
    @ToString
    @Getter
    public static final class GetJobDefinitionUpdatedFromJobActorResponse extends BaseResponse {
        private final String user;
        // NOTE(review): unlike user/originalSender this is not null-checked; presumably
        // it may be null on error responses -- confirm before relying on it.
        private final JobDefinition jobDefinition;
        private final boolean isAutoResubmit;
        private final boolean isQuickSubmitMode;
        // Actor awaiting the final submit response.
        private final ActorRef originalSender;
        public GetJobDefinitionUpdatedFromJobActorResponse(
            final long requestId,
            final ResponseCode responseCode,
            final String message,
            final String user,
            final JobDefinition jobDefn,
            final boolean isAutoResubmit,
            final boolean isQuickSubmitMode,
            final ActorRef originalSender) {
            super(requestId, responseCode, message);
            Preconditions.checkNotNull(user, "user");
            Preconditions.checkNotNull(originalSender, "originalSender");
            this.user = user;
            this.jobDefinition = jobDefn;
            this.isAutoResubmit = isAutoResubmit;
            this.isQuickSubmitMode = isQuickSubmitMode;
            this.originalSender = originalSender;
        }
    }
public static final class GetLatestJobDiscoveryInfoRequest extends BaseRequest {
private final String jobCluster;
public GetLatestJobDiscoveryInfoRequest(final String jobCluster) {
Preconditions.checkNotNull(jobCluster, "jobCluster");
this.jobCluster = jobCluster;
}
public String getJobCluster() {
return jobCluster;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
GetLatestJobDiscoveryInfoRequest that = (GetLatestJobDiscoveryInfoRequest) o;
return Objects.equals(jobCluster, that.jobCluster);
}
@Override
public int hashCode() {
return Objects.hash(jobCluster);
}
@Override
public String toString() {
return "GetLatestJobDiscoveryInfoRequest{" +
"jobCluster='" + jobCluster + '\'' +
'}';
}
}
public static final class GetLatestJobDiscoveryInfoResponse extends BaseResponse {
private final Optional<JobSchedulingInfo> jobSchedulingInfo;
public GetLatestJobDiscoveryInfoResponse(
final long requestId,
final ResponseCode code,
final String msg,
final Optional<JobSchedulingInfo> jobSchedulingInfo) {
super(requestId, code, msg);
this.jobSchedulingInfo = jobSchedulingInfo;
}
public Optional<JobSchedulingInfo> getDiscoveryInfo() {
return jobSchedulingInfo;
}
}
public static final class GetJobSchedInfoRequest extends BaseRequest {
private final JobId jobId;
public GetJobSchedInfoRequest(final JobId jobId) {
this.jobId = jobId;
}
public JobId getJobId() {
return jobId;
}
@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final GetJobSchedInfoRequest that = (GetJobSchedInfoRequest) o;
return Objects.equals(jobId, that.jobId);
}
@Override
public int hashCode() {
return Objects.hash(jobId);
}
@Override
public String toString() {
return "GetJobStatusSubjectRequest{" +
"jobId=" + jobId +
'}';
}
}
public static final class GetJobSchedInfoResponse extends BaseResponse {
private final Optional<BehaviorSubject<JobSchedulingInfo>> jobStatusSubject;
public GetJobSchedInfoResponse(
final long requestId,
final ResponseCode code,
final String msg,
final Optional<BehaviorSubject<JobSchedulingInfo>> statusSubject) {
super(requestId, code, msg);
this.jobStatusSubject = statusSubject;
}
public Optional<BehaviorSubject<JobSchedulingInfo>> getJobSchedInfoSubject() {
return jobStatusSubject;
}
}
/**
* Stream of JobId submissions for a cluster
*/
public static final class GetLastSubmittedJobIdStreamRequest extends BaseRequest {
private final String clusterName;
public GetLastSubmittedJobIdStreamRequest(final String clusterName) {
Preconditions.checkArg(
clusterName != null & !clusterName.isEmpty(),
"Must provide job cluster name in request");
this.clusterName = clusterName;
}
public String getClusterName() {
return clusterName;
}
@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final GetLastSubmittedJobIdStreamRequest that = (GetLastSubmittedJobIdStreamRequest) o;
return Objects.equals(clusterName, that.clusterName);
}
@Override
public int hashCode() {
return Objects.hash(clusterName);
}
@Override
public String toString() {
return "GetLastSubmittedJobIdStreamRequest{" +
"clusterName='" + clusterName + '\'' +
'}';
}
}
public static final class GetLastSubmittedJobIdStreamResponse extends BaseResponse {
private final Optional<BehaviorSubject<JobId>> jobIdBehaviorSubject;
public GetLastSubmittedJobIdStreamResponse(
final long requestId,
final ResponseCode code,
final String msg,
final Optional<BehaviorSubject<JobId>> jobIdBehaviorSubject) {
super(requestId, code, msg);
this.jobIdBehaviorSubject = jobIdBehaviorSubject;
}
public Optional<BehaviorSubject<JobId>> getjobIdBehaviorSubject() {
return this.jobIdBehaviorSubject;
}
}
public static final class KillJobRequest extends BaseRequest {
private final JobId jobId;
private final String reason;
private final String user;
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public KillJobRequest(
@JsonProperty("JobId") final String jobId,
@JsonProperty("reason") final String reason,
@JsonProperty("user") final String user) {
super();
Preconditions.checkArg(user != null & !user.isEmpty(), "Must provide user in request");
Preconditions.checkArg(
jobId != null & !jobId.isEmpty(),
"Must provide job ID in request");
this.jobId = JobId.fromId(jobId).get();
this.reason = Optional.ofNullable(reason).orElse("");
this.user = user;
}
public JobId getJobId() {
return jobId;
}
public String getReason() {
return reason;
}
public String getUser() {
return user;
}
@Override
public String toString() {
return "KillJobRequest [jobId=" + jobId + ", reason=" + reason + ", user=" + user + "]";
}
}
public static final class KillJobResponse extends BaseResponse {
private final JobId jobId;
private final JobState state;
private final String user;
public KillJobResponse(
long requestId,
ResponseCode responseCode,
JobState state,
String message,
JobId jobId,
String user) {
super(requestId, responseCode, message);
this.jobId = jobId;
this.state = state;
this.user = user;
}
public JobId getJobId() {
return jobId;
}
public JobState getState() {
return state;
}
public String getUser() {
return user;
}
@Override
public String toString() {
return "KillJobResponse [jobId=" + jobId + ", state=" + state + ", user="
+ user + "]";
}
}
public static final class ScaleStageRequest extends BaseRequest {
private final int stageNum;
private final int numWorkers;
private final String user;
private final String reason;
private final JobId jobId;
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public ScaleStageRequest(
@JsonProperty("JobId") final String jobId,
@JsonProperty("StageNumber") final Integer stageNo,
@JsonProperty("NumWorkers") final Integer numWorkers,
@JsonProperty("User") final String user,
@JsonProperty("Reason") final String reason) {
super();
Preconditions.checkArg(
jobId != null & !jobId.isEmpty(),
"Must provide job ID in request");
Preconditions.checkArg(stageNo > 0, "Invalid stage Number " + stageNo);
Preconditions.checkArg(
numWorkers != null && numWorkers > 0,
"NumWorkers must be greater than 0");
this.stageNum = stageNo;
this.numWorkers = numWorkers;
this.user = Optional.ofNullable(user).orElse("UserNotKnown");
this.reason = Optional.ofNullable(reason).orElse("");
this.jobId = JobId.fromId(jobId).get();
}
public int getStageNum() {
return stageNum;
}
public int getNumWorkers() {
return numWorkers;
}
public String getUser() {
return user;
}
public String getReason() {
return reason;
}
public JobId getJobId() {
return jobId;
}
@Override
public String toString() {
return "ScaleStageRequest{" +
"stageNum=" + stageNum +
", numWorkers=" + numWorkers +
", user='" + user + '\'' +
", reason='" + reason + '\'' +
", jobId=" + jobId +
'}';
}
}
public static final class ScaleStageResponse extends BaseResponse {
private final int actualNumWorkers;
public ScaleStageResponse(
final long requestId,
final ResponseCode responseCode,
final String message,
final int actualNumWorkers) {
super(requestId, responseCode, message);
this.actualNumWorkers = actualNumWorkers;
}
public int getActualNumWorkers() {
return actualNumWorkers;
}
@Override
public String toString() {
return "ScaleStageResponse{" +
"actualNumWorkers=" + actualNumWorkers +
'}';
}
}
public static final class ResubmitWorkerRequest extends BaseRequest {
private final String user;
private final JobId jobId;
private final int workerNum;
private final Optional<String> reason;
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public ResubmitWorkerRequest(
@JsonProperty("JobId") final String jobIdStr,
@JsonProperty("workerNumber") final Integer workerNum,
@JsonProperty("user") final String user,
@JsonProperty("reason") final Optional<String> reason) {
super();
Preconditions.checkArg(
jobIdStr != null & !jobIdStr.isEmpty(),
"Must provide job ID in request");
Preconditions.checkNotNull(workerNum, "workerNumber");
Preconditions.checkArg(workerNum > 0, "Worker number must be greater than 0");
this.jobId = JobId.fromId(jobIdStr)
.orElseThrow(() -> new IllegalArgumentException(
"invalid JobID in resubmit worker request " + jobIdStr));
this.workerNum = workerNum;
this.user = user;
this.reason = reason;
}
public JobId getJobId() {
return jobId;
}
public int getWorkerNum() {
return workerNum;
}
public String getUser() {
return user;
}
public Optional<String> getReason() {
return reason;
}
@Override
public String toString() {
return "ResubmitWorkerRequest{" +
"user='" + user + '\'' +
", jobId=" + jobId +
", workerNum=" + workerNum +
", reason=" + reason +
'}';
}
}
public static final class V1ResubmitWorkerRequest extends BaseRequest {
private final String user;
private final int workerNum;
private final Optional<String> reason;
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public V1ResubmitWorkerRequest(
@JsonProperty("workerNumber") final Integer workerNum,
@JsonProperty("user") final String user,
@JsonProperty("reason") final Optional<String> reason) {
super();
Preconditions.checkNotNull(workerNum, "workerNumber");
Preconditions.checkArg(workerNum > 0, "Worker number must be greater than 0");
this.workerNum = workerNum;
this.user = user;
this.reason = reason;
}
public int getWorkerNum() {
return workerNum;
}
public String getUser() {
return user;
}
public Optional<String> getReason() {
return reason;
}
@Override
public String toString() {
return "ResubmitWorkerRequest{" +
"user='" + user + '\'' +
", workerNum=" + workerNum +
", reason=" + reason +
'}';
}
}
public static final class ResubmitWorkerResponse extends BaseResponse {
public ResubmitWorkerResponse(
final long requestId,
final ResponseCode responseCode,
final String message) {
super(requestId, responseCode, message);
}
@Override
public String toString() {
return "ResubmitWorkerResponse [requestId=" + requestId + ", respCode=" + responseCode +
", message="
+ message + "]";
}
}
}
| 8,053 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/proto/BaseRequest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.proto;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Base class for job-cluster protocol request messages; assigns each request a
 * unique (per-JVM) id used to correlate responses with requests.
 */
public class BaseRequest {
    // Process-wide monotonically increasing source of request ids.
    @JsonIgnore
    private static final AtomicLong counter = new AtomicLong(0);
    // Identifier echoed back in the corresponding response.
    @JsonIgnore
    public final long requestId;
    // Used when reconstructing a request with a known id (e.g. deserialization).
    public BaseRequest(long requestId) {
        this.requestId = requestId;
    }
    // Allocates the next id from the shared counter.
    public BaseRequest() {
        this.requestId = counter.getAndIncrement();
    }
}
| 8,054 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/IMantisWorkerEventProcessor.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.scheduler.WorkerEvent;
/**
 * Declares the behavior of the Mantis worker event processor, which is
 * responsible for managing worker state.
 */
public interface IMantisWorkerEventProcessor {
    /**
     * Handles a state transition for a worker.
     *
     * @param event    the worker event driving the transition
     * @param jobStore store used to persist resulting state changes
     * @throws Exception if processing or persistence fails
     */
    public void processEvent(WorkerEvent event, MantisJobStore jobStore) throws Exception;
}
| 8,055 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/WorkerResubmitRateLimiter.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.config.ConfigurationProvider;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Imposes progressively larger delays on repeated resubmissions of the same
 * (stage, workerIndex) slot, rate limiting crash-looping workers.
 *
 * <p>This class is not thread-safe. It is expected to be invoked only by the
 * JobActor, which guarantees no concurrent invocations.
 */
/* package */class WorkerResubmitRateLimiter {
    private static final Logger LOGGER = LoggerFactory.getLogger(WorkerResubmitRateLimiter.class);
    private static final String DEFAULT_WORKER_RESUBMIT_INTERVAL_SECS_STR = "5:10:20";
    // Most recent resubmit record per "<stageNum>_<workerIndex>" key.
    private final Map<String, ResubmitRecord> resubmitRecords = new HashMap<>();
    private static final long DEFAULT_EXPIRE_RESUBMIT_DELAY_SECS = 300;
    private static final long DEFAULT_EXPIRE_RESUBMIT_DELAY_EXECUTION_INTERVAL_SECS = 120;
    private static final long DEFAULT_RESUBMISSION_INTERVAL_SECS = 10;
    // Records older than this (seconds) are purged, resetting the backoff for that slot.
    private final long expireResubmitDelaySecs;
    // Escalating delay ladder in seconds; index 0 is always 0 (first resubmit is immediate).
    private final long[] resubmitIntervalSecs;

    /**
     * Creates a rate limiter with the given delay ladder.
     *
     * @param workerResubmitIntervalSecs colon-separated delays in seconds (e.g. "5:10:20");
     *                                   null/empty falls back to the default
     * @param expireResubmitDelaySecs    age in seconds after which resubmit records expire; must be > 0
     */
    WorkerResubmitRateLimiter(String workerResubmitIntervalSecs, long expireResubmitDelaySecs) {
        Preconditions.checkArg(expireResubmitDelaySecs > 0, "Expire "
                + "Resubmit Delay cannot be 0 or less");
        if (workerResubmitIntervalSecs == null || workerResubmitIntervalSecs.isEmpty()) {
            workerResubmitIntervalSecs = DEFAULT_WORKER_RESUBMIT_INTERVAL_SECS_STR;
        }
        StringTokenizer tokenizer = new StringTokenizer(workerResubmitIntervalSecs, ":");
        if (tokenizer.countTokens() == 0) {
            // Degenerate config (e.g. ":::" yields no tokens): use [0, default].
            this.resubmitIntervalSecs = new long[2];
            this.resubmitIntervalSecs[0] = 0L;
            this.resubmitIntervalSecs[1] = DEFAULT_RESUBMISSION_INTERVAL_SECS;
        } else {
            this.resubmitIntervalSecs = new long[tokenizer.countTokens() + 1];
            this.resubmitIntervalSecs[0] = 0L;
            for (int i = 1; i < this.resubmitIntervalSecs.length; i++) {
                final String s = tokenizer.nextToken();
                try {
                    this.resubmitIntervalSecs[i] = Long.parseLong(s);
                } catch (NumberFormatException e) {
                    LOGGER.warn("Invalid number for resubmit interval " + s + ": using default "
                            + DEFAULT_RESUBMISSION_INTERVAL_SECS);
                    this.resubmitIntervalSecs[i] = DEFAULT_RESUBMISSION_INTERVAL_SECS;
                }
            }
        }
        this.expireResubmitDelaySecs = expireResubmitDelaySecs;
    }

    /** Creates a rate limiter configured from the master configuration. */
    WorkerResubmitRateLimiter() {
        this(ConfigurationProvider.getConfig().getWorkerResubmitIntervalSecs(),
                ConfigurationProvider.getConfig().getExpireWorkerResubmitDelaySecs());
    }

    /**
     * Purges records whose resubmit request time is older than the expiry window.
     * Called periodically by the Job Actor.
     *
     * @param currentTime current wall-clock time in millis
     */
    public void expireResubmitRecords(long currentTime) {
        final long cutoffMillis = currentTime - this.expireResubmitDelaySecs * 1000;
        // Fixed units bug: delayedBy is in seconds while resubmitAt is in millis; the
        // original subtracted seconds from millis. Convert so the record's request time
        // (resubmitAt minus the applied delay) is compared against the cutoff.
        resubmitRecords.values().removeIf(
                record -> record.getResubmitAt() - record.getDelayedBy() * 1000 < cutoffMillis);
    }

    /**
     * Picks the next delay from the ladder given the previous resubmit record;
     * null (no prior resubmit) yields the base delay of 0.
     *
     * @param resubmitRecord previous record for this slot, or null
     * @return delay in seconds to apply to the next resubmit
     */
    long evalDelay(final ResubmitRecord resubmitRecord) {
        long delay = resubmitIntervalSecs[0];
        if (resubmitRecord != null) {
            long prevDelay = resubmitRecord.getDelayedBy();
            // Find the rung matching the previous delay, then step up one (clamped at the top).
            int index = 0;
            for (; index < resubmitIntervalSecs.length; index++) {
                if (prevDelay <= resubmitIntervalSecs[index]) {
                    break;
                }
            }
            index++;
            if (index >= resubmitIntervalSecs.length) {
                index = resubmitIntervalSecs.length - 1;
            }
            delay = resubmitIntervalSecs[index];
        }
        return delay;
    }

    /**
     * Computes and records the resubmit time for the given worker at the given time.
     * Visible for testing.
     *
     * @param workerId    worker being resubmitted
     * @param stageNum    stage the worker belongs to
     * @param currentTime current wall-clock time in millis
     * @return absolute time (millis) at which the worker may be resubmitted
     */
    long getWorkerResubmitTime(final WorkerId workerId, final int stageNum, final long currentTime) {
        String workerKey = generateWorkerIndexStageKey(workerId, stageNum);
        final ResubmitRecord prevResubmitRecord = resubmitRecords.get(workerKey);
        long delay = evalDelay(prevResubmitRecord);
        long resubmitAt = currentTime + delay * 1000;
        resubmitRecords.put(workerKey, new ResubmitRecord(workerKey, resubmitAt, delay));
        return resubmitAt;
    }

    /**
     * Gets the worker resubmit time for the given worker, based on the current time.
     *
     * @param workerId worker being resubmitted
     * @param stageNum stage the worker belongs to
     * @return absolute time (millis) at which the worker may be resubmitted
     */
    public long getWorkerResubmitTime(final WorkerId workerId, final int stageNum) {
        return getWorkerResubmitTime(workerId, stageNum, System.currentTimeMillis());
    }

    /** Key is "<stageNum>_<workerIndex>" so the backoff survives worker-number changes. */
    String generateWorkerIndexStageKey(WorkerId workerId, int stageNum) {
        return stageNum + "_" + workerId.getWorkerIndex();
    }

    /** Clears the resubmit cache. */
    void shutdown() {
        resubmitRecords.clear();
    }

    /** Returns a snapshot of the current resubmit records. */
    List<ResubmitRecord> getResubmitRecords() {
        // Removed a dead, unused HashMap copy that the original built and discarded.
        return resubmitRecords.values().stream().collect(Collectors.toList());
    }

    long getExpireResubmitDelaySecs() {
        return expireResubmitDelaySecs;
    }

    public long[] getResubmitIntervalSecs() {
        return resubmitIntervalSecs;
    }

    /**
     * Immutable record of a single worker resubmission.
     */
    static final class ResubmitRecord {
        private final String workerKey;
        // Absolute time (millis) at/after which the resubmit may occur.
        private final long resubmitAt;
        // Delay applied to reach resubmitAt, in seconds.
        private final long delayedBy;

        private ResubmitRecord(String workerKey, long resubmitAt, long delayedBy) {
            this.workerKey = workerKey;
            this.resubmitAt = resubmitAt;
            this.delayedBy = delayedBy;
        }

        public long getDelayedBy() {
            return delayedBy;
        }

        public String getWorkerKey() {
            return this.workerKey;
        }

        public long getResubmitAt() {
            return resubmitAt;
        }
    }
}
| 8,056 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/FilterableMantisStageMetadataWritable.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.server.master.store.MantisStageMetadataWritable;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonFilter;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
/**
 * A {@link MantisStageMetadataWritable} whose serialized properties can be
 * filtered via the Jackson filter registered under the id "stageMetadataList".
 */
@JsonFilter("stageMetadataList")
public class FilterableMantisStageMetadataWritable extends MantisStageMetadataWritable {
    // Mirrors the superclass constructor; annotations bind each JSON property for Jackson.
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown=true)
    public FilterableMantisStageMetadataWritable(@JsonProperty("jobId") String jobId,
                                      @JsonProperty("stageNum") int stageNum,
                                      @JsonProperty("numStages") int numStages,
                                      @JsonProperty("machineDefinition") MachineDefinition machineDefinition,
                                      @JsonProperty("numWorkers") int numWorkers,
                                      @JsonProperty("hardConstraints") List<JobConstraints> hardConstraints,
                                      @JsonProperty("softConstraints") List<JobConstraints> softConstraints,
                                      @JsonProperty("scalingPolicy") StageScalingPolicy scalingPolicy,
                                      @JsonProperty("scalable") boolean scalable) {
        super(jobId, stageNum, numStages, machineDefinition, numWorkers, hardConstraints, softConstraints, scalingPolicy, scalable);
    }
}
| 8,057 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/MantisJobMetadataView.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.server.master.domain.DataFormatAdapter;
import io.mantisrx.server.master.store.MantisJobMetadata;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonFilter;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.util.List;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@JsonFilter("topLevelFilter")
public class MantisJobMetadataView {
@JsonIgnore
private static final ObjectMapper mapper = new ObjectMapper();
@JsonIgnore
private static final Logger logger = LoggerFactory.getLogger(MantisJobMetadataView.class);
private FilterableMantisJobMetadataWritable jobMetadata;
private long terminatedAt = -1;
private List<FilterableMantisStageMetadataWritable> stageMetadataList = Lists.newArrayList();
private List<FilterableMantisWorkerMetadataWritable> workerMetadataList = Lists.newArrayList();
private String version = "";
public MantisJobMetadataView() {}
    /**
     * JSON constructor used when deserializing a previously serialized view
     * (e.g. a completed job read back from storage).
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown=true)
    public MantisJobMetadataView(@JsonDeserialize(as=FilterableMantisJobMetadataWritable.class) @JsonProperty("jobMetadata") FilterableMantisJobMetadataWritable jobMeta,
                                 @JsonProperty("stageMetadataList") List<FilterableMantisStageMetadataWritable> stageMetadata,
                                 @JsonProperty("workerMetadataList") List<FilterableMantisWorkerMetadataWritable> workerMetadata) {
        this.jobMetadata = jobMeta;
        this.stageMetadataList = stageMetadata;
        this.workerMetadataList = workerMetadata;
    }
    /**
     * Builds a filterable view of the given job metadata, keeping only the stages
     * and workers admitted by the supplied filter lists (empty lists admit all).
     *
     * @param jobMeta          source job metadata
     * @param terminatedAt     termination time in millis, or -1 for a live job
     * @param stageNumberList  stage numbers to include; empty means all
     * @param workerIndexList  worker indexes to include; empty means all
     * @param workerNumberList worker numbers to include; empty means all
     * @param workerStateList  worker states to include; empty means all
     * @param jobIdOnly        when true, skip materializing stage/worker lists entirely
     */
    public MantisJobMetadataView(IMantisJobMetadata jobMeta, long terminatedAt,
                                 List<Integer> stageNumberList, List<Integer> workerIndexList,
                                 List<Integer> workerNumberList, List<WorkerState.MetaState> workerStateList, boolean jobIdOnly) {
        // NOTE(review): the trace format below has four placeholders for four args, but the
        // message text omits a label for workerNumberList -- consider fixing the format string.
        if(logger.isTraceEnabled()) { logger.trace("Enter MantisJobMetadataView ctor jobMeta {} workerIndexList {} workerNumberList workerStateList {} jobIdOnly {}", workerIndexList, workerNumberList, workerStateList, jobIdOnly);}
        this.jobMetadata = DataFormatAdapter.convertMantisJobMetadataToFilterableMantisJobMetadataWriteable(jobMeta);
        this.terminatedAt = terminatedAt;
        version = jobMeta.getJobDefinition().getVersion();
        if(logger.isDebugEnabled()) { logger.debug("MantisJobMetadataView.terminatedAt set to {}, version set to {}", terminatedAt, version); }
        if(!jobIdOnly) {
            if(logger.isDebugEnabled()) { logger.debug("MantisJobMetadataView.jobIdOnly is {}", jobIdOnly); }
            // Keep only stages admitted by the stage-number filter.
            this.stageMetadataList = jobMeta.getStageMetadata().values().stream()
                    .filter((IMantisStageMetadata mantisStageMetadata) -> stageFilter(mantisStageMetadata, stageNumberList))
                    .map(DataFormatAdapter::convertFilterableMantisStageMetadataToMantisStageMetadataWriteable)
                    .collect(Collectors.toList());
            // Flatten workers across ALL stages (intentionally not stage-filtered) and
            // keep those admitted by the worker filters.
            this.workerMetadataList = jobMeta.getStageMetadata().values().stream()
                    .map(IMantisStageMetadata::getAllWorkers)
                    .flatMap(jobWorkers -> jobWorkers.stream()
                            .map(jw -> jw.getMetadata())
                            .filter((IMantisWorkerMetadata workerMetadata) -> workerFilter(workerMetadata, workerIndexList, workerNumberList, workerStateList))
                            .map(DataFormatAdapter::convertMantisWorkerMetadataToFilterableMantisWorkerMetadataWritable)
                    )
                    .collect(Collectors.toList());
        }
        if(logger.isTraceEnabled()) { logger.trace("Exit MantisJobMetadataView ctor");}
    }
/**
 * Convenience constructor for jobs that have not terminated; delegates with a
 * {@code terminatedAt} of -1.
 */
public MantisJobMetadataView(IMantisJobMetadata jobMeta,
                             final List<Integer> stageNumberList,
                             final List<Integer> workerIndexList,
                             final List<Integer> workerNumberList,
                             final List<WorkerState.MetaState> workerStateList,
                             final boolean jobIdOnly) {
    this(jobMeta, -1, stageNumberList, workerIndexList, workerNumberList, workerStateList, jobIdOnly);
}
/**
 * Returns true when the stage passes the stage-number filter: an empty filter
 * list matches every stage; otherwise the stage number must appear in the list.
 */
private boolean stageFilter(IMantisStageMetadata msmd, List<Integer> stageNumberList) {
    if (logger.isTraceEnabled()) { logger.trace("Enter MantisJobMetadataView:stageFilter Stage {} stageNumberList {}", msmd, stageNumberList); }
    final boolean accepted = stageNumberList.isEmpty()
            || stageNumberList.stream().anyMatch(stageNumber -> stageNumber == msmd.getStageNum());
    if (accepted) {
        if (logger.isTraceEnabled()) { logger.trace("Exit stageFilter with true for stage {}", msmd); }
        return true;
    }
    if (logger.isTraceEnabled()) { logger.trace("Exit stageFilter with false for stage {}", msmd); }
    return false;
}
/**
 * Returns true when the worker passes all supplied filters. Semantics: filters are
 * AND-ed across the three criteria, and OR-ed within each list; an empty list means
 * "no filtering on that criterion".
 *
 * <p>Fixes to the original logic:
 * <ul>
 *   <li>worker-number matching previously required the worker number to equal EVERY
 *       element of the list (always false for lists with more than one distinct value);</li>
 *   <li>worker-index matching effectively only honored the first list element;</li>
 *   <li>the state check sat outside its loop, so only the last state in the list mattered;</li>
 *   <li>one exit trace logged "true" while returning false.</li>
 * </ul>
 */
private boolean workerFilter(IMantisWorkerMetadata mwmd, final List<Integer> workerIndexList,
                             final List<Integer> workerNumberList,
                             final List<WorkerState.MetaState> workerStateList) {
    if (logger.isTraceEnabled()) { logger.trace("Enter MantisJobMetadataView:workerFilter worker {} indexList {} numberList {} stateList {}", mwmd, workerIndexList, workerNumberList, workerStateList); }
    // No filter specified: everything matches.
    if (workerIndexList.isEmpty() && workerNumberList.isEmpty() && workerStateList.isEmpty()) {
        if (logger.isTraceEnabled()) { logger.trace("Exit workerFilter1 with true for worker {}", mwmd); }
        return true;
    }
    if (!workerIndexList.isEmpty() && !workerIndexList.contains(mwmd.getWorkerIndex())) {
        if (logger.isTraceEnabled()) { logger.trace("Exit workerFilter2 with false for worker {}", mwmd); }
        return false;
    }
    if (!workerNumberList.isEmpty() && !workerNumberList.contains(mwmd.getWorkerNumber())) {
        if (logger.isTraceEnabled()) { logger.trace("Exit workerFilter3 with false for worker {}", mwmd); }
        return false;
    }
    if (!workerStateList.isEmpty()) {
        WorkerState.MetaState metaState = null;
        try {
            metaState = WorkerState.toMetaState(mwmd.getState());
        } catch (IllegalArgumentException ignored) {
            // Unknown/unmappable state: treat as not matching any state filter.
        }
        if (metaState == null || !workerStateList.contains(metaState)) {
            if (logger.isTraceEnabled()) { logger.trace("Exit workerFilter4 with false for worker {}", mwmd); }
            return false;
        }
    }
    if (logger.isTraceEnabled()) { logger.trace("Exit workerFilter5 with true for worker {}", mwmd); }
    return true;
}
/** @return the job-level metadata of this view. */
public MantisJobMetadata getJobMetadata() {
    return jobMetadata;
}

/** @return filtered stage metadata; only populated when the view was built with jobIdOnly == false. */
public List<FilterableMantisStageMetadataWritable> getStageMetadataList() {
    return stageMetadataList;
}

/** @return filtered worker metadata; only populated when the view was built with jobIdOnly == false. */
public List<FilterableMantisWorkerMetadataWritable> getWorkerMetadataList() {
    return workerMetadataList;
}

/** @return termination timestamp in epoch millis, or -1 when the job has not terminated. */
public long getTerminatedAt() {
    return terminatedAt;
}

/** @return the version taken from the job definition at construction time. */
public String getVersion() {
    return this.version;
}
/**
 * Serializes this view to JSON; if Jackson fails, falls back to a plain
 * field concatenation so toString() never throws.
 */
@Override
public String toString() {
    try {
        return mapper.writeValueAsString(this);
    } catch (JsonProcessingException jsonError) {
        StringBuilder fallback = new StringBuilder("MantisJobMetadataView [jobMetadata=");
        fallback.append(jobMetadata)
                .append(", stageMetadataList=").append(stageMetadataList)
                .append(", workerMetadataList=").append(workerMetadataList)
                .append("]");
        return fallback.toString();
    }
}
}
| 8,058 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/MantisJobMetadataImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import io.mantisrx.common.Label;
import io.mantisrx.master.jobcluster.job.worker.JobWorker;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.server.master.domain.Costs;
import io.mantisrx.server.master.domain.DataFormatAdapter;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.persistence.exceptions.InvalidJobException;
import io.mantisrx.server.master.persistence.exceptions.InvalidJobStateChangeException;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonFilter;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.net.MalformedURLException;
import java.net.URL;
import java.time.Instant;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import lombok.Getter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Runtime implementation of {@link IMantisJobMetadata}: holds a job's identity,
 * lifecycle state, definition and per-stage/worker metadata, and persists state
 * transitions through the supplied {@link MantisJobStore}.
 *
 * <p>NOTE(review): the stage and worker-number maps are plain HashMaps and are not
 * synchronized — presumably all mutation happens on a single (actor) thread; confirm.
 */
@JsonFilter("topLevelFilter")
public class MantisJobMetadataImpl implements IMantisJobMetadata {
    private static final Logger logger = LoggerFactory.getLogger(MantisJobMetadataImpl.class);
    private final JobId jobId;
    private final long submittedAt;
    @Getter
    private final long heartbeatIntervalSecs;
    @Getter
    private final long workerTimeoutSecs;
    private long startedAt = DEFAULT_STARTED_AT_EPOCH;
    // NOTE(review): endedAt has no setter in this class; it stays at the default epoch here.
    private long endedAt = DEFAULT_STARTED_AT_EPOCH;
    private JobState state;
    private int nextWorkerNumberToUse;
    private final JobDefinition jobDefinition;
    private Costs jobCosts;
    // stage number -> stage metadata; rebuilt at runtime, excluded from JSON.
    @JsonIgnore
    private final Map<Integer, IMantisStageMetadata> stageMetadataMap = new HashMap<>();
    // worker number -> stage number, for O(1) lookup of the stage owning a worker.
    @JsonIgnore
    private final Map<Integer, Integer> workerNumberToStageMap = new HashMap<>();

    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown=true)
    public MantisJobMetadataImpl(@JsonProperty("jobId") JobId jobId,
                                 @JsonProperty("submittedAt") long submittedAt,
                                 @JsonProperty("startedAt") long startedAt,
                                 @JsonProperty("jobDefinition") JobDefinition jobDefinition,
                                 @JsonProperty("state") JobState state,
                                 @JsonProperty("nextWorkerNumberToUse") int nextWorkerNumberToUse,
                                 @JsonProperty("heartbeatIntervalSecs") long heartbeatIntervalSecs,
                                 @JsonProperty("workerTimeoutSecs") long workerTimeoutSecs) {
        this.jobId = jobId;
        this.submittedAt = submittedAt;
        this.startedAt = startedAt;
        // Jobs deserialized without a state default to Accepted.
        this.state = state==null? JobState.Accepted : state;
        this.nextWorkerNumberToUse = nextWorkerNumberToUse;
        this.heartbeatIntervalSecs = heartbeatIntervalSecs;
        this.workerTimeoutSecs = workerTimeoutSecs;
        this.jobDefinition = jobDefinition;
    }

    @Override
    public JobId getJobId() {
        return jobId;
    }

    @Override
    public String getClusterName() {
        // The cluster name is the job definition's name.
        return this.jobDefinition.getName();
    }

    @JsonIgnore
    @Override
    public Instant getSubmittedAtInstant() {
        return Instant.ofEpochMilli(submittedAt);
    }

    public long getSubmittedAt() {
        return submittedAt;
    }

    @Override
    public long getSubscriptionTimeoutSecs() {
        return this.jobDefinition.getSubscriptionTimeoutSecs();
    }

    @Override
    public int getNextWorkerNumberToUse() {
        return nextWorkerNumberToUse;
    }

    /** Updates the next worker number and persists the job; throws if the store update fails. */
    public void setNextWorkerNumberToUse(int n, MantisJobStore store) throws Exception{
        this.nextWorkerNumberToUse = n;
        store.updateJob(this);
    }

    @Override
    public JobState getState() {
        return state;
    }

    @Override
    public JobDefinition getJobDefinition() {
        return this.jobDefinition;
    }

    @Override
    public String getUser() {
        return this.jobDefinition.getUser();
    }

    @Override
    public Optional<JobSla> getSla() {
        return Optional.ofNullable(this.jobDefinition.getJobSla());
    }

    @Override
    public List<Parameter> getParameters() {
        return this.jobDefinition.getParameters();
    }

    @Override
    public List<Label> getLabels() {
        return this.jobDefinition.getLabels();
    }

    @JsonIgnore
    @Override
    public int getTotalStages() {
        return this.getJobDefinition().getNumberOfStages();
    }

    @JsonIgnore
    @Override
    public String getArtifactName() {
        return this.jobDefinition.getArtifactName();
    }

    /**
     * Transitions the job state, validating the transition and persisting the job.
     *
     * @throws InvalidJobStateChangeException when the transition is not allowed
     */
    void setJobState(JobState state, MantisJobStore store) throws Exception {
        logger.info("Updating job State from {} to {} ", this.state, state);
        if (!this.state.isValidStateChgTo(state)) {
            throw new InvalidJobStateChangeException(jobId.getId(), this.state, state);
        }
        this.state = state;
        store.updateJob(this);
    }

    /** Records the job start time and persists the job. */
    void setStartedAt(long startedAt, MantisJobStore store) throws Exception {
        logger.info("Updating job start time to {} ", startedAt);
        this.startedAt = startedAt;
        store.updateJob(this);
    }

    void setJobCosts(Costs jobCosts) {
        this.jobCosts = jobCosts;
    }

    /**
     * Add job stage if absent, returning true if it was actually added.
     * NOTE(review): despite the name, the put is unconditional — an existing entry for the
     * same stage number is overwritten and the method merely reports whether one was there.
     * The worker-number mappings of the supplied stage are registered either way.
     * @param msmd The stage's metadata object.
     * @return true if actually added, false otherwise.
     */
    public boolean addJobStageIfAbsent(IMantisStageMetadata msmd) {
        if(logger.isTraceEnabled()) { logger.trace("Adding stage {} ", msmd); }
        boolean result = stageMetadataMap.put(msmd.getStageNum(), msmd) == null;
        msmd.getAllWorkers().forEach((worker) -> {
            workerNumberToStageMap.put(worker.getMetadata().getWorkerNumber(), msmd.getStageNum());
        });
        return result;
    }

    @JsonIgnore
    public Map<Integer,? extends IMantisStageMetadata> getStageMetadata() {
        return Collections.unmodifiableMap(stageMetadataMap);
    }

    public final Map<Integer, Integer> getWorkerNumberToStageMap() {
        return Collections.unmodifiableMap(this.workerNumberToStageMap);
    }

    @JsonIgnore
    public Optional<IMantisStageMetadata> getStageMetadata(int stageNum) {
        return Optional.ofNullable(stageMetadataMap.get(stageNum));
    }

    /**
     * Replace meta data for the given worker with a newly created worker that has not been dispatched yet.
     * Dispatch happens after this method returns.
     * Delegates the actual replacing to occur in the StageMetadata.
     * @param stageNum stage number hosting the worker
     * @param newWorker replacement worker (not yet dispatched)
     * @param oldWorker worker being replaced; its number mapping is removed
     * @param jobStore store used to persist the change
     * @return always true in the current implementation
     * @throws Exception when persistence fails
     */
    boolean replaceWorkerMetaData(int stageNum, JobWorker newWorker, JobWorker oldWorker, MantisJobStore jobStore) throws Exception {
        boolean result = true;
        ((MantisStageMetadataImpl) stageMetadataMap.get(stageNum)).replaceWorkerIndex(newWorker, oldWorker, jobStore);
        // remove mapping for replaced worker
        removeWorkerMetadata(oldWorker.getMetadata().getWorkerNumber());
        Integer integer = workerNumberToStageMap.put(newWorker.getMetadata().getWorkerNumber(), stageNum);
        if (integer != null && integer != stageNum) {
            logger.error("Unexpected to put worker number mapping from {} to stage {} for job {}, prev mapping to stage {}",
                    newWorker.getMetadata().getWorkerNumber(), stageNum, newWorker.getMetadata().getJobId(), integer);
        }
        return result;
    }

    /**
     * Adds the worker to the given stage and records its worker-number mapping.
     * NOTE(review): if the stage number is absent this logs a warning and then NPEs on the
     * following cast; consider returning false instead.
     * NOTE(review): the trace message's arguments appear swapped (stageNum is logged where the
     * worker is named and vice versa).
     */
    public boolean addWorkerMetadata(int stageNum, JobWorker newWorker) {
        if (logger.isTraceEnabled()) { logger.trace("Adding workerMetadata {} for stage {}", stageNum, newWorker); }
        if (!stageMetadataMap.containsKey(stageNum)) {
            logger.warn("No such stage {}", stageNum);
        }
        final boolean result = ((MantisStageMetadataImpl) stageMetadataMap.get(stageNum)).addWorkerIndex(newWorker);
        if (result) {
            Integer integer = workerNumberToStageMap.put(newWorker.getMetadata().getWorkerNumber(), stageNum);
            if (integer != null && integer != stageNum) {
                logger.error("Unexpected to put worker number mapping from {} to stage {} for job {}, prev mapping to stage {}",
                        newWorker.getMetadata().getWorkerNumber(), stageNum, newWorker.getMetadata().getJobId(), integer);
            }
            if (logger.isTraceEnabled()) { logger.trace("Exit addworkerMeta {}", workerNumberToStageMap); }
        }
        return result;
    }

    /** Removes the worker-number -> stage mapping; returns true when a mapping existed. */
    boolean removeWorkerMetadata(int workerNumber) {
        if(workerNumberToStageMap.containsKey(workerNumber)) {
            workerNumberToStageMap.remove(workerNumber);
            return true;
        }
        return false;
    }

    /** Looks up a worker by stage number and worker index; empty when either is unknown. */
    @JsonIgnore
    public Optional<JobWorker> getWorkerByIndex(int stageNumber, int workerIndex) throws InvalidJobException {
        Optional<IMantisStageMetadata> stage = getStageMetadata(stageNumber);
        if(stage.isPresent()) {
            return Optional.ofNullable(stage.get().getWorkerByIndex(workerIndex));
        }
        return Optional.empty();
        //throw new InvalidJobException(jobId, stageNumber, workerIndex);
    }

    /** Looks up a worker by its globally unique worker number via the number->stage index. */
    @JsonIgnore
    public Optional<JobWorker> getWorkerByNumber(int workerNumber) throws InvalidJobException {
        Integer stageNumber = workerNumberToStageMap.get(workerNumber);
        if(stageNumber == null) {
            return Optional.empty();
        }
        IMantisStageMetadata stage = stageMetadataMap.get(stageNumber);
        if(stage == null) {
            return Optional.empty();
        }
        return Optional.ofNullable(stage.getWorkerByWorkerNumber(workerNumber));
    }

    /** @return the highest known worker number, or -1 when no workers are registered. */
    @JsonIgnore
    public int getMaxWorkerNumber() {
        // Expected to be called only during initialization, no need to synchronize/lock.
        // Resubmitted workers are expected to have a worker number greater than those they replace.
        int max=-1;
        for(int id: workerNumberToStageMap.keySet())
            if(max < id) max = id;
        return max;
    }

    @JsonIgnore
    @Override
    public SchedulingInfo getSchedulingInfo() {
        return this.jobDefinition.getSchedulingInfo();
    }

    @Override
    public long getMinRuntimeSecs() {
        return this.jobDefinition.getJobSla().getMinRuntimeSecs();
    }

    /**
     * Migrate to using the getArtifactName and getArtifactVersion
     */
    @Deprecated @Override
    public URL getJobJarUrl() {
        try {
            return DataFormatAdapter.generateURL(getArtifactName());
        } catch (MalformedURLException e) {
            // should not happen
            throw new RuntimeException(e);
        }
    }

    @Override
    public Optional<Instant> getStartedAtInstant() {
        // DEFAULT_STARTED_AT_EPOCH marks "never started".
        if(this.startedAt == DEFAULT_STARTED_AT_EPOCH) {
            return Optional.empty();
        } else {
            return Optional.of(Instant.ofEpochMilli(startedAt));
        }
    }

    public long getStartedAt() {
        return this.startedAt;
    }

    @Override
    public Optional<Instant> getEndedAtInstant() {
        if(this.endedAt == DEFAULT_STARTED_AT_EPOCH) {
            return Optional.empty();
        } else {
            return Optional.of(Instant.ofEpochMilli(endedAt));
        }
    }

    public long getEndedAt() {
        return this.endedAt;
    }

    /** Builder for {@link MantisJobMetadataImpl}. */
    public static class Builder {
        JobId jobId;
        String user;
        JobDefinition jobDefinition;
        long submittedAt;
        long startedAt;
        JobState state;
        int nextWorkerNumberToUse = 1;
        long heartbeatIntervalSecs = 0;
        long workerTimeoutSecs = 0;
        Costs jobCosts;

        public Builder() {
        }

        /** Copy-constructor style initialization from an existing job metadata instance. */
        public Builder(MantisJobMetadataImpl mJob) {
            this.jobId = mJob.getJobId();
            this.jobDefinition = mJob.getJobDefinition();
            this.submittedAt = mJob.getSubmittedAt();
            this.state = mJob.getState();
            this.nextWorkerNumberToUse = mJob.getNextWorkerNumberToUse();
            this.heartbeatIntervalSecs = mJob.getHeartbeatIntervalSecs();
            this.workerTimeoutSecs = mJob.getWorkerTimeoutSecs();
            this.jobCosts = mJob.getJobCosts();
        }

        public Builder withJobId(JobId jobId) {
            this.jobId = jobId;
            return this;
        }

        public Builder withJobDefinition(JobDefinition jD) {
            this.jobDefinition = jD;
            return this;
        }

        public Builder withSubmittedAt(long submittedAt) {
            this.submittedAt = submittedAt;
            return this;
        }

        public Builder withSubmittedAt(Instant submittedAt) {
            this.submittedAt = submittedAt.toEpochMilli();
            return this;
        }

        public Builder withStartedAt(Instant startedAt) {
            this.startedAt = startedAt.toEpochMilli();
            return this;
        }

        public Builder withJobState(JobState state) {
            this.state = state;
            return this;
        }

        public Builder withNextWorkerNumToUse(int workerNum) {
            this.nextWorkerNumberToUse = workerNum;
            return this;
        }

        public Builder withJobCost(Costs costs) {
            this.jobCosts = costs;
            return this;
        }

        public Builder withHeartbeatIntervalSecs(long secs) {
            this.heartbeatIntervalSecs = secs;
            return this;
        }

        public Builder withWorkerTimeoutSecs(long secs) {
            this.workerTimeoutSecs = secs;
            return this;
        }

        /** Copies all builder-relevant fields from the given instance; equivalent to {@link #Builder(MantisJobMetadataImpl)}. */
        public Builder from(MantisJobMetadataImpl mJob) {
            this.jobId = mJob.getJobId();
            this.jobDefinition = mJob.getJobDefinition();
            this.submittedAt = mJob.getSubmittedAt();
            this.state = mJob.getState();
            this.jobCosts = mJob.getJobCosts();
            this.nextWorkerNumberToUse = mJob.getNextWorkerNumberToUse();
            this.heartbeatIntervalSecs = mJob.getHeartbeatIntervalSecs();
            this.workerTimeoutSecs = mJob.getWorkerTimeoutSecs();
            return this;
        }

        // NOTE(review): jobCosts set on the builder is not passed to the constructed instance.
        public MantisJobMetadataImpl build() {
            return new MantisJobMetadataImpl(jobId, submittedAt, startedAt, jobDefinition, state, nextWorkerNumberToUse, heartbeatIntervalSecs, workerTimeoutSecs);
        }
    }

    @Override
    public String toString() {
        return "MantisJobMetadataImpl{" +
                "jobId=" + jobId +
                ", submittedAt=" + submittedAt +
                ", startedAt=" + startedAt +
                ", endedAt=" + endedAt +
                ", state=" + state +
                ", nextWorkerNumberToUse=" + nextWorkerNumberToUse +
                ", jobDefinition=" + jobDefinition +
                ", stageMetadataMap=" + stageMetadataMap +
                ", heartbeatIntervalSecs=" + heartbeatIntervalSecs +
                ", workerTimeoutSecs=" + workerTimeoutSecs +
                ", workerNumberToStageMap=" + workerNumberToStageMap +
                '}';
    }

    // NOTE(review): workerTimeoutSecs is excluded from both equals and hashCode (consistently).
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        MantisJobMetadataImpl that = (MantisJobMetadataImpl) o;
        return submittedAt == that.submittedAt &&
                startedAt == that.startedAt &&
                endedAt == that.endedAt &&
                nextWorkerNumberToUse == that.nextWorkerNumberToUse &&
                heartbeatIntervalSecs == that.heartbeatIntervalSecs &&
                Objects.equals(jobId, that.jobId) &&
                state == that.state &&
                Objects.equals(jobDefinition, that.jobDefinition);
    }

    @Override
    public int hashCode() {
        return Objects.hash(jobId, submittedAt, startedAt, endedAt, state, nextWorkerNumberToUse, heartbeatIntervalSecs, jobDefinition);
    }

    @Override
    public Costs getJobCosts() {
        return jobCosts;
    }
}
| 8,059 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/NoopCostsCalculator.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import io.mantisrx.server.master.domain.Costs;
/**
 * A {@link CostsCalculator} that performs no calculation and always returns
 * {@link Costs#ZERO}; useful when cost accounting is disabled.
 */
public class NoopCostsCalculator implements CostsCalculator {
    @Override
    public Costs calculateCosts(IMantisJobMetadata jobMetadata) {
        // jobMetadata is intentionally ignored.
        return Costs.ZERO;
    }
}
| 8,060 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/IWorkerManager.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import java.time.Instant;
import java.util.List;
import rx.subjects.BehaviorSubject;
/**
 * Declares the behavior of the WorkerManager which is embedded within a JobManager.
 */
public interface IWorkerManager {

    /**
     * Perform any cleanup during job shutdown.
     */
    void shutdown();

    /**
     * Handle worker related events.
     *
     * @param event the worker event to process
     * @param jobState the current state of the owning job
     */
    void processEvent(WorkerEvent event, JobState jobState);

    /**
     * Iterate through all active workers and identify and restart workers that have not sent a heart beat
     * within a configured time.
     *
     * @param now the current time against which heart-beat staleness is evaluated
     */
    void checkHeartBeats(Instant now);

    /**
     * Invoked during Agent deploy. Resubmit workers that are currently running on old VMs.
     *
     * @param now the current time used for migration bookkeeping
     */
    void migrateDisabledVmWorkers(Instant now);

    /**
     * Increase or decrease the number of workers associated with the given stage.
     *
     * @param stageMetaData metadata of the stage to scale
     * @param numWorkers the desired worker count for the stage
     * @param reason human-readable reason for the scaling action
     *
     * @return the resulting number of workers for the stage
     */
    int scaleStage(MantisStageMetadataImpl stageMetaData, int numWorkers, String reason);

    /**
     * Explicitly kill and resubmit worker associated with the given workerNumber.
     *
     * @param workerNumber the number of the worker to resubmit
     *
     * @throws Exception when the worker cannot be resubmitted
     */
    void resubmitWorker(int workerNumber) throws Exception;

    /**
     * Get a list of currently active workers {@link IMantisWorkerMetadata}.
     *
     * @param limit maximum number of workers to return
     *
     * @return the active workers, up to {@code limit}
     */
    List<IMantisWorkerMetadata> getActiveWorkers(int limit);

    /**
     * Returns a {@link BehaviorSubject} where job status updates are published.
     *
     * @return the subject emitting {@link JobSchedulingInfo} updates
     */
    BehaviorSubject<JobSchedulingInfo> getJobStatusSubject();

    /**
     * Force sending any updates in worker data.
     */
    void refreshAndSendWorkerAssignments();
}
| 8,061 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/MantisStageMetadataImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import static java.util.Optional.of;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.master.jobcluster.job.worker.JobWorker;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.master.jobcluster.job.worker.WorkerTerminate;
import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.master.WorkerRequest;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.persistence.exceptions.InvalidJobException;
import io.mantisrx.server.master.persistence.exceptions.InvalidWorkerStateChangeException;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Implements the {@link IMantisStageMetadata} interface. Represents information related to a Job stage.
*/
public class MantisStageMetadataImpl implements IMantisStageMetadata {
private final JobId jobId;
private final int stageNum;
private final int numStages;
private final MachineDefinition machineDefinition;
private int numWorkers;
@JsonIgnore
private boolean isSubscribed = false;
private final List<JobConstraints> hardConstraints;
private final List<JobConstraints> softConstraints;
// scaling policy be null
private StageScalingPolicy scalingPolicy;
private boolean scalable;
@JsonIgnore
private final ConcurrentMap<Integer, JobWorker> workerByIndexMetadataSet;
@JsonIgnore
private final ConcurrentMap<Integer, JobWorker> workerByNumberMetadataSet;
private static final Logger LOGGER = LoggerFactory.getLogger(MantisStageMetadataImpl.class);
/**
 * Default constructor (also the Jackson creator).
 * @param jobId the owning job's id
 * @param stageNum this stage's number within the job
 * @param numStages total number of stages in the job
 * @param machineDefinition resources each worker of this stage runs with
 * @param numWorkers number of workers in this stage
 * @param hardConstraints mandatory scheduling constraints
 * @param softConstraints best-effort scheduling constraints
 * @param scalingPolicy autoscaling policy; may be null
 * @param scalable whether this stage can be scaled
 */
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public MantisStageMetadataImpl(@JsonProperty("jobId") JobId jobId,
                               @JsonProperty("stageNum") int stageNum,
                               @JsonProperty("numStages") int numStages,
                               @JsonProperty("machineDefinition") MachineDefinition machineDefinition,
                               @JsonProperty("numWorkers") int numWorkers,
                               @JsonProperty("hardConstraints") List<JobConstraints> hardConstraints,
                               @JsonProperty("softConstraints") List<JobConstraints> softConstraints,
                               @JsonProperty("scalingPolicy") StageScalingPolicy scalingPolicy,
                               @JsonProperty("scalable") boolean scalable) {
    this.jobId = jobId;
    this.stageNum = stageNum;
    this.numStages = numStages;
    this.machineDefinition = machineDefinition;
    this.numWorkers = numWorkers;
    this.hardConstraints = hardConstraints;
    this.softConstraints = softConstraints;
    this.scalingPolicy = scalingPolicy;
    this.scalable = scalable;
    // Worker lookup maps are runtime-only state, rebuilt as workers are added.
    workerByIndexMetadataSet = new ConcurrentHashMap<>();
    workerByNumberMetadataSet = new ConcurrentHashMap<>();
}
/** @return the owning job's id. */
@Override
public JobId getJobId() {
    return jobId;
}

/** @return this stage's number within the job. */
@Override
public int getStageNum() {
    return stageNum;
}

/** @return total number of stages in the job. */
@Override
public int getNumStages() {
    return numStages;
}

/** @return current number of workers in this stage. */
@Override
public int getNumWorkers() {
    return numWorkers;
}
// Equality is defined over the stage's configuration fields; the runtime worker maps and
// subscription flag are intentionally excluded. equals and hashCode use the same field set.
@Override
public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    MantisStageMetadataImpl that = (MantisStageMetadataImpl) o;
    return stageNum == that.stageNum && numStages == that.numStages && numWorkers == that.numWorkers
            && scalable == that.scalable && Objects.equals(jobId, that.jobId)
            && Objects.equals(machineDefinition, that.machineDefinition)
            && Objects.equals(hardConstraints, that.hardConstraints)
            && Objects.equals(softConstraints, that.softConstraints)
            && Objects.equals(scalingPolicy, that.scalingPolicy);
}

@Override
public int hashCode() {
    return Objects.hash(jobId, stageNum, numStages, machineDefinition, numWorkers, hardConstraints,
            softConstraints, scalingPolicy, scalable);
}
/**
 * Builder to create an instance of {@link MantisStageMetadataImpl}.
 */
public static class Builder {
    private JobId jobId;
    private int stageNum = -1;
    private int numStages = 0;
    private MachineDefinition machineDefinition;
    private int numWorkers = 0;
    private List<JobConstraints> hardConstraints = Collections.emptyList();
    private List<JobConstraints> softConstraints = Collections.emptyList();
    private StageScalingPolicy scalingPolicy;
    private boolean scalable;

    /**
     * Ctor.
     */
    public Builder() {
    }

    /**
     * Sets the {@link JobId}.
     * @param jId the owning job's id
     * @return this builder
     */
    public Builder withJobId(JobId jId) {
        this.jobId = jId;
        return this;
    }

    /**
     * Sets the stage number.
     * @param stageNum this stage's number within the job
     * @return this builder
     */
    public Builder withStageNum(int stageNum) {
        this.stageNum = stageNum;
        return this;
    }

    /**
     * Sets the total number of stages.
     * @param numStages total stage count of the job; must be positive at build time
     * @return this builder
     */
    public Builder withNumStages(int numStages) {
        this.numStages = numStages;
        return this;
    }

    /**
     * Sets the {@link MachineDefinition} to be used by the workers of this stage.
     * @param md resource definition for each worker
     * @return this builder
     */
    public Builder withMachineDefinition(MachineDefinition md) {
        this.machineDefinition = md;
        return this;
    }

    /**
     * The total number of workers in this stage.
     * @param numWorkers worker count for the stage
     * @return this builder
     */
    public Builder withNumWorkers(int numWorkers) {
        this.numWorkers = numWorkers;
        return this;
    }

    /**
     * Sets the mandatory scheduling constraints associated with this stage.
     * @param hardC hard constraints; null is ignored, keeping the empty default
     * @return this builder
     */
    public Builder withHardConstraints(List<JobConstraints> hardC) {
        if (hardC != null) {
            this.hardConstraints = hardC;
        }
        return this;
    }

    /**
     * Sets the best effort scheduling constraints associated with this stage.
     * @param softC soft constraints; null is ignored, keeping the empty default
     * @return this builder
     */
    public Builder withSoftConstraints(List<JobConstraints> softC) {
        if (softC != null) {
            this.softConstraints = softC;
        }
        return this;
    }

    /**
     * The scaling policy associated with this stage.
     * @param pol the autoscaling policy; may be null
     * @return this builder
     */
    public Builder withScalingPolicy(StageScalingPolicy pol) {
        this.scalingPolicy = pol;
        return this;
    }

    /**
     * Sets the whether this stage is scalable.
     * @param s true when the stage may be scaled
     * @return this builder
     */
    public Builder isScalable(boolean s) {
        scalable = s;
        return this;
    }

    /**
     * Convenience method to clone data from an old worker of this stage.
     * @param workerRequest the request whose stage-level fields are copied; must not be null
     * @return this builder
     */
    public Builder from(WorkerRequest workerRequest) {
        Objects.requireNonNull(workerRequest);
        this.jobId = (JobId.fromId(workerRequest.getJobId()).orElse(null));
        this.stageNum = (workerRequest.getWorkerStage());
        this.numStages = (workerRequest.getTotalStages());
        this.machineDefinition = (workerRequest.getDefinition());
        this.numWorkers = (workerRequest.getNumInstancesAtStage());
        this.hardConstraints = (workerRequest.getHardConstraints() != null ? workerRequest.getHardConstraints()
                : new ArrayList<>());
        this.softConstraints = (workerRequest.getSoftConstraints() != null ? workerRequest.getSoftConstraints()
                : new ArrayList<>());
        this.scalingPolicy = (workerRequest.getSchedulingInfo().forStage(
                workerRequest.getWorkerStage()).getScalingPolicy());
        this.scalable = (workerRequest.getSchedulingInfo().forStage(workerRequest.getWorkerStage()).getScalable());
        return this;
    }

    /**
     * Builds and returns an instance of {@link MantisStageMetadataImpl}.
     * @return the constructed stage metadata
     * @throws NullPointerException when jobId was not set
     * @throws IllegalArgumentException when stageNum or numStages is invalid
     */
    public IMantisStageMetadata build() {
        Objects.requireNonNull(jobId, "JobId cannot be null");
        //Objects.requireNonNull(scalingPolicy, "Scaling policy cannot be null");
        if (stageNum <= -1) {
            throw new IllegalArgumentException(String.format("Invalid stage number %d", stageNum));
        }
        if (numStages <= 0) {
            throw new IllegalArgumentException(String.format("Invalid no of stages %d", numStages));
        }
        return new MantisStageMetadataImpl(jobId, stageNum, numStages, machineDefinition, numWorkers,
                hardConstraints, softConstraints, scalingPolicy, scalable);
    }
}
/**
 * Updates the total number of workers in this stage and persists the change.
 * NOTE(review): "unsafe" presumably means no validation or synchronization is performed here
 * — confirm callers guarantee single-threaded access.
 * @param numWorkers the new worker count
 * @param store store used to persist the updated stage
 * @throws Exception when persistence fails
 */
public void unsafeSetNumWorkers(int numWorkers, MantisJobStore store) throws Exception {
    this.numWorkers = numWorkers;
    store.updateStage(this);
}
/**
 * Removes the referenced worker from this stage's index and number maps and archives it.
 * The removal only takes effect when both maps agree that index and number refer to the
 * same worker; otherwise nothing is archived.
 *
 * @param index the worker's index within the stage
 * @param number the worker's globally unique number
 * @param store store used to archive the removed worker
 * @return true when the worker was removed and submitted for archival
 */
public boolean unsafeRemoveWorker(int index, int number, MantisJobStore store) {
    final JobWorker removedIdx = workerByIndexMetadataSet.remove(index);
    final JobWorker removedNum = workerByNumberMetadataSet.remove(number);
    if (removedIdx != null && removedNum != null && removedIdx.getMetadata().getWorkerNumber() == number
            && removedNum.getMetadata().getWorkerIndex() == index) {
        LOGGER.info("Worker index {} - number {} marked for deletion", index, number);
        try {
            archiveWorker(removedIdx.getMetadata(), store);
        } catch (IOException e) {
            // Fix: was e.printStackTrace(), which bypasses the logging infrastructure and
            // loses context; removal still succeeds, archival is best-effort.
            LOGGER.error("Failed to archive worker index {} - number {}", index, number, e);
        }
        return true;
    }
    return false;
}
@Override
public List<JobConstraints> getHardConstraints() {
return Collections.unmodifiableList(hardConstraints);
}
@Override
public List<JobConstraints> getSoftConstraints() {
return Collections.unmodifiableList(softConstraints);
}
/** @return the autoscaling policy for this stage; may be null if the stage is not scalable. */
@Override
public StageScalingPolicy getScalingPolicy() {
    return scalingPolicy;
}
/** @return true if this stage may be scaled up/down. */
@Override
public boolean getScalable() {
    return scalable;
}
/** @return the resource (machine) definition each worker of this stage runs with. */
@Override
public MachineDefinition getMachineDefinition() {
    return machineDefinition;
}
/**
 * @return an unmodifiable view of the workers keyed by worker index.
 * @deprecated prefer {@link #getAllWorkers()}.
 */
@Deprecated
@JsonIgnore
@Override
public Collection<JobWorker> getWorkerByIndexMetadataSet() {
    return Collections.unmodifiableCollection(workerByIndexMetadataSet.values());
}
/**
 * @return an unmodifiable view of all workers tracked by this stage (keyed by worker
 *         number, so it may include workers already replaced at their index).
 */
@JsonIgnore
@Override
public Collection<JobWorker> getAllWorkers() {
    return Collections.unmodifiableCollection(workerByNumberMetadataSet.values());
}
/**
 * Looks up the worker currently occupying the given worker index.
 *
 * @param workerId the worker index within this stage (parameter name kept for compatibility)
 * @return the worker at that index
 * @throws InvalidJobException if no worker occupies the index
 */
@JsonIgnore
@Override
public JobWorker getWorkerByIndex(int workerId) throws InvalidJobException {
    final JobWorker match = workerByIndexMetadataSet.get(workerId);
    if (match != null) {
        return match;
    }
    // -1: the stage number is not part of this lookup context
    throw new InvalidJobException(jobId, -1, workerId);
}
/**
 * Looks up a worker by its unique worker number.
 *
 * @param workerNumber the worker number to find
 * @return the matching worker
 * @throws InvalidJobException if no worker with that number is tracked
 */
@JsonIgnore
@Override
public JobWorker getWorkerByWorkerNumber(int workerNumber) throws InvalidJobException {
    final JobWorker match = workerByNumberMetadataSet.get(workerNumber);
    if (match != null) {
        return match;
    }
    // -1: the stage number is not part of this lookup context
    throw new InvalidJobException(jobId, -1, workerNumber);
}
/**
 * Removes the given worker from the number -> worker map, but only if it is in a
 * terminal state. The index -> worker map is intentionally left untouched.
 *
 * @param workerNumber number of the worker to remove
 * @return the removed worker, or null if it was absent or not yet terminal
 */
JobWorker removeWorkerInFinalState(int workerNumber) {
    final JobWorker candidate = workerByNumberMetadataSet.get(workerNumber);
    if (candidate == null || !WorkerState.isTerminalState(candidate.getMetadata().getState())) {
        return null;
    }
    workerByNumberMetadataSet.remove(workerNumber);
    return candidate;
}
/**
 * Removes and returns the workers that are no longer associated with their index — i.e.
 * workers present in the number -> worker map whose index slot is now empty or owned by
 * a different worker number. These are safe to archive.
 *
 * @return the workers that were removed from the number -> worker map
 */
public Collection<JobWorker> removeArchiveableWorkers() {
    final Collection<JobWorker> archiveable = new LinkedList<>();
    // snapshot the key set so removal below cannot disturb the iteration
    for (Integer workerNumber : new HashSet<>(workerByNumberMetadataSet.keySet())) {
        final JobWorker byNumber = workerByNumberMetadataSet.get(workerNumber);
        final JobWorker byIndex = workerByIndexMetadataSet.get(byNumber.getMetadata().getWorkerIndex());
        final boolean indexOwnsThisWorker = byIndex != null
            && byIndex.getMetadata().getWorkerNumber() == byNumber.getMetadata().getWorkerNumber();
        if (!indexOwnsThisWorker) {
            workerByNumberMetadataSet.remove(workerNumber);
            archiveable.add(byNumber);
        }
    }
    return archiveable;
}
/**
 * Replace the old worker with the new worker at a worker index. The new worker has not
 * been scheduled yet, so it is just an in-memory representation.
 * <p>
 * Invalid conditions:
 * 1. New worker is in error state
 * 2. Old worker index != new worker index
 * 3. The given old worker is not the one currently associated with this index
 * <p>
 * Does the following, in order (order matters because each step feeds the next):
 * 1. Marks old worker as terminated
 * 2. Associates the new worker with this index
 * 3. Removes the old worker from the number -> worker map
 * 4. Persists the replacement to the store
 * 5. Associates new worker number -> new worker
 * 6. Archives the old worker (best effort)
 *
 * @param newWorker replacement worker; must not be null or in an error state
 * @param oldWorker worker currently occupying the index; must not be null
 * @param jobStore persistence store used to record the replacement
 * @throws Exception if persisting the replacement fails
 * @throws IllegalStateException if the new worker is in an error state
 * @throws IllegalArgumentException if the index/worker-number preconditions fail
 */
public void replaceWorkerIndex(JobWorker newWorker, JobWorker oldWorker, MantisJobStore jobStore)
    throws Exception {
    Preconditions.checkNotNull(newWorker, "Replacement worker cannot be null");
    Preconditions.checkNotNull(oldWorker, "old worker cannot be null");
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("In MantisStageMetadataImpl:replaceWorkerIndex oldWorker {} new Worker {} for Job {}",
            oldWorker, newWorker, this.getJobId());
    }
    IMantisWorkerMetadata newWorkerMetadata = newWorker.getMetadata();
    IMantisWorkerMetadata oldWorkerMetadata = oldWorker.getMetadata();
    int index = newWorkerMetadata.getWorkerIndex();
    // reject replacement workers that are already in an error state
    if (WorkerState.isErrorState(newWorkerMetadata.getState())) {
        // should not get here
        String errMsg = String.format("New worker cannot be in error state %s", newWorkerMetadata.getState());
        LOGGER.error(errMsg);
        throw new IllegalStateException(errMsg);
    }
    // the index must already be tracked by this stage; otherwise there is nothing to replace
    if (!workerByIndexMetadataSet.containsKey(index)) {
        String errMsg = String.format("Index %d does not exist in workerByIndexMetadataSet %s for job %s", index,
            workerByIndexMetadataSet, this.jobId);
        throw new IllegalArgumentException(errMsg);
    } else {
        if (oldWorkerMetadata.getWorkerIndex() != index) {
            String errMsg = String.format("While replacing worker in Job %s, Old worker index %d does not match new %d",
                this.jobId, oldWorkerMetadata.getWorkerIndex(), index);
            LOGGER.error(errMsg);
            throw new IllegalArgumentException(errMsg);
        }
        LOGGER.debug("workerByIndexMetadataSet {}", workerByIndexMetadataSet);
        // confirm the given old worker is the one occupying the index right now
        JobWorker worker = workerByIndexMetadataSet.get(index);
        if (worker.getMetadata().getWorkerNumber() != oldWorkerMetadata.getWorkerNumber()) {
            String errMsg = String.format("Did not replace worker %d with %d for index %d of job %s, different worker %d exists already",
                oldWorkerMetadata.getWorkerNumber(), newWorkerMetadata.getWorkerNumber(), newWorkerMetadata.getWorkerIndex(),
                jobId, worker.getMetadata().getWorkerNumber());
            throw new IllegalArgumentException(errMsg);
        } else {
            // mark old worker as terminated
            processWorkerEvent(new WorkerTerminate(oldWorkerMetadata.getWorkerId(), WorkerState.Failed,
                JobCompletedReason.Relaunched, System.currentTimeMillis()), jobStore);
            // insert new worker
            workerByIndexMetadataSet.put(index, newWorker);
            // remove old worker from workerNumberSet
            removeWorkerInFinalState(oldWorkerMetadata.getWorkerNumber());
            // persist changes
            jobStore.replaceTerminatedWorker(oldWorkerMetadata, newWorkerMetadata);
            workerByNumberMetadataSet.put(newWorkerMetadata.getWorkerNumber(), newWorker);
            // archive worker; best effort, failure does not abort the replacement
            try {
                archiveWorker(oldWorkerMetadata, jobStore);
            } catch (Exception e) {
                LOGGER.error("Exception archiving worker", e);
            }
            LOGGER.info("Replaced worker {} with {} for index {} of job {}", oldWorkerMetadata.getWorkerNumber(),
                newWorkerMetadata.getWorkerNumber(), newWorkerMetadata.getWorkerIndex(), jobId);
        }
    }
}
/**
 * Persists the given worker's metadata to the archive store.
 *
 * @param worker metadata of the worker to archive
 * @param jobStore persistence store performing the archival
 * @throws IOException if the store fails to archive the worker
 */
private void archiveWorker(IMantisWorkerMetadata worker, MantisJobStore jobStore) throws IOException {
    jobStore.archiveWorker(worker);
}
/**
 * Adds the given {@link JobWorker} to this stage, registering it both by worker index
 * and by worker number.
 *
 * @param newWorker the worker to register
 * @return true if the worker was added, false if another worker already owns the index
 */
public boolean addWorkerIndex(JobWorker newWorker) {
    final IMantisWorkerMetadata metadata = newWorker.getMetadata();
    final int workerIndex = metadata.getWorkerIndex();
    // putIfAbsent returns the previous occupant (if any) of this index
    final JobWorker existing = workerByIndexMetadataSet.putIfAbsent(workerIndex, newWorker);
    if (existing != null) {
        LOGGER.warn("WorkerIndex {} already exists. Existing worker={} ", workerIndex, existing);
        return false;
    }
    workerByNumberMetadataSet.put(metadata.getWorkerNumber(), newWorker);
    return true;
}
/**
 * Updates the state of a worker based on the given worker event.
 *
 * @param event worker event to apply; its worker index selects the target worker
 * @param jobStore persistence store handed to the worker for saving state changes
 * @return the affected worker, or {@link Optional#empty()} if the worker lookup or
 *         the update failed
 */
public Optional<JobWorker> processWorkerEvent(WorkerEvent event, MantisJobStore jobStore) {
    try {
        JobWorker worker = getWorkerByIndex(event.getWorkerId().getWorkerIndex());
        try {
            // invalid transitions are ignored; the worker is still returned to the caller
            worker.processEvent(event, jobStore);
        } catch (InvalidWorkerStateChangeException wex) {
            LOGGER.warn("InvalidWorkerStateChangeException from: ", wex);
        }
        return of(worker);
    } catch (Exception e) {
        // NOTE(review): also reached when the worker-index lookup fails,
        // not only on save errors — the log message understates the cause.
        LOGGER.warn("Exception saving worker update", e);
    }
    return Optional.empty();
}
/**
 * Iterates through all workers (by index) of this stage.
 *
 * @return true iff every worker is in Started state
 */
@JsonIgnore
public boolean isAllWorkerStarted() {
    for (JobWorker jobWorker : workerByIndexMetadataSet.values()) {
        final boolean started = jobWorker.getMetadata().getState().equals(WorkerState.Started);
        if (!started) {
            return false;
        }
    }
    return true;
}
/**
 * Iterates through all workers (by index) of this stage.
 *
 * @return true iff every worker is in a terminal state
 */
@JsonIgnore
public boolean isAllWorkerCompleted() {
    for (JobWorker jobWorker : workerByIndexMetadataSet.values()) {
        final boolean terminal = WorkerState.isTerminalState(jobWorker.getMetadata().getState());
        if (!terminal) {
            LOGGER.debug("isAllWorkerCompleted returns false");
            return false;
        }
    }
    LOGGER.info("isAllWorkerCompleted returns true");
    return true;
}
/**
 * Counts the workers of this stage that are currently in Started state.
 *
 * @return the number of started workers
 */
@JsonIgnore
public int getNumStartedWorkers() {
    int count = 0;
    for (JobWorker jobWorker : workerByIndexMetadataSet.values()) {
        if (jobWorker.getMetadata().getState().equals(WorkerState.Started)) {
            count++;
        }
    }
    return count;
}
/** Debug representation of this stage, including both worker maps. */
@Override
public String toString() {
    final StringBuilder sb = new StringBuilder("MantisStageMetadataImpl [jobId=");
    sb.append(jobId)
        .append(", stageNum=").append(stageNum)
        .append(", numStages=").append(numStages)
        .append(", machineDefinition=").append(machineDefinition)
        .append(", numWorkers=").append(numWorkers)
        .append(", hardConstraints=").append(hardConstraints)
        .append(", softConstraints=").append(softConstraints)
        .append(", scalingPolicy=").append(scalingPolicy)
        .append(", scalable=").append(scalable)
        .append(", workerByIndexMetadataSet=").append(workerByIndexMetadataSet)
        .append(", workerByNumberMetadataSet=").append(workerByNumberMetadataSet)
        .append("]");
    return sb.toString();
}
}
| 8,062 |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import static io.mantisrx.master.StringConstants.MANTIS_MASTER_USER;
import static io.mantisrx.master.events.LifecycleEventsProto.StatusEvent.StatusEventType.*;
import static io.mantisrx.master.jobcluster.job.worker.MantisWorkerMetadataImpl.MANTIS_SYSTEM_ALLOCATED_NUM_PORTS;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.*;
import static java.util.Optional.*;
import akka.actor.*;
import com.netflix.fenzo.ConstraintEvaluator;
import com.netflix.fenzo.VMTaskFitnessCalculator;
import com.netflix.spectator.api.BasicTag;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.metrics.spectator.MetricGroupId;
import io.mantisrx.master.akka.MantisActorSupervisorStrategy;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventsProto;
import io.mantisrx.master.jobcluster.WorkerInfoListHolder;
import io.mantisrx.master.jobcluster.job.worker.*;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.*;
import io.mantisrx.master.jobcluster.proto.JobClusterProto;
import io.mantisrx.master.jobcluster.proto.JobProto;
import io.mantisrx.master.jobcluster.proto.JobProto.InitJob;
import io.mantisrx.master.jobcluster.proto.JobProto.JobInitialized;
import io.mantisrx.runtime.*;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.server.core.*;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.domain.JobMetadata;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.ConstraintsEvaluators;
import io.mantisrx.server.master.InvalidJobRequest;
import io.mantisrx.server.master.agentdeploy.MigrationStrategyFactory;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.config.MasterConfiguration;
import io.mantisrx.server.master.domain.DataFormatAdapter;
import io.mantisrx.server.master.domain.IJobClusterDefinition;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.persistence.exceptions.InvalidJobException;
import io.mantisrx.server.master.persistence.exceptions.InvalidWorkerStateChangeException;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.scheduler.*;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.com.google.common.base.Preconditions;
import io.mantisrx.shaded.com.google.common.cache.Cache;
import io.mantisrx.shaded.com.google.common.cache.CacheBuilder;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.io.IOException;
import java.time.Duration;
import java.time.Instant;
import java.util.*;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.schedulers.Schedulers;
import rx.subjects.BehaviorSubject;
/**
 * Actor responsible for handling all operations for a given JobID.
 * Related REST endpoints: {@code /api/submit}, {@code /api/jobs/kill},
 * {@code /api/jobs/scaleStage}, {@code /api/jobs/resubmitWorker}.
 *
 * @author njoshi
 */
public class JobActor extends AbstractActorWithTimers implements IMantisJobManager {
private static final String CHECK_HB_TIMER_KEY = "CHECK_HB";
private static final String REFRESH_SEND_STAGE_ASSIGNEMNTS_KEY = "REFRESH_SEND_STAGE_ASSIGNMENTS";
private static final Logger LOGGER = LoggerFactory.getLogger(JobActor.class);
private static final double DEFAULT_JOB_MASTER_CORES = 1;
private static final double DEFAULT_JOB_MASTER_MEM = 1024;
private static final double DEFAULT_JOB_MASTER_NW = 128;
private static final double DEFAULT_JOB_MASTER_DISK = 1024;
private final Metrics metrics;
private final MetricGroupId metricsGroupId;
private final Counter numWorkerResubmissions;
private final Counter numWorkerResubmitLimitReached;
private final Counter numWorkerTerminated;
private final Counter numScaleStage;
private final Counter numWorkersCompletedNotTerminal;
private final Counter numSchedulingChangesRefreshed;
private final Counter numMissingWorkerPorts;
/**
* Behavior after being initialized.
*/
private Receive initializedBehavior;
/**
* Behavior once active.
*/
private Receive activeBehavior;
/**
* Behavior during termination.
*/
private Receive terminatingBehavior;
/**
* Behavior after termination waiting for JCA to terminate actor.
*/
private Receive terminatedBehavior;
private final String clusterName;
private final JobId jobId;
private final IJobClusterDefinition jobClusterDefinition;
private volatile MantisJobMetadataImpl mantisJobMetaData;
private final MantisJobStore jobStore;
// load from config
private int workerWritesBatchSize = 10;
// Manages life cycle of worker
private IWorkerManager workerManager = null;
// Used to schedule and unschedule workers
private final MantisScheduler mantisScheduler;
private final LifecycleEventPublisher eventPublisher;
private final CostsCalculator costsCalculator;
private boolean hasJobMaster;
private volatile boolean allWorkersCompleted = false;
/**
 * Used by the JobCluster Actor to create this Job Actor.
 *
 * @param jobClusterDefinition the job cluster definition to be used while creating this job
 * @param jobMetadata the job metadata provided by the user
 * @param jobStore reference to the persistence store {@link MantisJobStore}
 * @param mantisScheduler reference to the {@link MantisScheduler} used to schedule work
 * @param eventPublisher reference to the {@link LifecycleEventPublisher} where lifecycle
 *                       events are published
 * @param costsCalculator calculator used by the actor for job costs
 * @return Akka {@link Props} for creating a {@link JobActor}
 */
public static Props props(
    final IJobClusterDefinition jobClusterDefinition,
    final MantisJobMetadataImpl jobMetadata,
    final MantisJobStore jobStore,
    final MantisScheduler mantisScheduler,
    final LifecycleEventPublisher eventPublisher,
    final CostsCalculator costsCalculator) {
    return Props.create(JobActor.class, jobClusterDefinition, jobMetadata, jobStore,
        mantisScheduler, eventPublisher, costsCalculator);
}
/**
 * This is invoked indirectly via the props method to create an instance of this class.
 * Captures references to the store/scheduler/publisher, pre-builds the Receive for each
 * actor state, and registers the per-job metrics group.
 *
 * @param jobClusterDefinition cluster definition this job belongs to
 * @param jobMetadata metadata of the job being managed
 * @param jobStore persistence store for job/worker state
 * @param scheduler scheduler used to (un)schedule workers
 * @param eventPublisher sink for lifecycle/status events
 * @param costsCalculator calculator for job costs
 */
public JobActor(
    final IJobClusterDefinition jobClusterDefinition,
    final MantisJobMetadataImpl jobMetadata,
    final MantisJobStore jobStore,
    final MantisScheduler scheduler,
    final LifecycleEventPublisher eventPublisher,
    final CostsCalculator costsCalculator) {
    this.clusterName = jobMetadata.getClusterName();
    this.jobId = jobMetadata.getJobId();
    this.jobStore = jobStore;
    this.jobClusterDefinition = jobClusterDefinition;
    this.mantisScheduler = scheduler;
    this.eventPublisher = eventPublisher;
    this.mantisJobMetaData = jobMetadata;
    this.costsCalculator = costsCalculator;
    // one Receive per actor state; the actor switches between them as the job transitions
    initializedBehavior = getInitializedBehavior();
    activeBehavior = getActiveBehavior();
    terminatingBehavior = getTerminatingBehavior();
    terminatedBehavior = getTerminatedBehavior();
    // per-job metric group; removed again in postStop to avoid leaking registrations
    this.metricsGroupId = getMetricGroupId(jobId.getId(), getResourceCluster());
    Metrics m = new Metrics.Builder()
        .id(metricsGroupId)
        .addCounter("numWorkerResubmissions")
        .addCounter("numWorkerResubmitLimitReached")
        .addCounter("numWorkerTerminated")
        .addCounter("numScaleStage")
        .addCounter("numWorkersCompletedNotTerminal")
        .addCounter("numSchedulingChangesRefreshed")
        .addCounter("numMissingWorkerPorts")
        .build();
    this.metrics = MetricsRegistry.getInstance().registerAndGet(m);
    this.numWorkerResubmissions = metrics.getCounter("numWorkerResubmissions");
    this.numWorkerResubmitLimitReached = metrics.getCounter("numWorkerResubmitLimitReached");
    this.numWorkerTerminated = metrics.getCounter("numWorkerTerminated");
    this.numScaleStage = metrics.getCounter("numScaleStage");
    this.numWorkersCompletedNotTerminal = metrics.getCounter("numWorkersCompletedNotTerminal");
    this.numSchedulingChangesRefreshed = metrics.getCounter("numSchedulingChangesRefreshed");
    this.numMissingWorkerPorts = metrics.getCounter("numMissingWorkerPorts");
}
/**
 * Creates a MetricGroupId for this actor's metrics, tagged with the job id and
 * resource cluster.
 *
 * @param id the job id to tag metrics with
 * @param resourceCluster the resource cluster name to tag metrics with
 * @return the metric group id used to register/remove this actor's metrics
 */
MetricGroupId getMetricGroupId(String id, String resourceCluster) {
    return new MetricGroupId("JobActor", new BasicTag("jobId", id), new BasicTag("resourceCluster", resourceCluster));
}
/**
 * Initializes the job. On a fresh submit this publishes the "request received" status
 * event, sets up the Job Master stage for autoscaled jobs and stores the job to
 * persistence. In all cases it instantiates the WorkerManager (which manages the worker
 * life cycle) and starts the periodic heartbeat-check and stage-assignment-refresh timers.
 *
 * @param isSubmit true when this is a brand new job submission (as opposed to a
 *                 re-initialization, e.g. on master failover)
 * @throws InvalidJobRequest if the job request is invalid
 * @throws InvalidJobException if the job definition is invalid
 * @throws Exception if storing the job or creating the worker manager fails
 */
void initialize(boolean isSubmit) throws Exception {
    LOGGER.info("Initializing Job {}", jobId);
    if (isSubmit) {
        eventPublisher.publishStatusEvent(new LifecycleEventsProto.JobStatusEvent(INFO,
            "Job request received", getJobId(), getJobState()));
        // Ignore isReadyForJobMaster flag: any autoscaled job gets a Job Master stage
        if (isAutoscaled(mantisJobMetaData.getSchedulingInfo())) {
            LOGGER.info("Job is autoscaled, setting up Job Master");
            setupJobMasterStage(mantisJobMetaData.getSchedulingInfo());
        }
        LOGGER.info("Storing job");
        jobStore.storeNewJob(mantisJobMetaData);
    }
    LOGGER.info("Stored mantis job");
    this.workerManager = new WorkerManager(this, jobClusterDefinition.getWorkerMigrationConfig(),
        this.mantisScheduler, isSubmit);
    long checkAgainInSeconds = getWorkerTimeoutSecs();
    long refreshStageAssignmentsDurationMs = ConfigurationProvider.getConfig()
        .getStageAssignmentRefreshIntervalMs();
    getTimers().startPeriodicTimer(CHECK_HB_TIMER_KEY, new JobProto.CheckHeartBeat(),
        Duration.ofSeconds(checkAgainInSeconds));
    // -1 indicates disabled, which means all updates will be sent immediately
    if (refreshStageAssignmentsDurationMs > 0) {
        getTimers().startPeriodicTimer(
            REFRESH_SEND_STAGE_ASSIGNEMNTS_KEY,
            new JobProto.SendWorkerAssignementsIfChanged(),
            Duration.ofMillis(refreshStageAssignmentsDurationMs));
    }
    // NOTE(review): the original code called
    // mantisJobMetaData.getJobDefinition().getJobSla().getRuntimeLimitSecs() here and
    // discarded the result — a no-op. No runtime-limit timer is scheduled in this
    // method; confirm whether RuntimeLimitReached scheduling happens elsewhere.
    LOGGER.info("Job {} initialized", this.jobId);
}
/**
 * Worker timeout for heartbeat enforcement: the job-level setting when positive,
 * otherwise the configured default.
 *
 * @return worker timeout in seconds
 */
private long getWorkerTimeoutSecs() {
    final long configured = mantisJobMetaData.getWorkerTimeoutSecs();
    return configured > 0 ? configured : ConfigurationProvider.getConfig().getDefaultWorkerTimeoutSecs();
}
/**
 * Sets up the Job Master stage (stage 0) for an autoscaled job. If stage 0 is not
 * already present in the scheduling info, a single-instance, non-scaled Job Master
 * stage is added and the job metadata is rebuilt to reflect the extra stage.
 * Always marks this job as having a Job Master.
 *
 * @param schedulingInfo the job's scheduling info, mutated in place when stage 0 is added
 * @throws io.mantisrx.runtime.command.InvalidJobException if rebuilding the job definition fails
 */
private void setupJobMasterStage(SchedulingInfo schedulingInfo)
    throws io.mantisrx.runtime.command.InvalidJobException {
    LOGGER.info("Job {} is autoscaled setting up Job Master", this.jobId);
    if (schedulingInfo.forStage(0) == null) {
        // create stage 0 schedulingInfo only if not already provided;
        // the jobMaster stage itself is not scaled
        schedulingInfo.addJobMasterStage(StageSchedulingInfo.builder()
            .numberOfInstances(1)
            .machineDefinition(getJobMasterMachineDef())
            .build());
        // Rebuild jobMetadata so its job definition reflects the newly added stage
        mantisJobMetaData = new MantisJobMetadataImpl.Builder(mantisJobMetaData)
            .withJobDefinition(
                new JobDefinition.Builder()
                    .from(mantisJobMetaData.getJobDefinition())
                    .withSchedulingInfo(schedulingInfo)
                    .withNumberOfStages(schedulingInfo.getStages().size())
                    .build())
            .build();
    }
    hasJobMaster = true;
}
/**
 * Machine definition used for the Job Master (stage 0) worker. Values come from the
 * master configuration when available; otherwise compiled-in defaults are used.
 *
 * @return the machine definition for the Job Master worker
 */
private MachineDefinition getJobMasterMachineDef() {
    final MasterConfiguration config = ConfigurationProvider.getConfig();
    if (config == null) {
        // no configuration available, fall back to compiled-in defaults
        return new MachineDefinition(
            DEFAULT_JOB_MASTER_CORES, DEFAULT_JOB_MASTER_MEM, DEFAULT_JOB_MASTER_NW,
            DEFAULT_JOB_MASTER_DISK, 1);
    }
    return new MachineDefinition(
        config.getJobMasterCores(), config.getJobMasterMemoryMB(), config.getJobMasterNetworkMbps(),
        config.getJobMasterDiskMB(), 1);
}
/** Akka lifecycle hook; only logs actor startup. */
@Override
public void preStart() throws Exception {
    LOGGER.info("Job Actor {}-{} started", clusterName, jobId);
}
/**
 * Akka lifecycle hook; removes this actor's metric group so registrations do not leak.
 * NOTE(review): shutdown() is intentionally commented out here — confirm teardown of
 * the worker manager happens elsewhere.
 */
@Override
public void postStop() throws Exception {
    LOGGER.info("Job Actor {} stopped invoking cleanup logic", jobId);
    if (metricsGroupId != null) {
        MetricsRegistry.getInstance().remove(metricsGroupId);
    }
    //shutdown();
}
/**
 * Custom supervisor strategy to resume the child actors on Exception instead of the
 * default restart.
 */
@Override
public SupervisorStrategy supervisorStrategy() {
    return MantisActorSupervisorStrategy.getInstance().create();
}
/** The actor starts in the initializing state; it only accepts {@code InitJob} there. */
@Override
public Receive createReceive() {
    return getInitializingBehavior();
}
/**
 * Builds the standard log/response message for a message that is not valid in the
 * actor's current state.
 *
 * @param event string form of the unexpected message
 * @param cluster the job id the actor manages
 * @param state name of the actor's current state
 * @return the formatted warning message
 */
private String genUnexpectedMsg(String event, String cluster, String state) {
    return "Unexpected message " + event + " received by Job actor " + cluster + " in " + state + " State";
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
Job Actor behaviors 12 total
* - Init
* - GET
* - LIST workers
* - GET SCHED INFO
* - SCALE
* - KILL
* - RESUBMIT WORKER
* - WorkerEvent
*
* // SELF SENT
* - HB enforcement
* - Runtime enforcement
* - Self Destruct
* - Refresh Stage Assignments
*/
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Behavior while the job is terminating. Only read-only queries are honored:
 * GET job details, GET updated job definition, LIST workers. Worker events are
 * dropped, and every other message gets an "unexpected message" response or warning.
 *
 * @return the Akka {@link Receive} for the terminating state
 */
private Receive getTerminatingBehavior() {
    String state = "terminating";
    return receiveBuilder()
        // EXPECTED MESSAGES BEGIN//
        // get Job Details
        .match(GetJobDetailsRequest.class, this::onGetJobDetails)
        // process request to get the given job definition updated with this job actor.
        .match(GetJobDefinitionUpdatedFromJobActorRequest.class, this::onGetJobDefinitionUpdatedFromJobActor)
        // list active workers request
        .match(ListWorkersRequest.class, this::onListActiveWorkers)
        // EXPECTED MESSAGES END//
        // UNEXPECTED MESSAGES BEGIN //
        // Worker related events are dropped while terminating
        .match(WorkerEvent.class, (x) -> LOGGER.warn("Job {} is Terminating, ignoring worker Events {}",
            this.jobId.getId(), x))
        .match(InitJob.class, (x) -> getSender().tell(new JobInitialized(x.requestId, SUCCESS,
            genUnexpectedMsg(x.toString(), this.jobId.getId(), state), this.jobId, x.requstor), getSelf()))
        // explicit resubmit worker
        .match(ResubmitWorkerRequest.class, (x) -> getSender().tell(new ResubmitWorkerResponse(x.requestId,
            CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.jobId.getId(), state)), getSelf()))
        // Heart beat accounting timers
        .match(JobProto.CheckHeartBeat.class, (x) -> LOGGER.warn(genUnexpectedMsg(x.toString(),
            this.jobId.getId(), state)))
        // runtime limit reached
        .match(JobProto.RuntimeLimitReached.class, (x) -> LOGGER.warn(genUnexpectedMsg(x.toString(),
            this.jobId.getId(), state)))
        // Kill job request; answered with SUCCESS/Noop because the job is already going down
        .match(JobClusterProto.KillJobRequest.class, (x) -> getSender().tell(new KillJobResponse(x.requestId,
            SUCCESS, JobState.Noop, genUnexpectedMsg(x.toString(), this.jobId.getId(), state),
            this.jobId, x.user), getSelf()))
        // scale stage request
        .match(ScaleStageRequest.class, (x) -> getSender().tell(new ScaleStageResponse(x.requestId,
            CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.jobId.getId(), state),
            0), getSelf()))
        // scheduling Info observable
        .match(GetJobSchedInfoRequest.class, (x) -> getSender().tell(
            new GetJobSchedInfoResponse(x.requestId, CLIENT_ERROR,
                genUnexpectedMsg(x.toString(), this.jobId.getId(), state), empty()), getSelf()))
        .match(GetLatestJobDiscoveryInfoRequest.class, (x) -> getSender().tell(
            new GetLatestJobDiscoveryInfoResponse(x.requestId, CLIENT_ERROR,
                genUnexpectedMsg(x.toString(), this.jobId.getId(), state), empty()), getSelf()))
        .match(
            JobProto.SendWorkerAssignementsIfChanged.class,
            (x) -> LOGGER.warn(genUnexpectedMsg(x.toString(), this.jobId.getId(), state)))
        // fixed: original concatenated "...in" + "Terminating..." producing "inTerminating"
        .match(KillJobResponse.class, (x) -> LOGGER.info("Received Kill Job Response in "
            + "Terminating State Ignoring"))
        .matchAny(x -> LOGGER.warn(genUnexpectedMsg(x.toString(), this.jobId.getId(), state)))
        // UNEXPECTED MESSAGES END
        .build();
}
/**
 * Behavior after the job has terminated, while waiting for the JobCluster Actor to stop
 * this actor. Only read-only queries are honored: GET job details, GET updated job
 * definition, LIST workers. Everything else is answered with an "unexpected message"
 * response or logged and dropped.
 *
 * @return the Akka {@link Receive} for the terminated state
 */
private Receive getTerminatedBehavior() {
    String state = "terminated";
    return receiveBuilder()
        // EXPECTED MESSAGES BEGIN//
        // get Job Details
        .match(GetJobDetailsRequest.class, this::onGetJobDetails)
        // process request to get the given job definition updated by this job actor.
        .match(GetJobDefinitionUpdatedFromJobActorRequest.class, this::onGetJobDefinitionUpdatedFromJobActor)
        // list active workers request
        .match(ListWorkersRequest.class, this::onListActiveWorkers)
        // EXPECTED MESSAGES END//
        // UNEXPECTED MESSAGES BEGIN //
        .match(InitJob.class, (x) -> getSender().tell(
            new JobInitialized(x.requestId, SUCCESS, genUnexpectedMsg(
                x.toString(), this.jobId.getId(), state), this.jobId, x.requstor), getSelf()))
        // explicit resubmit worker
        .match(ResubmitWorkerRequest.class, (x) -> getSender().tell(
            new ResubmitWorkerResponse(x.requestId, CLIENT_ERROR,
                genUnexpectedMsg(x.toString(), this.jobId.getId(), state)), getSelf()))
        // Heart beat accounting timers
        .match(JobProto.CheckHeartBeat.class, (x) -> LOGGER.warn(
            genUnexpectedMsg(x.toString(), this.jobId.getId(), state)))
        // Migrate worker request
        .match(JobProto.MigrateDisabledVmWorkersRequest.class, (x) -> LOGGER.warn(
            genUnexpectedMsg(x.toString(), this.jobId.getId(), state)))
        // runtime limit reached
        .match(JobProto.RuntimeLimitReached.class, (x) -> LOGGER.warn(
            genUnexpectedMsg(x.toString(), this.jobId.getId(), state)))
        // Kill job request; answered with SUCCESS/Noop because the job is already gone
        .match(JobClusterProto.KillJobRequest.class, (x) -> getSender().tell(
            new KillJobResponse(x.requestId, SUCCESS, JobState.Noop,
                genUnexpectedMsg(x.toString(), this.jobId.getId(), state), this.jobId, x.user),
            getSelf()))
        // scale stage request
        .match(ScaleStageRequest.class, (x) -> getSender().tell(
            new ScaleStageResponse(x.requestId, CLIENT_ERROR,
                genUnexpectedMsg(x.toString(), this.jobId.getId(), state), 0),
            getSelf()))
        // scheduling Info observable
        .match(GetJobSchedInfoRequest.class, (x) -> getSender().tell(
            new GetJobSchedInfoResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(
                x.toString(), this.jobId.getId(), state), empty()), getSelf()))
        .match(GetLatestJobDiscoveryInfoRequest.class, (x) -> getSender().tell(
            new GetLatestJobDiscoveryInfoResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(
                x.toString(), this.jobId.getId(), state), empty()), getSelf()))
        // fixed: original said "inTerminating" (missing space and wrong state name)
        .match(KillJobResponse.class, (x) -> LOGGER.info("Received Kill Job Response in "
            + "Terminated State Ignoring"))
        .match(JobProto.SendWorkerAssignementsIfChanged.class, (x) -> LOGGER.warn(genUnexpectedMsg(
            x.toString(), this.jobId.getId(), state)))
        // Worker related events
        .match(WorkerEvent.class, (x) -> LOGGER.info("Received worker event in Terminated State Ignoring"))
        .matchAny(x -> LOGGER.warn(genUnexpectedMsg(x.toString(), this.jobId.getId(), state)))
        // UNEXPECTED MESSAGES END
        .build();
}
/**
 * Behavior for a fully active job. Handles the complete set of operations:
 * GET, LIST workers, GET sched info, SCALE, KILL, RESUBMIT WORKER, worker events,
 * heartbeat enforcement, runtime-limit enforcement and stage-assignment refresh.
 *
 * @return the Akka {@link Receive} for the active state
 */
private Receive getActiveBehavior() {
    String state = "active";
    // get Job Details
    return receiveBuilder()
        // EXPECTED MESSAGES BEGIN//
        .match(GetJobDetailsRequest.class, this::onGetJobDetails)
        // process request to get the given job definition updated by this job actor.
        .match(GetJobDefinitionUpdatedFromJobActorRequest.class, this::onGetJobDefinitionUpdatedFromJobActor)
        // Worker related events
        .match(WorkerEvent.class, r -> processWorkerEvent(r))
        // explicit resubmit worker
        .match(ResubmitWorkerRequest.class, this::onResubmitWorker)
        // Heart beat accounting timers
        .match(JobProto.CheckHeartBeat.class, this::onCheckHeartBeats)
        // Migrate workers from disabled VMs
        .match(JobProto.MigrateDisabledVmWorkersRequest.class, this::onMigrateWorkers)
        // runtime limit reached
        .match(JobProto.RuntimeLimitReached.class, this::onRuntimeLimitReached)
        // Kill job request
        .match(JobClusterProto.KillJobRequest.class, this::onJobKill)
        // scale stage request
        .match(ScaleStageRequest.class, this::onScaleStage)
        // list active workers request
        .match(ListWorkersRequest.class, this::onListActiveWorkers)
        // scheduling Info observable
        .match(GetJobSchedInfoRequest.class, this::onGetJobStatusSubject)
        .match(GetLatestJobDiscoveryInfoRequest.class, this::onGetLatestJobDiscoveryInfo)
        .match(JobProto.SendWorkerAssignementsIfChanged.class, this::onSendWorkerAssignments)
        // EXPECTED MESSAGES END//
        // UNEXPECTED MESSAGES BEGIN //
        // a second InitJob is unexpected once active; reply SUCCESS so the sender unblocks
        .match(InitJob.class, (x) -> getSender().tell(new JobInitialized(x.requestId, SUCCESS,
            genUnexpectedMsg(x.toString(), this.jobId.getId(), state), this.jobId, x.requstor), getSelf()))
        .matchAny(x -> LOGGER.warn(genUnexpectedMsg(x.toString(), this.jobId.getId(), state)))
        // UNEXPECTED MESSAGES END //
        .build();
}
/**
 * Behavior for an initialized (but not yet active) job. Allows: GET, LIST workers,
 * GET sched info, KILL, worker events, heartbeat enforcement, worker migration and
 * stage-assignment refresh. Scaling, resubmit and runtime-limit messages are rejected
 * until the job becomes active.
 *
 * @return the Akka {@link Receive} for the initialized state
 */
private Receive getInitializedBehavior() {
    String state = "initialized";
    return receiveBuilder()
        // EXPECTED MESSAGES BEGIN//
        // get Job Details
        .match(GetJobDetailsRequest.class, this::onGetJobDetails)
        // process request to get the given job definition updated by this job actor.
        .match(GetJobDefinitionUpdatedFromJobActorRequest.class, this::onGetJobDefinitionUpdatedFromJobActor)
        // Worker related events
        .match(WorkerEvent.class, r -> processWorkerEvent(r))
        // Heart beat accounting timers
        .match(JobProto.CheckHeartBeat.class, this::onCheckHeartBeats)
        // Migrate workers from disabled VMs
        .match(JobProto.MigrateDisabledVmWorkersRequest.class, this::onMigrateWorkers)
        // Kill job request
        .match(JobClusterProto.KillJobRequest.class, this::onJobKill)
        // list active workers request
        .match(ListWorkersRequest.class, this::onListActiveWorkers)
        .match(GetJobSchedInfoRequest.class, this::onGetJobStatusSubject)
        .match(GetLatestJobDiscoveryInfoRequest.class, this::onGetLatestJobDiscoveryInfo)
        .match(JobProto.SendWorkerAssignementsIfChanged.class, this::onSendWorkerAssignments)
        // EXPECTED MESSAGES END//
        // UNEXPECTED MESSAGES BEGIN //
        // explicit resubmit worker is not allowed until the job is active
        .match(ResubmitWorkerRequest.class, (x) -> getSender().tell(
            new ResubmitWorkerResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(
                x.toString(), this.jobId.getId(), state)), getSelf()))
        // runtime limit reached
        .match(JobProto.RuntimeLimitReached.class, (x) -> LOGGER.warn(genUnexpectedMsg(
            x.toString(), this.jobId.getId(), state)))
        // scale stage request
        .match(ScaleStageRequest.class, (x) -> getSender().tell(
            new ScaleStageResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(
                x.toString(), this.jobId.getId(), state), 0), getSelf()))
        .match(InitJob.class, (x) -> getSender().tell(new JobInitialized(x.requestId, SUCCESS,
            genUnexpectedMsg(x.toString(), this.jobId.getId(), state), this.jobId, x.requstor), getSelf()))
        .matchAny(x -> LOGGER.warn(genUnexpectedMsg(x.toString(), this.jobId.getId(), state)))
        // UNEXPECTED MESSAGES END //
        .build();
}
/**
 * Behavior for a job in the INITIALIZING state. Only {@code InitJob} is expected;
 * every other message is answered with a CLIENT_ERROR response (or logged) because
 * the job's state has not been established yet.
 *
 * @return the Akka {@code Receive} handler for the initializing state
 */
private Receive getInitializingBehavior() {
    String state = "initializing";
    return receiveBuilder()
        // EXPECTED MESSAGES BEING//
        .match(InitJob.class, this::onJobInitialize)
        // EXPECTED MESSAGES END//
        //UNEXPECTED MESSAGES BEGIN //
        // get Job Details: no metadata available yet
        .match(GetJobDetailsRequest.class, (x) -> getSender().tell(
            new GetJobDetailsResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(
                x.toString(), this.jobId.getId(), state), empty()), getSelf()))
        // no valid metadata to merge against, echo the given job definition back unchanged
        .match(
            GetJobDefinitionUpdatedFromJobActorRequest.class,
            (r) -> getSender().tell(
                new JobClusterManagerProto.GetJobDefinitionUpdatedFromJobActorResponse(
                    r.requestId, SUCCESS, "", r.getUser(), r.getJobDefinition(),
                    r.isAutoResubmit(), r.isQuickSubmit(), r.getOriginalSender()),
                getSelf()))
        // Worker related events: cannot be applied before initialization; log only
        .match(WorkerEvent.class, (x) -> LOGGER.warn(genUnexpectedMsg(x.toString(), this.jobId.getId(), state)))
        // explicit resubmit worker
        .match(ResubmitWorkerRequest.class, (x) -> getSender().tell(
            new ResubmitWorkerResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(
                x.toString(), this.jobId.getId(), state)), getSelf()))
        // Heart beat accounting timers
        .match(JobProto.CheckHeartBeat.class, (x) -> LOGGER.warn(genUnexpectedMsg(
            x.toString(), this.jobId.getId(), state)))
        // Migrate workers request
        .match(JobProto.MigrateDisabledVmWorkersRequest.class, (x) -> LOGGER.warn(genUnexpectedMsg(
            x.toString(), this.jobId.getId(), state)))
        // runtime limit reached
        .match(JobProto.RuntimeLimitReached.class, (x) -> LOGGER.warn(genUnexpectedMsg(
            x.toString(), this.jobId.getId(), state)))
        // Kill job request: answered with Noop state since the job never started
        .match(JobClusterProto.KillJobRequest.class, (x) -> getSender().tell(
            new KillJobResponse(x.requestId, CLIENT_ERROR, JobState.Noop, genUnexpectedMsg(
                x.toString(), this.jobId.getId(), state), this.jobId, x.user), getSelf()))
        // scale stage request
        .match(ScaleStageRequest.class, (x) -> getSender().tell(
            new ScaleStageResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(
                x.toString(), this.jobId.getId(), state), 0), getSelf()))
        // list active workers request
        .match(ListWorkersRequest.class, (x) -> getSender().tell(
            new ListWorkersResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(
                x.toString(), this.jobId.getId(), state), Lists.newArrayList()), getSelf()))
        // scheduling Info observable
        .match(GetJobSchedInfoRequest.class, (x) -> getSender().tell(
            new GetJobSchedInfoResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(
                x.toString(), this.jobId.getId(), state), empty()), getSelf()))
        // latest scheduling Info
        .match(GetLatestJobDiscoveryInfoRequest.class, (x) -> getSender().tell(
            new GetLatestJobDiscoveryInfoResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(
                x.toString(), this.jobId.getId(), state), empty()), getSelf()))
        //UNEXPECTED MESSAGES END //
        .matchAny(x -> LOGGER.warn(genUnexpectedMsg(x.toString(), this.jobId.getId(), state)))
        .build();
}
//////////////////////////////////////////// Akka Messages sent to the Job Actor Begin/////////////////////
/**
 * Handles {@code InitJob}: initializes this job (submitting initial workers on a
 * fresh submit, or recovering running workers on failover), switches the actor's
 * behavior to active or initialized accordingly, and replies to the sender with
 * the outcome.
 *
 * @param i the init message; {@code i.isSubmit} distinguishes submit vs. failover
 */
@Override
public void onJobInitialize(InitJob i) {
    ActorRef sender = getSender();
    try {
        initialize(i.isSubmit);
        if (JobState.isRunningState(mantisJobMetaData.getState())) {
            // failover path: job was already running, resume active behavior and re-arm runtime limit
            getContext().become(activeBehavior);
            setRuntimeLimitTimersIfRequired(Instant.now());
        } else {
            getContext().become(initializedBehavior);
        }
        sender.tell(new JobInitialized(i.requestId, SUCCESS, String.format(
            "Job %s initialized successfully", jobId), jobId, i.requstor), getSelf());
    } catch (Exception e) {
        LOGGER.error("Exception initializing job ", e);
        sender.tell(
            new JobInitialized(i.requestId, SERVER_ERROR, "" + e.getMessage(), jobId, i.requstor),
            getSelf());
    }
}
/**
 * Handles {@code GetJobDetailsRequest} by replying to the caller with this job's
 * metadata wrapped in a successful {@code GetJobDetailsResponse}.
 *
 * @param r the job details request
 */
@Override
public void onGetJobDetails(GetJobDetailsRequest r) {
    getSender().tell(
        new GetJobDetailsResponse(r.requestId, SUCCESS, "", of(getJobDetails())),
        getSelf());
}
/**
 * Replies with a job definition resolved against this job's current metadata
 * (instance-count inheritance is applied by {@code getIntermediateJobDefinition}).
 *
 * @param r the request carrying the candidate job definition
 */
public void onGetJobDefinitionUpdatedFromJobActor(GetJobDefinitionUpdatedFromJobActorRequest r) {
    final ActorRef requester = getSender();
    requester.tell(getIntermediateJobDefinition(r), getSelf());
}
/**
 * Replies with the BehaviorSubject that streams worker lifecycle events for this
 * job, or a CLIENT_ERROR when the request targets a different job id.
 *
 * @param r the scheduling-info subject request
 */
@Override
public void onGetJobStatusSubject(GetJobSchedInfoRequest r) {
    final ActorRef requester = getSender();
    // guard: only serve requests addressed to this actor's job id
    if (!r.getJobId().equals(this.jobId)) {
        String msg = "JobId in the request " + r.getJobId() + " does not match Job Actors job Id " + this.jobId;
        LOGGER.warn(msg);
        requester.tell(new GetJobSchedInfoResponse(r.requestId, CLIENT_ERROR, msg, empty()), getSelf());
        return;
    }
    requester.tell(
        new GetJobSchedInfoResponse(r.requestId, SUCCESS, "", of(workerManager.getJobStatusSubject())),
        getSelf());
}
/**
 * Replies with the latest cached {@code JobSchedulingInfo} published on the job
 * status subject. Responds SERVER_ERROR when nothing has been published yet, or
 * when the request names a different job cluster.
 *
 * @param r the discovery info request
 */
@Override
public void onGetLatestJobDiscoveryInfo(GetLatestJobDiscoveryInfoRequest r) {
    final ActorRef requester = getSender();
    // guard: only serve requests for this actor's own job cluster
    if (!r.getJobCluster().equals(this.jobId.getCluster())) {
        String msg = "JobCluster in the request " + r.getJobCluster() + " does not match Job Actors job ID "
            + this.jobId;
        LOGGER.warn(msg);
        requester.tell(new GetLatestJobDiscoveryInfoResponse(r.requestId, SERVER_ERROR, msg, empty()), getSelf());
        return;
    }
    JobSchedulingInfo schedulingInfo = workerManager.getJobStatusSubject().getValue();
    if (schedulingInfo == null) {
        // the subject has not emitted yet; nothing to return
        LOGGER.info("discoveryInfo from BehaviorSubject is null {}", jobId);
        requester.tell(new GetLatestJobDiscoveryInfoResponse(r.requestId,
            SERVER_ERROR,
            "discoveryInfo from BehaviorSubject is null " + jobId,
            empty()), getSelf());
        return;
    }
    requester.tell(new GetLatestJobDiscoveryInfoResponse(r.requestId, SUCCESS, "",
        ofNullable(schedulingInfo)), getSelf());
}
/**
 * Worker Events sent by the worker itself or the Scheduling Service; delegated to
 * the worker manager together with the job's current state.
 *
 * @param e the worker lifecycle event to process
 */
@Override
public void processWorkerEvent(final WorkerEvent e) {
    this.workerManager.processEvent(e, mantisJobMetaData.getState());
}
/**
 * Resubmits the worker named in the request: publishes a status event, delegates
 * the actual resubmit to the worker manager and bumps the resubmit counter. Replies
 * SUCCESS, or SERVER_ERROR with the failure message if anything throws.
 *
 * @param r the resubmit request (worker number, user, reason)
 */
@Override
public void onResubmitWorker(final ResubmitWorkerRequest r) {
    final ActorRef requester = getSender();
    try {
        eventPublisher.publishStatusEvent(new LifecycleEventsProto.JobStatusEvent(INFO,
            r.getWorkerNum() + " workerNum resubmit requested by " + r.getUser() + " , reason: "
                + r.getReason(),
            getJobId(), getJobState()));
        this.workerManager.resubmitWorker(r.getWorkerNum());
        numWorkerResubmissions.increment();
        final String okMsg =
            String.format("Worker %d of job %s resubmitted", r.getWorkerNum(), r.getJobId());
        requester.tell(new ResubmitWorkerResponse(r.requestId, SUCCESS, okMsg), getSelf());
    } catch (Exception e) {
        requester.tell(new ResubmitWorkerResponse(r.requestId, SERVER_ERROR, e.getMessage()), getSelf());
    }
}
/** Delegates migration of workers running on disabled VMs to the worker manager. */
@Override
public void onMigrateWorkers(final JobProto.MigrateDisabledVmWorkersRequest r) {
    workerManager.migrateDisabledVmWorkers(r.time);
}
/**
 * Invoked periodically (by the heartbeat timer) to check the heartbeat status of
 * the workers; delegated to the worker manager.
 *
 * @param r the timer message carrying the current check time
 */
@Override
public void onCheckHeartBeats(final JobProto.CheckHeartBeat r) {
    this.workerManager.checkHeartBeats(r.getTime());
}
/**
 * Fired by the single-shot RUNTIME_LIMIT timer when the job's max runtime elapses.
 * Asks the parent (job cluster actor) to kill this job; the actual teardown happens
 * when the resulting KillJobRequest is processed.
 *
 * @param r the timer message
 */
@Override
public void onRuntimeLimitReached(final JobProto.RuntimeLimitReached r) {
    LOGGER.info("In onRuntimeLimitReached {} for Job {} ", Instant.now(), this.jobId);
    LOGGER.info("Job {} Started at {} and killed at {} due to Runtime limit reached", jobId,
        mantisJobMetaData.getStartedAtInstant().orElse(Instant.now()), Instant.now());
    getContext().getParent().tell(new JobClusterProto.KillJobRequest(jobId,
        "runtime limit reached", JobCompletedReason.Killed,
        MANTIS_MASTER_USER, ActorRef.noSender()), getSelf());
}
/** Timer-driven: recomputes worker assignments and pushes them if they changed. */
@Override
public void onSendWorkerAssignments(final JobProto.SendWorkerAssignementsIfChanged r) {
    this.workerManager.refreshAndSendWorkerAssignments();
}
/**
 * Will update Job state to terminal. Unschedule all workers, update worker state as
 * failed in DB, archive job, self destruct.
 * <p>
 * Worker terminated events will get ignored once the actor is in terminating behavior.
 *
 * @param req the kill request (reason, completion reason, user, requestor)
 */
@Override
public void onJobKill(JobClusterProto.KillJobRequest req) {
    ActorRef sender = getSender();
    LOGGER.info("Shutting down job {} on request by {}", jobId, sender);
    try {
        eventPublisher.publishStatusEvent(new LifecycleEventsProto.JobStatusEvent(INFO,
            "Killing job, reason: " + req.reason,
            getJobId(), getJobState()));
        // map the completion reason onto the terminal job state
        JobState newState;
        if (req.jobCompletedReason.equals(JobCompletedReason.Error)
            || req.jobCompletedReason.equals(JobCompletedReason.Lost)) {
            newState = JobState.Failed;
        } else {
            newState = JobState.Completed;
        }
        // update job state
        updateStateAndPersist(newState);
        // inform caller
        sender.tell(new JobClusterProto.KillJobResponse(req.requestId, SUCCESS, getJobState(), getJobId()
            + " terminated", getJobId(), this.mantisJobMetaData, req.user, req.requestor), getSelf());
        // continue with rest of the shutdown; stop heartbeat checks first
        getTimers().cancel(CHECK_HB_TIMER_KEY);
        getContext().become(terminatingBehavior);
        // shutdown workers
        shutdown(newState, req.reason);
        // take poison pill (archives the job and stops this actor)
        performFinalShutdown();
    } catch (Exception e) {
        LOGGER.error("Failed to kill job {}", jobId, e);
        sender.tell(new JobClusterProto.KillJobResponse(req.requestId, SERVER_ERROR, getJobState(),
            getJobId() + " Could not be terminated due to " + e.getMessage(), getJobId(),
            this.mantisJobMetaData, req.user, req.requestor), getSelf());
    }
}
/**
 * Scales the given stage to the requested number of workers. Rejects the request
 * with CLIENT_ERROR when the stage does not exist or is not marked scalable;
 * otherwise delegates to the worker manager and replies with the actual worker
 * count reached.
 *
 * @param scaleStage the scale request (stage number, target workers, reason)
 */
@Override
public void onScaleStage(ScaleStageRequest scaleStage) {
    LOGGER.info("In Scale stage {} for Job {}", scaleStage, this.jobId);
    ActorRef sender = getSender();
    Optional<IMantisStageMetadata> stageMeta = this.mantisJobMetaData.getStageMetadata(scaleStage.getStageNum());
    // Make sure stage is valid
    if (!stageMeta.isPresent()) {
        LOGGER.warn("Stage {} does not exist in Job {}", scaleStage.getStageNum(), this.jobId);
        sender.tell(new ScaleStageResponse(scaleStage.requestId, CLIENT_ERROR, "Non existent stage "
            + scaleStage.getStageNum(), 0), getSelf());
        return;
    }
    // Make sure stage is scalable
    MantisStageMetadataImpl stageMetaData = (MantisStageMetadataImpl) stageMeta.get();
    if (!stageMetaData.getScalable()) {
        LOGGER.warn("Stage {} is not scalable in Job {}", scaleStage.getStageNum(), this.jobId);
        eventPublisher.publishStatusEvent(new LifecycleEventsProto.JobStatusEvent(
            LifecycleEventsProto.StatusEvent.StatusEventType.WARN,
            "Can't change #workers to " + scaleStage.getNumWorkers() + ", stage "
                + scaleStage.getStageNum() + " is not scalable", getJobId(), getJobState()));
        sender.tell(new ScaleStageResponse(scaleStage.requestId, CLIENT_ERROR, "Stage "
            + scaleStage.getStageNum() + " is not scalable", 0), getSelf());
        return;
    }
    try {
        // the worker manager may clamp the requested count; report the actual value
        int actualScaleup = this.workerManager.scaleStage(stageMetaData, scaleStage.getNumWorkers(),
            scaleStage.getReason());
        LOGGER.info("Scaled stage {} to {} workers for Job {}", scaleStage.getStageNum(), actualScaleup,
            this.jobId);
        numScaleStage.increment();
        sender.tell(new ScaleStageResponse(scaleStage.requestId, SUCCESS,
            String.format("Scaled stage %d to %d workers", scaleStage.getStageNum(), actualScaleup),
            actualScaleup), getSelf());
    } catch (Exception e) {
        String msg = String.format("Stage %d scale failed due to %s", scaleStage.getStageNum(), e.getMessage());
        LOGGER.error(msg, e);
        sender.tell(new ScaleStageResponse(scaleStage.requestId, SERVER_ERROR, msg, 0), getSelf());
    }
}
/**
 * Replies with a {@code ListWorkersResponse} containing an unmodifiable view of
 * all currently active workers, capped at the request's limit.
 *
 * @param listWorkersRequest the request, carrying the max number of workers to return
 */
public void onListActiveWorkers(ListWorkersRequest listWorkersRequest) {
    final List<IMantisWorkerMetadata> activeWorkers =
        this.workerManager.getActiveWorkers(listWorkersRequest.getLimit());
    getSender().tell(
        new ListWorkersResponse(listWorkersRequest.requestId, SUCCESS, "",
            Collections.unmodifiableList(activeWorkers)),
        getSelf());
}
//////////////////////////////////////////// Akka Messages sent to the Job Actor End/////////////////////////
/////////////////////////////////////////// Internal State change events Begin //////////////////////////////
/**
 * Final step of job teardown: archives the job metadata (best-effort; an archive
 * failure is logged but does not block shutdown), switches to terminated behavior
 * and stops this actor via PoisonPill.
 */
private void performFinalShutdown() {
    try {
        LOGGER.info("Archiving Job {}", this.jobId);
        jobStore.archiveJob(mantisJobMetaData);
    } catch (IOException e) {
        // best-effort: archiving failure should not prevent the actor from stopping
        LOGGER.warn("Exception archiving job " + mantisJobMetaData.getJobId(), e);
    }
    getContext().become(terminatedBehavior);
    // commit suicide
    getSelf().tell(PoisonPill.getInstance(), ActorRef.noSender());
}
/**
 * Invoked when all workers are in terminal state. Should get called only during
 * the shutdown process. If the job itself is not yet terminal, asks the parent to
 * kill it; the {@code allWorkersCompleted} flag guards against requesting the kill
 * more than once.
 */
@Override
public void onAllWorkersCompleted() {
    LOGGER.info("JobActor: onAllWorkersCompleted with current state {}", mantisJobMetaData.getState());
    if (!JobState.isTerminalState(mantisJobMetaData.getState()) && !allWorkersCompleted) {
        LOGGER.info("All workers completed but job {} in {} state. Request termination", jobId, getJobState());
        allWorkersCompleted = true;
        getContext().parent().tell(
            new JobClusterProto.KillJobRequest(
                jobId, "Job Completed", JobCompletedReason.Normal, MANTIS_MASTER_USER,
                ActorRef.noSender()), getSelf());
        numWorkersCompletedNotTerminal.increment();
    } else {
        // job kill has already been requested, ignore
        LOGGER.debug("Job {} Kill already requested", this.jobId);
    }
}
/**
 * Should get called only once after all workers have started. Transitions the job
 * from Accepted to Launched: persists the state, switches to active behavior,
 * notifies the cluster manager, records the start time and arms the runtime-limit
 * timer.
 *
 * @return {@code true} if the Accepted→Launched transition completed; {@code false}
 *         when the job was already launched, in an unexpected state, or persisting failed
 */
@Override
public boolean onAllWorkersStarted() {
    LOGGER.info("In onAllWorkersStarted for Job {}", jobId);
    boolean isSuccess = true;
    if (mantisJobMetaData.getState() == JobState.Accepted) {
        try {
            // update record in storage
            updateStateAndPersist(JobState.Launched);
            // update behavior to active
            getContext().become(activeBehavior);
            eventPublisher.publishStatusEvent(new LifecycleEventsProto.JobStatusEvent(INFO,
                "all workers started, job transitioning to Active", getJobId(), getJobState()));
            // inform job cluster manager that the job has started
            getContext().getParent().tell(new JobClusterProto.JobStartedEvent(getJobId()), getSelf());
            // kick off max runtime timer if needed
            Instant currentTime = Instant.now();
            // Update start time and persist state
            mantisJobMetaData.setStartedAt(currentTime.toEpochMilli(), jobStore);
            setRuntimeLimitTimersIfRequired(currentTime);
        } catch (Exception e) {
            LOGGER.error("Error processing all worker started event ", e);
            isSuccess = false;
        }
    } else if (mantisJobMetaData.getState() == JobState.Launched) {
        // no op: duplicate notification, transition already happened
        LOGGER.info("Job is already in launched state");
        isSuccess = false;
    } else {
        // something is wrong! event arrived while the job is in an unexpected state
        LOGGER.warn("Unexpected all Workers Started Event while job in {} state", mantisJobMetaData.getState());
        isSuccess = false;
    }
    return isSuccess;
}
/**
 * Invoked if workers have been relaunched too many times. Publishes an ERROR status
 * event, bumps the limit-reached counter and asks the parent (job cluster actor) to
 * terminate this job, marked as failed via {@code JobCompletedReason.Error}.
 *
 * @return always {@code true}; the actual termination happens asynchronously when
 *         the parent processes the kill request
 */
@Override
public boolean onTooManyWorkerResubmits() {
    LOGGER.warn("Too many worker resubmits detected for Job {}. Requesting job shutdown", jobId);
    eventPublisher.publishStatusEvent(new LifecycleEventsProto.JobStatusEvent(ERROR,
        "Worker Resubmit limit reached, shutting down job", getJobId(), getJobState()));
    numWorkerResubmitLimitReached.increment();
    // ask Parent to shut it down; job state is persisted during kill handling
    getContext().parent().tell(
        new JobClusterProto.KillJobRequest(
            jobId, "Too many worker resubmits", JobCompletedReason.Error, MANTIS_MASTER_USER,
            ActorRef.noSender()), getSelf());
    return true;
}
//////////////////////////////////Internal State Change Events END //////////////////////////////////////
/**
 * Returns the metadata (details) of this job.
 *
 * @return this job's {@code IMantisJobMetadata}
 */
@Override
public IMantisJobMetadata getJobDetails() {
    return this.mantisJobMetaData;
}
/**
 * Builds a job definition from the request's definition, inheriting per-stage
 * instance counts from this job's current metadata (forced when the request is a
 * quick submit). Returns SUCCESS with the merged definition, or SERVER_ERROR with
 * a null definition when the merge fails validation.
 *
 * @param r the request carrying the candidate definition and submit flags
 * @return the response to send back to the requester
 */
public JobClusterManagerProto.GetJobDefinitionUpdatedFromJobActorResponse getIntermediateJobDefinition(
    GetJobDefinitionUpdatedFromJobActorRequest r) {
    final JobDefinition givenJobDefn = r.getJobDefinition();
    // quick submit forces inheritance of worker counts from the running job
    final boolean forceInheritance = r.isQuickSubmit();
    IMantisJobMetadata lastJobMeta = this.mantisJobMetaData;
    JobDefinition.Builder jobDefnBuilder = new JobDefinition.Builder().fromWithInstanceCountInheritance(
        givenJobDefn,
        forceInheritance,
        (stageId) -> lastJobMeta.getStageMetadata(stageId).map(IMantisStageMetadata::getNumWorkers));
    try {
        JobDefinition mergedJobDefn = jobDefnBuilder.build();
        return new JobClusterManagerProto.GetJobDefinitionUpdatedFromJobActorResponse(
            r.requestId, SUCCESS, "", r.getUser(), mergedJobDefn, r.isAutoResubmit(),
            r.isQuickSubmit(), r.getOriginalSender());
    } catch (io.mantisrx.runtime.command.InvalidJobException ije) {
        LOGGER.error("Failed to build job definition with inheritance:", ije);
        return new JobClusterManagerProto.GetJobDefinitionUpdatedFromJobActorResponse(
            r.requestId, SERVER_ERROR, ije.getMessage(), r.getUser(), null, r.isAutoResubmit(),
            r.isQuickSubmit(), r.getOriginalSender());
    }
}
/**
 * Triggered when the JobActor receives the Job Kill message. Requests the worker
 * manager to terminate all workers and publishes status/audit events recording the
 * shutdown reason. (State persistence happens in the caller via
 * {@code updateStateAndPersist}.)
 *
 * @param state the terminal state the job is transitioning to (included in the status event)
 * @param reason human-readable reason for the shutdown
 */
@Override
public void shutdown(JobState state, String reason) {
    LOGGER.info("Entering JobActor:shutdown {}", jobId);
    workerManager.shutdown();
    eventPublisher.publishStatusEvent(new LifecycleEventsProto.JobStatusEvent(INFO,
        "job shutdown, reason: " + reason,
        getJobId(), state));
    eventPublisher.publishAuditEvent(new LifecycleEventsProto.AuditEvent(
        LifecycleEventsProto.AuditEvent.AuditEventType.JOB_TERMINATE,
        jobId.getId(), "job shutdown, reason: " + reason));
}
/** Returns the id of the job this actor manages. */
@Override
public JobId getJobId() {
    return this.jobId;
}
/**
 * Transitions the job to {@code newState} and persists the change to the job store.
 *
 * @param newState the state to transition to
 * @throws Exception if the state change is invalid or persisting fails
 */
private void updateStateAndPersist(JobState newState) throws Exception {
    mantisJobMetaData.setJobState(newState, jobStore);
}
/**
 * Arms a single-shot timer that fires {@code JobProto.RuntimeLimitReached} when the
 * job's SLA-configured max runtime elapses. Always invoked after the job has
 * transitioned to started state. No timer is scheduled when the runtime limit is
 * not positive (perpetual job).
 *
 * @param currentTime reference time; used as the start time for jobs with no recorded start
 */
private void setRuntimeLimitTimersIfRequired(Instant currentTime) {
    long maxRuntimeSecs = mantisJobMetaData.getJobDefinition().getJobSla().getRuntimeLimitSecs();
    if (maxRuntimeSecs > 0) {
        // remaining runtime is measured from the actual start (failover-safe), not from now
        Instant startedAt = mantisJobMetaData.getStartedAtInstant().orElse(currentTime);
        long terminateJobInSecs = JobHelper.calculateRuntimeDuration(maxRuntimeSecs, startedAt);
        LOGGER.info("Will terminate Job {} at {} ", jobId, (currentTime.plusSeconds(terminateJobInSecs)));
        getTimers().startSingleTimer("RUNTIME_LIMIT", new JobProto.RuntimeLimitReached(),
            Duration.ofSeconds(terminateJobInSecs));
    } else {
        LOGGER.info("maxRuntime for Job {} is {} ignore ", jobId, maxRuntimeSecs);
    }
}
/** Returns the current state of this job, read from the job metadata. */
@Override
public JobState getJobState() {
    return mantisJobMetaData.getState();
}
/**
 * Determines whether any stage of the given scheduling info has an enabled
 * autoscaling policy.
 *
 * @param schedulingInfo the job's scheduling info
 * @return {@code true} if at least one stage has an enabled scaling policy
 */
private boolean isAutoscaled(SchedulingInfo schedulingInfo) {
    // only stage values matter here; the stage-number keys are unused
    for (StageSchedulingInfo stageSchedulingInfo : schedulingInfo.getStages().values()) {
        final StageScalingPolicy scalingPolicy = stageSchedulingInfo.getScalingPolicy();
        if (scalingPolicy != null && scalingPolicy.isEnabled()) {
            LOGGER.info("Job {} is autoscaleable", jobId);
            return true;
        }
    }
    LOGGER.info("Job {} is NOT scaleable", jobId);
    return false;
}
/*package protected*/
/**
 * Returns the calculated subscription timeout in seconds for this job. Perpetual
 * jobs have no subscription timeout (0); ephemeral jobs with no explicit timeout
 * fall back to the configured default.
 *
 * @param mjmd the job metadata
 * @return the subscription timeout in seconds, or 0 for perpetual jobs
 */
static long getSubscriptionTimeoutSecs(final IMantisJobMetadata mjmd) {
    // if perpetual job there is no subscription timeout
    if (mjmd.getJobDefinition().getJobSla().getDurationType() == MantisJobDurationType.Perpetual) {
        return 0;
    }
    final long configuredTimeout = mjmd.getSubscriptionTimeoutSecs();
    if (configuredTimeout == 0) {
        return ConfigurationProvider.getConfig().getEphemeralJobUnsubscribedTimeoutSecs();
    }
    return configuredTimeout;
}
/**
 * Returns the worker heartbeat interval for this job in seconds: the job's own
 * positive setting if present, otherwise the configured default.
 *
 * @param mjmd the job metadata
 * @return heartbeat interval in seconds
 */
static long getHeartbeatIntervalSecs(final IMantisJobMetadata mjmd) {
    final long jobInterval = mjmd.getHeartbeatIntervalSecs();
    return jobInterval > 0
        ? jobInterval
        : ConfigurationProvider.getConfig().getDefaultWorkerHeartbeatIntervalSecs();
}
/**
 * Keeps track of the last used worker number and mints a new one every time a worker
 * is scheduled. To limit job-store writes, the persisted high-water mark is advanced
 * in steps of {@code incrementStep} rather than on every call.
 */
static class WorkerNumberGenerator {
    private static final Logger LOGGER = LoggerFactory.getLogger(WorkerNumberGenerator.class);
    private static final int DEFAULT_INCREMENT_STEP = 10;
    private final int incrementStep;
    private int lastUsed;
    private int currLimit;
    // latches on the first persistence failure; all further use fails fast
    private volatile boolean hasErrored = false;

    /**
     * Creates an instance of this class.
     *
     * @param lastUsed the last worker number already handed out (0 for a new job)
     * @param incrementStep how many worker numbers to reserve per persisted update
     */
    WorkerNumberGenerator(int lastUsed, int incrementStep) {
        // Guava Preconditions message templates use %s placeholders (not SLF4J-style {}),
        // so the argument values actually appear in the exception message.
        Preconditions.checkArgument(lastUsed >= 0,
            "Last Used worker Number cannot be negative %s ", lastUsed);
        Preconditions.checkArgument(incrementStep >= 1,
            "incrementStep cannot be less than 1 %s ", incrementStep);
        this.lastUsed = lastUsed;
        this.currLimit = lastUsed;
        this.incrementStep = incrementStep;
    }

    /**
     * Default constructor sets last used number to 0.
     */
    WorkerNumberGenerator() {
        this(0, DEFAULT_INCREMENT_STEP);
    }

    /** Reserves the next batch of worker numbers by persisting a new high-water mark. */
    private void advance(MantisJobMetadataImpl mantisJobMetaData, MantisJobStore jobStore) {
        try {
            currLimit += incrementStep;
            mantisJobMetaData.setNextWorkerNumberToUse(currLimit, jobStore);
        } catch (Exception e) {
            hasErrored = true;
            LOGGER.error("Exception setting next Worker number to use ", e);
            throw new RuntimeException("Unexpected error setting next worker number to use", e);
        }
    }

    /**
     * Get the next unused worker number.
     * <p>
     * For performance reasons, this object updates state in persistence only every N
     * calls made to this method.
     *
     * @return The next worker number to use for new workers
     * @throws IllegalStateException if there was an error saving the next worker number to use to the job store
     */
    int getNextWorkerNumber(MantisJobMetadataImpl mantisJobMetaData, MantisJobStore jobStore) {
        if (hasErrored) {
            throw new IllegalStateException("Unexpected: Invalid state likely due to getting/setting"
                + "next worker number");
        }
        if (lastUsed == currLimit) {
            advance(mantisJobMetaData, jobStore);
        }
        return ++lastUsed;
    }
}
/**
* Responsible for managing worker related state of this job.
*/
class WorkerManager implements IWorkerManager {
private static final int WORKER_RESUBMIT_LIMIT = 100;
private ObjectMapper mapper = new ObjectMapper();
private final WorkerNumberGenerator workerNumberGenerator;
private boolean allWorkersStarted = false;
private final IMantisJobManager jobMgr;
private ConcurrentSkipListSet<Integer> workersToMigrate = new ConcurrentSkipListSet<>();
private int sinkStageNum;
private final MigrationStrategy migrationStrategy;
private final MantisScheduler scheduler;
private long lastWorkerMigrationTimestamp = Long.MIN_VALUE;
private Map<Integer, WorkerAssignments> stageAssignments = new HashMap<>();
private BehaviorSubject<JobSchedulingInfo> jobSchedulingInfoBehaviorSubject;
private String currentJobSchedulingInfoStr = null;
private final WorkerResubmitRateLimiter resubmitRateLimiter = new WorkerResubmitRateLimiter();
// Use expiring cache to effectively track worker resubmitted in the last hour.
private Cache<Integer, Boolean> recentErrorWorkersCache = CacheBuilder.newBuilder()
.expireAfterWrite(1, TimeUnit.HOURS)
.build();
private volatile boolean stageAssignmentPotentiallyChanged;
/**
 * Creates an instance of this class: sets up the worker-number generator (resuming
 * from persisted state on failover), the migration strategy, the sink stage number
 * and the scheduling-info subject, then initializes workers.
 *
 * @param jobMgr the owning job manager
 * @param migrationConfig config for migrating workers off disabled VMs
 * @param scheduler the scheduler used to (re)queue workers
 * @param isSubmit true for a fresh job submission, false for master failover
 * @throws Exception if worker initialization fails
 */
WorkerManager(
    IMantisJobManager jobMgr, WorkerMigrationConfig migrationConfig, MantisScheduler scheduler,
    boolean isSubmit) throws Exception {
    // on failover, continue numbering from the persisted next-worker-number
    workerNumberGenerator = new WorkerNumberGenerator((isSubmit) ? 0
        : jobMgr.getJobDetails().getNextWorkerNumberToUse(), WorkerNumberGenerator.DEFAULT_INCREMENT_STEP);
    this.scheduler = scheduler;
    this.jobMgr = jobMgr;
    migrationStrategy = MigrationStrategyFactory.getStrategy(jobId.getId(), migrationConfig);
    int noOfStages = mantisJobMetaData.getStageMetadata().size();
    // single-stage jobs sink at stage 1; multi-stage jobs sink at the second-to-last stage
    if (noOfStages == 1) {
        sinkStageNum = 1;
    } else {
        sinkStageNum = noOfStages - 1;
    }
    // seed the scheduling-info subject with an empty assignment map
    JobSchedulingInfo initialJS = new JobSchedulingInfo(jobMgr.getJobId().getId(), new HashMap<>());
    currentJobSchedulingInfoStr = mapper.writeValueAsString(initialJS);
    jobSchedulingInfoBehaviorSubject = BehaviorSubject.create(initialJS);
    initialize(isSubmit);
}
/**
 * Initializes a worker manager.
 * <p>
 * A WorkerManager can get initialized on a job submission or a failover.
 * <p>
 * Init from Job submission: submits initial workers which each go through their startup lifecycle.
 * <p>
 * Init from Master failover: workers are already running; gets state from Mesos and updates its view of the
 * world. If worker information is bad from Mesos, gather up these workers and resubmit them all together
 * after initialization of running workers.
 *
 * @param isSubmit specifies if this initialization is due to job submission or a master failover.
 * @throws Exception if storing or recovering workers fails
 */
void initialize(boolean isSubmit) throws Exception {
    if (isSubmit) {
        submitInitialWorkers();
    } else {
        initializeRunningWorkers();
    }
    // recompute job costs from the (possibly recovered) worker layout
    mantisJobMetaData.setJobCosts(costsCalculator.calculateCosts(mantisJobMetaData));
}
/**
 * Failover path: rebuilds in-memory state for workers that are already running.
 * Running workers get a synthetic heartbeat (so heartbeat enforcement does not
 * immediately fail them) and are re-registered with the scheduler; accepted-but-not-
 * running workers are re-queued; corrupted workers (missing ports) are resubmitted.
 */
private void initializeRunningWorkers() {
    // Scan for the list of all corrupted workers to be resubmitted.
    List<JobWorker> workersToResubmit = markCorruptedWorkers();
    // publish a refresh before enqueuing tasks to the Scheduler, as there is a potential race between
    // WorkerRegistryV2 getting updated and isWorkerValid being called from SchedulingService loop
    // If worker is not found in the SchedulingService loop, it is considered invalid and prematurely
    // removed from Fenzo state.
    markStageAssignmentsChanged(true);
    for (IMantisStageMetadata stageMeta : mantisJobMetaData.getStageMetadata().values()) {
        Map<Integer, WorkerHost> workerHosts = new HashMap<>();
        for (JobWorker worker : stageMeta.getAllWorkers()) {
            IMantisWorkerMetadata wm = worker.getMetadata();
            if (WorkerState.isRunningState(wm.getState())) {
                // send fake heartbeat so the worker is not flagged as missed immediately after failover
                try {
                    WorkerEvent fakeHB = new WorkerHeartbeat(new Status(jobId.getId(), stageMeta.getStageNum(),
                        wm.getWorkerIndex(), wm.getWorkerNumber(), Status.TYPE.HEARTBEAT, "",
                        MantisJobState.Started, System.currentTimeMillis()));
                    worker.processEvent(fakeHB, jobStore);
                } catch (InvalidWorkerStateChangeException | IOException e) {
                    LOGGER.error("problem sending initial heartbeat for Job {} during initialization",
                        worker.getMetadata().getJobId(), e);
                }
                workerHosts.put(
                    wm.getWorkerNumber(),
                    new WorkerHost(
                        wm.getSlave(),
                        wm.getWorkerIndex(),
                        wm.getWorkerPorts().getPorts(),
                        DataFormatAdapter.convertWorkerStateToMantisJobState(wm.getState()),
                        wm.getWorkerNumber(),
                        wm.getMetricsPort(),
                        wm.getCustomPort()));
                // re-register the already-running worker with the scheduler (no new placement)
                ScheduleRequest scheduleRequest = createSchedulingRequest(wm, empty());
                scheduler.initializeRunningWorker(scheduleRequest, wm.getSlave(), wm.getSlaveID());
            } else if (wm.getState().equals(WorkerState.Accepted)) {
                // worker never started running; queue it for scheduling from scratch
                queueTask(wm);
            }
        }
        if (stageMeta.getStageNum() > 0) {
            stageAssignments.put(stageMeta.getStageNum(), new WorkerAssignments(stageMeta.getStageNum(),
                stageMeta.getNumWorkers(), workerHosts));
        }
    }
    // publish another update after queuing tasks to Fenzo (in case some workers were marked Started
    // due to the Fake heartbeat in above loop)
    markStageAssignmentsChanged(true);
    // Resubmit workers with missing ports so they can be reassigned new resources.
    for (JobWorker jobWorker : workersToResubmit) {
        LOGGER.warn("discovered workers with missing ports during initialization: {}", jobWorker);
        try {
            resubmitWorker(jobWorker);
        } catch (Exception e) {
            LOGGER.warn("Exception resubmitting worker {} during initializeRunningWorkers due to {}",
                jobWorker, e.getMessage(), e);
        }
    }
}
/**
 * Scans all workers for ones that claim to be running but have no recorded ports
 * (corrupted state after failover). Each such worker is marked Failed via a
 * synthetic status event and returned so the caller can resubmit it.
 *
 * @return the list of corrupted workers to resubmit
 */
private List<JobWorker> markCorruptedWorkers() {
    List<JobWorker> corruptedWorkers = new ArrayList<>();
    for (IMantisStageMetadata stageMeta : mantisJobMetaData.getStageMetadata().values()) {
        for (JobWorker worker : stageMeta.getAllWorkers()) {
            IMantisWorkerMetadata wm = worker.getMetadata();
            Optional<WorkerPorts> workerPortsOptional = wm.getPorts();
            // a "running" worker without ports cannot be reached; treat as corrupted
            if (WorkerState.isRunningState(wm.getState()) &&
                (!workerPortsOptional.isPresent())) {
                LOGGER.info("marking corrupted worker {} for Job ID {} as {}",
                    worker.getMetadata().getWorkerId(), jobId, WorkerState.Failed);
                numMissingWorkerPorts.increment();
                // Mark this worker as corrupted.
                corruptedWorkers.add(worker);
                // Send initial status event to signal to the worker to mark itself as failed.
                try {
                    WorkerStatus status = new WorkerStatus(new Status(jobId.getId(), stageMeta.getStageNum(),
                        wm.getWorkerIndex(), wm.getWorkerNumber(), Status.TYPE.HEARTBEAT, "",
                        MantisJobState.Failed, System.currentTimeMillis()));
                    worker.processEvent(status, jobStore);
                } catch (InvalidWorkerStateChangeException | IOException e) {
                    LOGGER.error("problem sending initial heartbeat for Job {} during initialization",
                        worker.getMetadata().getJobId(), e);
                }
            }
        }
    }
    return corruptedWorkers;
}
/**
 * Flags that stage assignments may have changed. Pushes a refresh immediately when
 * forced or when periodic refresh is disabled (interval configured as -1);
 * otherwise the periodic refresh timer picks the flag up later.
 *
 * @param forceRefresh true to refresh and publish right away
 */
private void markStageAssignmentsChanged(boolean forceRefresh) {
    this.stageAssignmentPotentiallyChanged = true;
    long refreshInterval = ConfigurationProvider.getConfig().getStageAssignmentRefreshIntervalMs();
    if (refreshInterval == -1 || forceRefresh) {
        refreshStageAssignmentsAndPush();
    }
}
/**
 * Recomputes per-stage worker assignments from current worker state and publishes
 * them on the scheduling-info subject plus a worker-list-changed lifecycle event.
 * No-op unless {@code stageAssignmentPotentiallyChanged} was set; clears the flag
 * when done.
 */
private void refreshStageAssignmentsAndPush() {
    if (!stageAssignmentPotentiallyChanged) {
        return;
    }
    List<IMantisWorkerMetadata> acceptedAndActiveWorkers = new ArrayList<>();
    List<IMantisWorkerMetadata> activeWorkers = new ArrayList<>();
    for (IMantisStageMetadata stageMeta : mantisJobMetaData.getStageMetadata().values()) {
        Map<Integer, WorkerHost> workerHosts = new HashMap<>();
        for (JobWorker worker : stageMeta.getAllWorkers()) {
            IMantisWorkerMetadata wm = worker.getMetadata();
            if (WorkerState.isRunningState(wm.getState())) {
                // running workers contribute host/port info to the published assignments
                workerHosts.put(
                    wm.getWorkerNumber(),
                    new WorkerHost(
                        wm.getSlave(),
                        wm.getWorkerIndex(),
                        wm.getWorkerPorts().getPorts(),
                        DataFormatAdapter.convertWorkerStateToMantisJobState(wm.getState()),
                        wm.getWorkerNumber(),
                        wm.getMetricsPort(),
                        wm.getCustomPort()));
                activeWorkers.add(wm);
                acceptedAndActiveWorkers.add(wm);
            } else if (wm.getState().equals(WorkerState.Accepted)) {
                // accepted workers are reported in the lifecycle event but have no host yet
                acceptedAndActiveWorkers.add(wm);
            }
        }
        stageAssignments.put(stageMeta.getStageNum(), new WorkerAssignments(stageMeta.getStageNum(),
            stageMeta.getNumWorkers(), workerHosts));
    }
    JobSchedulingInfo jobSchedulingInfo = new JobSchedulingInfo(jobId.getId(), stageAssignments);
    jobSchedulingInfoBehaviorSubject.onNext(jobSchedulingInfo);
    eventPublisher.publishWorkerListChangedEvent(new LifecycleEventsProto.WorkerListChangedEvent(
        new WorkerInfoListHolder(this.jobMgr.getJobId(), acceptedAndActiveWorkers)));
    numSchedulingChangesRefreshed.increment();
    stageAssignmentPotentiallyChanged = false;
}
/**
 * Submission path: creates the initial workers for all stages, persists them to the
 * job store in batches of {@code workerWritesBatchSize}, and queues each batch to
 * the scheduler. A store failure aborts submission with a RuntimeException.
 *
 * @throws Exception if creating or storing the initial workers fails
 */
private void submitInitialWorkers() throws Exception {
    List<IMantisWorkerMetadata> workers = getInitialWorkers(
        mantisJobMetaData.getJobDefinition(),
        System.currentTimeMillis());
    int beg = 0;
    while (beg < workers.size()) {
        // next batch is [beg, en): at most workerWritesBatchSize workers
        int en = Math.min(beg + workerWritesBatchSize, workers.size());
        final List<IMantisWorkerMetadata> workerRequests = workers.subList(beg, en);
        try {
            jobStore.storeNewWorkers(jobMgr.getJobDetails(), workerRequests);
            LOGGER.info("Stored workers {} for Job {}", workerRequests, jobId);
            // refresh Worker Registry state before enqueuing task to Scheduler
            markStageAssignmentsChanged(true);
            // queue to scheduler
            workerRequests.forEach(this::queueTask);
        } catch (Exception e) {
            LOGGER.error("Error {} storing workers of job {}", e.getMessage(), jobId.getId(), e);
            throw new RuntimeException("Exception saving worker for Job " + jobId, e);
        }
        beg = en;
    }
}
/**
 * Builds a scheduling request for the worker and hands it to the scheduler.
 * Scheduling failures are logged, not rethrown (best-effort enqueue).
 *
 * @param workerRequest the worker to schedule
 * @param readyAt optional epoch-millis before which the worker should not be placed
 */
private void queueTask(final IMantisWorkerMetadata workerRequest, final Optional<Long> readyAt) {
    final ScheduleRequest schedulingRequest = createSchedulingRequest(workerRequest, readyAt);
    LOGGER.info("Queueing up scheduling request {} ", schedulingRequest);
    try {
        scheduler.scheduleWorker(schedulingRequest);
    } catch (Exception e) {
        LOGGER.error("Exception queueing task", e);
    }
}
/** Convenience overload: queues the worker with no ready-at constraint. */
private void queueTask(final IMantisWorkerMetadata workerRequest) {
    queueTask(workerRequest, empty());
}
private ScheduleRequest createSchedulingRequest(
final IMantisWorkerMetadata workerRequest,
final Optional<Long> readyAt) {
try {
final WorkerId workerId = workerRequest.getWorkerId();
// setup constraints
final List<ConstraintEvaluator> hardConstraints = new ArrayList<>();
final List<VMTaskFitnessCalculator> softConstraints = new ArrayList<>();
Optional<IMantisStageMetadata> stageMetadataOp =
mantisJobMetaData.getStageMetadata(workerRequest.getStageNum());
if (!stageMetadataOp.isPresent()) {
throw new RuntimeException(String.format("No such stage %d", workerRequest.getStageNum()));
}
IMantisStageMetadata stageMetadata = stageMetadataOp.get();
List<JobConstraints> stageHC = stageMetadata.getHardConstraints();
List<JobConstraints> stageSC = stageMetadata.getSoftConstraints();
final Set<String> coTasks = new HashSet<>();
if ((stageHC != null && !stageHC.isEmpty())
|| (stageSC != null && !stageSC.isEmpty())) {
for (JobWorker jobWorker : stageMetadata.getAllWorkers()) {
if (jobWorker.getMetadata().getWorkerNumber() != workerId.getWorkerNum()) {
coTasks.add(workerId.getId());
}
}
}
if (stageHC != null && !stageHC.isEmpty()) {
for (JobConstraints c : stageHC) {
hardConstraints.add(ConstraintsEvaluators.hardConstraint(c, coTasks));
}
}
if (stageSC != null && !stageSC.isEmpty()) {
for (JobConstraints c : stageSC) {
softConstraints.add(ConstraintsEvaluators.softConstraint(c, coTasks));
}
}
ScheduleRequest sr = new ScheduleRequest(
workerId,
workerRequest.getStageNum(),
workerRequest.getNumberOfPorts(),
new JobMetadata(
mantisJobMetaData.getJobId().getId(),
mantisJobMetaData.getJobJarUrl(),
mantisJobMetaData.getTotalStages(),
mantisJobMetaData.getUser(),
mantisJobMetaData.getSchedulingInfo(),
mantisJobMetaData.getParameters(),
getSubscriptionTimeoutSecs(mantisJobMetaData),
getHeartbeatIntervalSecs(mantisJobMetaData),
mantisJobMetaData.getMinRuntimeSecs()
),
mantisJobMetaData.getSla().orElse(new JobSla.Builder().build()).getDurationType(),
stageMetadata.getMachineDefinition(),
hardConstraints,
softConstraints,
readyAt.orElse(0L),
workerRequest.getPreferredClusterOptional());
return sr;
} catch (Exception e) {
LOGGER.error("Exception creating scheduleRequest ", e);
throw e;
}
}
private List<IMantisWorkerMetadata> getInitialWorkers(JobDefinition jobDetails, long submittedAt)
throws Exception {
List<IMantisWorkerMetadata> workerRequests = Lists.newLinkedList();
SchedulingInfo schedulingInfo = jobDetails.getSchedulingInfo();
int totalStages = schedulingInfo.getStages().size();
Iterator<Integer> it = schedulingInfo.getStages().keySet().iterator();
while (it.hasNext()) {
int stageNum = it.next();
List<IMantisWorkerMetadata> stageWorkers = setupStageWorkers(schedulingInfo, totalStages,
stageNum, submittedAt);
workerRequests.addAll(stageWorkers);
}
return workerRequests;
}
    /**
     * Builds the worker metadata for every instance required by the given stage, lazily
     * creating and persisting the stage metadata first if it does not exist yet.
     *
     * @param schedulingInfo scheduling info for the whole job
     * @param totalStages    total number of stages in the job
     * @param stageNum       the stage to set up
     * @param submittedAt    job submission time (epoch millis); currently not read here
     * @return worker metadata for every instance of the stage
     * @throws Exception if the stage has no scheduling info or persistence fails
     */
    private List<IMantisWorkerMetadata> setupStageWorkers(
        SchedulingInfo schedulingInfo, int totalStages,
        int stageNum, long submittedAt) throws Exception {
        List<IMantisWorkerMetadata> workerRequests = new LinkedList<>();
        StageSchedulingInfo stage = schedulingInfo.getStages().get(stageNum);
        if (stage == null) {
            LOGGER.error("StageSchedulingInfo cannot be null for Stage {}", stageNum);
            throw new Exception("StageSchedulingInfo cannot be null for Stage " + stageNum);
            //return workerRequests; // can happen when stageNum=0 and there is no jobMaster defined
        }
        int numInstancesAtStage = stage.getNumberOfInstances();
        // add worker request for each instance required in stage
        int stageIndex = 0;
        for (int i = 0; i < numInstancesAtStage; i++) {
            // during initialization worker number and index are identical
            int workerIndex = stageIndex++;
            // create + persist the stage metadata the first time a worker is added to it
            if (!mantisJobMetaData.getStageMetadata(stageNum).isPresent()) {
                IMantisStageMetadata msmd = new MantisStageMetadataImpl.Builder().
                    withJobId(jobId)
                    .withStageNum(stageNum)
                    .withNumStages(totalStages)
                    .withMachineDefinition(stage.getMachineDefinition())
                    .withNumWorkers(numInstancesAtStage)
                    .withHardConstraints(stage.getHardConstraints())
                    .withSoftConstraints(stage.getSoftConstraints())
                    .withScalingPolicy(stage.getScalingPolicy())
                    .isScalable(stage.getScalable())
                    .build();
                mantisJobMetaData.addJobStageIfAbsent(msmd);
                jobStore.updateStage(msmd);
            }
            IMantisWorkerMetadata mwmd = addWorker(schedulingInfo, stageNum, workerIndex);
            workerRequests.add(mwmd);
        }
        return workerRequests;
    }
private IMantisWorkerMetadata addWorker(SchedulingInfo schedulingInfo, int stageNo, int workerIndex)
throws InvalidJobException {
StageSchedulingInfo stageSchedInfo = schedulingInfo.getStages().get(stageNo);
int workerNumber = workerNumberGenerator.getNextWorkerNumber(mantisJobMetaData, jobStore);
JobWorker jw = new JobWorker.Builder()
.withJobId(jobId)
.withWorkerIndex(workerIndex)
.withWorkerNumber(workerNumber)
.withNumberOfPorts(stageSchedInfo.getMachineDefinition().getNumPorts()
+ MANTIS_SYSTEM_ALLOCATED_NUM_PORTS)
.withStageNum(stageNo)
.withLifecycleEventsPublisher(eventPublisher)
.build();
if (!mantisJobMetaData.addWorkerMetadata(stageNo, jw)) {
Optional<JobWorker> tmp = mantisJobMetaData.getWorkerByIndex(stageNo, workerIndex);
if (tmp.isPresent()) {
throw new InvalidJobException(mantisJobMetaData.getJobId().getId(), stageNo, workerIndex,
new Exception("Couldn't add worker " + workerNumber + " as index " + workerIndex
+ ", that index already has worker " + tmp.get().getMetadata().getWorkerNumber()));
} else {
throw new InvalidJobException(mantisJobMetaData.getJobId().getId(), stageNo, workerIndex,
new Exception("Couldn't add worker " + workerNumber + " as index "
+ workerIndex + "doesn't exist "));
}
}
mantisJobMetaData.setJobCosts(costsCalculator.calculateCosts(mantisJobMetaData));
return jw.getMetadata();
}
@Override
public void shutdown() {
// if workers have not already completed
if (!allWorkerCompleted()) {
// kill workers
terminateAllWorkersAsync();
}
//send empty schedulingInfo changes so downstream jobs would explicitly disconnect
jobSchedulingInfoBehaviorSubject.onNext(new JobSchedulingInfo(
this.jobMgr.getJobId().getId(),
new HashMap<>()));
jobSchedulingInfoBehaviorSubject.onCompleted();
}
    /**
     * Asynchronously (on the Rx IO scheduler) terminates every non-terminal worker of this job
     * and marks stage assignments as changed once the stream completes.
     * NOTE(review): the trailing "Terminated all workers" log fires immediately after
     * subscribing, i.e. possibly before the async terminations finish — confirm intent.
     */
    private void terminateAllWorkersAsync() {
        LOGGER.info("Terminating all workers of job {}", jobId);
        Observable.from(mantisJobMetaData.getStageMetadata().values())
            .flatMap((st) -> Observable.from(st.getAllWorkers()))
            .filter((worker) -> !WorkerState.isTerminalState(worker.getMetadata().getState()))
            .map((worker) -> {
                LOGGER.info("Terminating " + worker);
                terminateWorker(worker.getMetadata(), WorkerState.Completed, JobCompletedReason.Killed);
                return worker;
            })
            .doOnCompleted(() -> markStageAssignmentsChanged(true))
            .subscribeOn(Schedulers.io())
            .subscribe();
        LOGGER.info("Terminated all workers of job {}", jobId);
    }
    /**
     * Terminates a single worker: asks the scheduler to kill its task, records the terminal
     * state in the stage metadata, archives the worker and publishes a lifecycle status event.
     * Any exception is logged and swallowed (best-effort termination).
     *
     * @param workerMeta       the worker to terminate
     * @param finalWorkerState terminal state to record for the worker
     * @param reason           why the worker completed
     */
    private void terminateWorker(
        IMantisWorkerMetadata workerMeta, WorkerState finalWorkerState,
        JobCompletedReason reason) {
        LOGGER.info("Terminating worker {} with number {}", workerMeta, workerMeta.getWorkerNumber());
        try {
            WorkerId workerId = workerMeta.getWorkerId();
            // call vmservice terminate
            scheduler.unscheduleAndTerminateWorker(
                workerMeta.getWorkerId(),
                Optional.ofNullable(workerMeta.getSlave()));
            int stageNum = mantisJobMetaData.getWorkerNumberToStageMap().get(workerMeta.getWorkerNumber());
            Optional<IMantisStageMetadata> stageMetaOp = mantisJobMetaData.getStageMetadata(stageNum);
            if (stageMetaOp.isPresent()) {
                // Mark work as terminal
                WorkerTerminate terminateEvent = new WorkerTerminate(workerId, finalWorkerState, reason);
                MantisStageMetadataImpl stageMetaData = (MantisStageMetadataImpl) stageMetaOp.get();
                Optional<JobWorker> jobWorkerOp = stageMetaData.processWorkerEvent(terminateEvent, jobStore);
                // Mark work as terminal
                if (jobWorkerOp.isPresent()) {
                    jobStore.archiveWorker(jobWorkerOp.get().getMetadata());
                    eventPublisher.publishStatusEvent(new LifecycleEventsProto.WorkerStatusEvent(INFO,
                        "Terminated worker, reason: " + reason.name(),
                        workerMeta.getStageNum(), workerMeta.getWorkerId(), workerMeta.getState()));
                }
            } else {
                LOGGER.error("Stage {} not found while terminating worker {}", stageNum, workerId);
            }
        } catch (Exception e) {
            LOGGER.error("Error terminating worker {}", workerMeta.getWorkerId(), e);
        }
    }
    /**
     * Terminates a worker AND removes its index from the stage (used when scaling a stage
     * down). Records the terminal state, archives/removes the worker, kills the task via the
     * scheduler, updates job costs and flags stage assignments as changed.
     * Any exception is logged and swallowed (best-effort).
     *
     * @param workerMeta       the worker to terminate and remove
     * @param finalWorkerState terminal state to record for the worker
     * @param reason           why the worker completed
     */
    private void terminateAndRemoveWorker(
        IMantisWorkerMetadata workerMeta, WorkerState finalWorkerState,
        JobCompletedReason reason) {
        LOGGER.info("Terminating and removing worker {}", workerMeta.getWorkerId().getId());
        try {
            WorkerId workerId = workerMeta.getWorkerId();
            int stageNum = mantisJobMetaData.getWorkerNumberToStageMap().get(workerMeta.getWorkerNumber());
            Optional<IMantisStageMetadata> stageMetaOp = mantisJobMetaData.getStageMetadata(stageNum);
            if (stageMetaOp.isPresent()) {
                // Mark work as terminal
                WorkerTerminate terminateEvent = new WorkerTerminate(workerId, finalWorkerState, reason);
                MantisStageMetadataImpl stageMetaData = (MantisStageMetadataImpl) stageMetaOp.get();
                Optional<JobWorker> workerOp = stageMetaData.processWorkerEvent(terminateEvent, jobStore);
                eventPublisher.publishStatusEvent(new LifecycleEventsProto.WorkerStatusEvent(INFO,
                    "Removing worker, reason: " + reason.name(),
                    workerMeta.getStageNum(), workerMeta.getWorkerId(), workerMeta.getState()));
                // remove this worker index and archives the worker
                stageMetaData.unsafeRemoveWorker(workerId.getWorkerIndex(), workerId.getWorkerNum(), jobStore);
                // call vmservice terminate
                scheduler.unscheduleAndTerminateWorker(workerMeta.getWorkerId(), Optional.ofNullable(
                    workerMeta.getSlave()));
                //remove from workerNumber to stage map
                mantisJobMetaData.removeWorkerMetadata(workerMeta.getWorkerNumber());
                mantisJobMetaData.setJobCosts(costsCalculator.calculateCosts(mantisJobMetaData));
                LOGGER.info("Terminated worker {}", workerMeta);
                markStageAssignmentsChanged(true);
            } else {
                LOGGER.error("Stage {} not found while terminating worker {}", stageNum, workerId);
            }
        } catch (Exception e) {
            LOGGER.error("Error terminating worker {}", workerMeta.getWorkerId(), e);
        }
    }
    /** Recomputes the stage-to-worker assignment map and pushes it to all subscribers. */
    @Override
    public void refreshAndSendWorkerAssignments() {
        refreshStageAssignmentsAndPush();
    }
    /**
     * Periodic liveness check for all workers of the job. Two failure modes are handled:
     * (1) a worker that has never sent a heartbeat and has been stuck in Accepted longer than
     * the submit tolerance, and (2) a worker whose last heartbeat is older than the miss
     * tolerance (only when heartbeat-based termination is enabled). Offending workers are
     * resubmitted one by one, then disabled-VM migration runs on the same cycle.
     *
     * @param currentTime the time to evaluate heartbeats against
     */
    @Override
    public void checkHeartBeats(Instant currentTime) {
        LOGGER.debug("Using worker timeout {} for job {}", getWorkerTimeoutSecs(), this.jobMgr.getJobId());
        // heartbeat misses are calculated as 3 * heartbeatInterval, pick 1.5 multiplier for this check interval
        long missedHeartBeatToleranceSecs = (long) (1.5 * getWorkerTimeoutSecs());
        // Allow more time for workers to start
        long stuckInSubmitToleranceSecs =
            missedHeartBeatToleranceSecs + ConfigurationProvider.getConfig().getWorkerInitTimeoutSecs();
        List<JobWorker> workersToResubmit = Lists.newArrayList();
        // expire worker resubmit entries
        resubmitRateLimiter.expireResubmitRecords(currentTime.toEpochMilli());
        // For each stage
        for (IMantisStageMetadata stage : mantisJobMetaData.getStageMetadata().values()) {
            // For each worker in the stage
            for (JobWorker worker : stage.getAllWorkers()) {
                IMantisWorkerMetadata workerMeta = worker.getMetadata();
                if (!workerMeta.getLastHeartbeatAt().isPresent()) {
                    // no heartbeat yet: measure how long the worker has sat since acceptance
                    Instant acceptedAt = Instant.ofEpochMilli(workerMeta.getAcceptedAt());
                    if (Duration.between(acceptedAt, currentTime).getSeconds() > stuckInSubmitToleranceSecs) {
                        // worker stuck in accepted
                        workersToResubmit.add(worker);
                        eventPublisher.publishStatusEvent(new LifecycleEventsProto.WorkerStatusEvent(
                            WARN,
                            "worker stuck in Accepted state, resubmitting worker",
                            workerMeta.getStageNum(),
                            workerMeta.getWorkerId(),
                            workerMeta.getState()));
                    }
                } else {
                    if (Duration.between(workerMeta.getLastHeartbeatAt().get(), currentTime).getSeconds()
                        > missedHeartBeatToleranceSecs) {
                        // heartbeat too old
                        LOGGER.info("Job {}, Worker {} Duration between last heartbeat and now {} "
                            + "missed heart beat threshold {} exceeded", this.jobMgr.getJobId(),
                            workerMeta.getWorkerId(), Duration.between(
                                workerMeta.getLastHeartbeatAt().get(),
                                currentTime).getSeconds(), missedHeartBeatToleranceSecs);
                        if (ConfigurationProvider.getConfig().isHeartbeatTerminationEnabled()) {
                            eventPublisher.publishStatusEvent(new LifecycleEventsProto.WorkerStatusEvent(WARN,
                                "heartbeat too old, resubmitting worker", workerMeta.getStageNum(),
                                workerMeta.getWorkerId(), workerMeta.getState()));
                            workersToResubmit.add(worker);
                        } else {
                            LOGGER.warn(
                                "Heart beat based termination is disabled. Skipping termination of "
                                    + "worker {} Please see mantis.worker.heartbeat.termination.enabled",
                                workerMeta);
                        }
                    }
                }
            }
        }
        // resubmit offenders individually; one failure must not block the rest
        for (JobWorker worker : workersToResubmit) {
            try {
                resubmitWorker(worker);
            } catch (Exception e) {
                LOGGER.warn(
                    "Exception {} occurred resubmitting Worker {}",
                    e.getMessage(),
                    worker.getMetadata(),
                    e);
            }
        }
        // piggy-back disabled-VM migration on the heartbeat check cycle
        migrateDisabledVmWorkers(currentTime);
    }
    /**
     * Moves workers off disabled VMs. The migration strategy selects how many of the queued
     * worker numbers to migrate this iteration (based on started/total worker counts and the
     * time of the last migration); each selected worker is resubmitted.
     *
     * @param currentTime current time; pacing itself uses lastWorkerMigrationTimestamp
     */
    @Override
    public void migrateDisabledVmWorkers(Instant currentTime) {
        if (!workersToMigrate.isEmpty()) {
            Map<Integer, Integer> workerToStageMap = mantisJobMetaData.getWorkerNumberToStageMap();
            // strategy decides the batch to move based on current job health
            final List<Integer> workers = migrationStrategy.execute(workersToMigrate,
                getNumberOfWorkersInStartedState(), getTotalWorkerCount(), lastWorkerMigrationTimestamp);
            if (!workers.isEmpty()) {
                LOGGER.info("Job {} Going to migrate {} workers in this iteration", jobId, workers.size());
            }
            workers.forEach((w) -> {
                if (workerToStageMap.containsKey(w)) {
                    int stageNo = workerToStageMap.get(w);
                    Optional<IMantisStageMetadata> stageMetaOp = mantisJobMetaData.getStageMetadata(stageNo);
                    if (stageMetaOp.isPresent()) {
                        JobWorker jobWorker = null;
                        try {
                            jobWorker = stageMetaOp.get().getWorkerByWorkerNumber(w);
                            IMantisWorkerMetadata wm = jobWorker.getMetadata();
                            LOGGER.info("Moving worker {} of job {} away from disabled VM", wm.getWorkerId(),
                                jobId);
                            eventPublisher.publishStatusEvent(new LifecycleEventsProto.WorkerStatusEvent(INFO,
                                " Moving out of disabled VM " + wm.getSlave(), wm.getStageNum(),
                                wm.getWorkerId(), wm.getState()));
                            resubmitWorker(jobWorker);
                            lastWorkerMigrationTimestamp = System.currentTimeMillis();
                        } catch (Exception e) {
                            LOGGER.warn("Exception resubmitting worker {} during migration due to {}",
                                jobWorker, e.getMessage(), e);
                        }
                    } else {
                        LOGGER.warn("Stage {} Not Found. Skip move for worker {} in Job {}", stageNo, w, jobId);
                    }
                } else {
                    LOGGER.warn("worker {} not found in workerToStageMap {} for Job {}", w, workerToStageMap,
                        jobId);
                }
            });
        }
    }
private Optional<IMantisStageMetadata> getStageForWorker(WorkerEvent event) {
// Make sure we know about this worker. If not terminate it
Map<Integer, Integer> workerToStageMap = mantisJobMetaData.getWorkerNumberToStageMap();
if (!workerToStageMap.containsKey(event.getWorkerId().getWorkerNum())) {
LOGGER.warn("Event {} from Unknown worker {} ", event.getWorkerId(), event);
return empty();
}
// Find stage associated with this worker
Integer stageNum = workerToStageMap.get(event.getWorkerId().getWorkerNum());
Optional<IMantisStageMetadata> stageMetaOp = mantisJobMetaData.getStageMetadata(stageNum);
if (!stageMetaOp.isPresent()) {
LOGGER.warn("Stage {} not found in Job {} while processing event {}", stageNum, jobId, event);
}
return stageMetaOp;
}
private void terminateUnknownWorkerIfNonTerminal(final WorkerEvent event) {
if (!JobHelper.isTerminalWorkerEvent(event)) {
LOGGER.warn("Non terminal event from Unknown worker {} in Job {}. Request Termination",
event.getWorkerId(), this.jobMgr.getJobId());
Optional<String> host = JobHelper.getWorkerHostFromWorkerEvent(event);
scheduler.unscheduleAndTerminateWorker(event.getWorkerId(), host);
} else {
LOGGER.warn("Job {} Terminal event from Unknown worker {}. Ignoring", jobId, event.getWorkerId());
}
}
    /**
     * Central worker-event handler. In order: resolves the worker's stage (terminating unknown
     * non-terminal workers), re-queues unschedulable workers with a rate-limited delay,
     * sanity-checks heartbeat events against the stage's current index-to-number mapping,
     * delegates the state transition to the stage, resubmits errored workers, archives
     * completed ones, and finally drives the job-level all-started / all-completed
     * transitions.
     *
     * @param event    the worker event to process
     * @param jobState current state of the job
     */
    @Override
    public void processEvent(WorkerEvent event, JobState jobState) {
        try {
            Optional<IMantisStageMetadata> stageMetaOp = getStageForWorker(event);
            if (!stageMetaOp.isPresent()) {
                terminateUnknownWorkerIfNonTerminal(event);
                return;
            }
            // If worker cannot be scheduled currently, then put it back on the queue with delay and don't update
            // its state
            if (event instanceof WorkerUnscheduleable) {
                scheduler.updateWorkerSchedulingReadyTime(
                    event.getWorkerId(),
                    resubmitRateLimiter.getWorkerResubmitTime(
                        event.getWorkerId(),
                        stageMetaOp.get().getStageNum()));
                eventPublisher.publishStatusEvent(new LifecycleEventsProto.WorkerStatusEvent(
                    LifecycleEventsProto.StatusEvent.StatusEventType.ERROR,
                    "rate limiting: no resources to fit worker",
                    ((WorkerUnscheduleable) event).getStageNum(), event.getWorkerId(), WorkerState.Accepted));
                return;
            }
            MantisStageMetadataImpl stageMeta = (MantisStageMetadataImpl) stageMetaOp.get();
            // Check if stage worker state (worker index -> worker number) is consistent with the worker event.
            // TODO: add termination once confirmed the actual corruption scenario.
            try {
                if (event instanceof WorkerHeartbeat) {
                    int eventWorkerIndex = event.getWorkerId().getWorkerIndex();
                    int eventWorkerNum = event.getWorkerId().getWorkerNum();
                    int currentWorkerNum = stageMeta.getWorkerByIndex(eventWorkerIndex).getMetadata().getWorkerNumber();
                    if (currentWorkerNum > eventWorkerNum) {
                        // event is from a different worker number on same worker index
                        LOGGER.error(
                            "[Corrupted state] StaleWorkerEvent: {}, current worker at {}, Terminate stale "
                                + "worker",
                            event.getWorkerId(),
                            currentWorkerNum);
                    }
                    else if (currentWorkerNum < eventWorkerNum) {
                        // this case should not happen as new worker assignment should update state and persist first.
                        LOGGER.error(
                            "[Corrupted state] Newer worker num received: {}, Current stage worker: {}",
                            event,
                            currentWorkerNum);
                    }
                }
            } catch (InvalidJobException ije) {
                LOGGER.error("Invalid job error when checking event: {}", event, ije);
            }
            try {
                // Delegate processing of the event to the stage
                Optional<JobWorker> workerOp = stageMeta.processWorkerEvent(event, jobStore);
                if (!workerOp.isPresent()) {
                    terminateUnknownWorkerIfNonTerminal(event);
                    return;
                }
                IMantisWorkerMetadata wm = workerOp.get().getMetadata();
                // If we need to migrate off of disabled VM add it to the queue
                if (event instanceof WorkerOnDisabledVM) {
                    workersToMigrate.add(wm.getWorkerNumber());
                    return;
                }
                // Worker transitioned to terminal state resubmit
                if (WorkerState.isErrorState(wm.getState()) && !JobState.isTerminalState(jobState)) {
                    eventPublisher.publishStatusEvent(new LifecycleEventsProto.WorkerStatusEvent(WARN,
                        "resubmitting lost worker ", wm.getStageNum(),
                        wm.getWorkerId(), wm.getState()));
                    recentErrorWorkersCache.put(wm.getWorkerNumber(), true);
                    resubmitWorker(workerOp.get());
                    return;
                } else if (WorkerState.isTerminalState(wm.getState())) { // worker has explicitly
                    // completed complete job
                    jobStore.archiveWorker(wm);
                    LOGGER.info("Received Worker Complete signal. Wait for all workers to complete before "
                        + "terminating Job {}", jobId);
                }
                // heartbeats don't alter assignments; any other event may have
                if (!(event instanceof WorkerHeartbeat)) {
                    markStageAssignmentsChanged(false);
                }
            } catch (Exception e) {
                LOGGER.warn("Exception saving worker update", e);
            }
            // Drive job-level transitions based on aggregate worker state.
            if (!allWorkersStarted && !JobState.isTerminalState(jobState)) {
                if (allWorkerStarted()) {
                    allWorkersStarted = true;
                    jobMgr.onAllWorkersStarted();
                    markStageAssignmentsChanged(true);
                } else if (allWorkerCompleted()) {
                    LOGGER.info("Job {} All workers completed1", jobId);
                    allWorkersStarted = false;
                    jobMgr.onAllWorkersCompleted();
                }
            } else {
                if (allWorkerCompleted()) {
                    LOGGER.info("Job {} All workers completed", jobId);
                    allWorkersStarted = false;
                    jobMgr.onAllWorkersCompleted();
                }
            }
        } catch (Exception e1) {
            LOGGER.error("Job {} Exception occurred in process worker event ", jobId, e1);
        }
    }
private boolean allWorkerStarted() {
Iterator<? extends IMantisStageMetadata> iterator =
mantisJobMetaData.getStageMetadata().values().iterator();
while (iterator.hasNext()) {
MantisStageMetadataImpl stageMeta = (MantisStageMetadataImpl) iterator.next();
if (!stageMeta.isAllWorkerStarted()) {
return false;
}
}
return true;
}
private int getNumberOfWorkersInStartedState() {
return mantisJobMetaData.getStageMetadata().values().stream()
.map((stageMeta) -> ((MantisStageMetadataImpl) stageMeta).getNumStartedWorkers())
.reduce(0, (acc, num) -> acc + num);
}
private int getTotalWorkerCount() {
return mantisJobMetaData.getStageMetadata().values().stream()
.map(IMantisStageMetadata::getNumWorkers)
.reduce(0, (acc, num) -> acc + num);
}
private boolean allWorkerCompleted() {
Iterator<? extends IMantisStageMetadata> iterator =
mantisJobMetaData.getStageMetadata().values().iterator();
while (iterator.hasNext()) {
MantisStageMetadataImpl stageMeta = (MantisStageMetadataImpl) iterator.next();
// skip job master worker
if (stageMeta.getStageNum() == 0) {
continue;
}
if (!stageMeta.isAllWorkerCompleted()) {
return false;
}
}
return true;
}
@Override
public void resubmitWorker(int workerNum) throws Exception {
Map<Integer, Integer> workerToStageMap = mantisJobMetaData.getWorkerNumberToStageMap();
if (workerToStageMap.containsKey(workerNum)) {
int stageNum = workerToStageMap.get(workerNum);
Optional<IMantisStageMetadata> stageMeta = mantisJobMetaData.getStageMetadata(stageNum);
if (stageMeta.isPresent()) {
JobWorker worker = stageMeta.get().getWorkerByWorkerNumber(workerNum);
resubmitWorker(worker);
} else {
throw new Exception(String.format("Invalid stage %d in resubmit Worker request %d", stageNum,
workerNum));
}
} else {
LOGGER.warn("No such Worker number {} in Job with ID {}", workerNum, jobId);
throw new Exception(String.format("No such worker number %d in resubmit Worker request", workerNum));
}
}
@Override
public List<IMantisWorkerMetadata> getActiveWorkers(int limit) {
List<IMantisWorkerMetadata> workers = mantisJobMetaData.getStageMetadata().values()
.stream()
.flatMap((st) -> st.getAllWorkers().stream())
.filter((worker) -> !WorkerState.isTerminalState(worker.getMetadata().getState()))
.map(JobWorker::getMetadata)
.collect(Collectors.toList());
if (workers.size() > limit) {
return workers.subList(0, limit);
} else {
return workers;
}
}
    /** @return the subject streaming scheduling-info snapshots for this job. */
    @Override
    public BehaviorSubject<JobSchedulingInfo> getJobStatusSubject() {
        return this.jobSchedulingInfoBehaviorSubject;
    }
    /**
     * Replaces the given worker with a freshly numbered one: persists the replacement, kills
     * the old task, and queues the new worker with a rate-limited ready-at delay. If too many
     * resubmissions occurred recently, the job manager is notified instead.
     * NOTE(review): the guard compares the size of the whole recentErrorWorkersCache against a
     * per-worker maximum — confirm the cache is effectively scoped per worker.
     *
     * @param oldWorker the worker being replaced
     * @throws Exception if the worker's stage cannot be resolved or persistence fails
     */
    private void resubmitWorker(JobWorker oldWorker) throws Exception {
        LOGGER.info("Resubmitting worker {}", oldWorker.getMetadata());
        Map<Integer, Integer> workerToStageMap = mantisJobMetaData.getWorkerNumberToStageMap();
        IMantisWorkerMetadata oldWorkerMetadata = oldWorker.getMetadata();
        if (recentErrorWorkersCache.size()
            < ConfigurationProvider.getConfig().getMaximumResubmissionsPerWorker()) {
            Integer stageNo = workerToStageMap.get(oldWorkerMetadata.getWorkerId().getWorkerNum());
            if (stageNo == null) {
                String errMsg = String.format("Stage %d not found in Job %s while resubmiting worker %s",
                    stageNo, jobId, oldWorker);
                LOGGER.warn(errMsg);
                throw new Exception(errMsg);
            }
            Optional<IMantisStageMetadata> stageMetaOp = mantisJobMetaData.getStageMetadata(stageNo);
            if (!stageMetaOp.isPresent()) {
                String errMsg = String.format("Stage %d not found in Job %s while resubmiting worker %s",
                    stageNo, jobId, oldWorker);
                LOGGER.warn(errMsg);
                throw new Exception(errMsg);
            }
            MantisStageMetadataImpl stageMeta = (MantisStageMetadataImpl) stageMetaOp.get();
            // replacement keeps the same index but gets a fresh worker number
            JobWorker newWorker = new JobWorker.Builder()
                .withJobId(jobId)
                .withWorkerIndex(oldWorkerMetadata.getWorkerIndex())
                .withWorkerNumber(workerNumberGenerator.getNextWorkerNumber(mantisJobMetaData, jobStore))
                .withNumberOfPorts(stageMeta.getMachineDefinition().getNumPorts()
                    + MANTIS_SYSTEM_ALLOCATED_NUM_PORTS)
                .withStageNum(oldWorkerMetadata.getStageNum())
                .withResubmitCount(oldWorkerMetadata.getTotalResubmitCount() + 1)
                .withResubmitOf(oldWorkerMetadata.getWorkerNumber())
                .withLifecycleEventsPublisher(eventPublisher)
                .build();
            mantisJobMetaData.replaceWorkerMetaData(oldWorkerMetadata.getStageNum(), newWorker, oldWorker,
                jobStore);
            mantisJobMetaData.setJobCosts(costsCalculator.calculateCosts(mantisJobMetaData));
            // kill the task if it is still running
            scheduler.unscheduleAndTerminateWorker(
                oldWorkerMetadata.getWorkerId(),
                Optional.ofNullable(oldWorkerMetadata.getSlave()));
            // rate limiter may delay the replacement to avoid tight resubmit loops
            long workerResubmitTime = resubmitRateLimiter.getWorkerResubmitTime(
                newWorker.getMetadata().getWorkerId(), stageMeta.getStageNum());
            Optional<Long> delayDuration = of(workerResubmitTime);
            // publish a refresh before enqueuing new Task to Scheduler
            markStageAssignmentsChanged(true);
            // queue the new worker for execution
            queueTask(newWorker.getMetadata(), delayDuration);
            LOGGER.info("Worker {} successfully queued for scheduling", newWorker);
            numWorkerResubmissions.increment();
        } else {
            // todo numWorkerResubmitLimitReached.increment();
            LOGGER.error("Resubmit count exceeded");
            jobMgr.onTooManyWorkerResubmits();
        }
    }
    /**
     * Scales the given stage to {@code numWorkers} workers (clamped to the stage's scaling
     * policy min/max, or [0, configured max] when no policy exists).
     *
     * <p>Preconditions: stage is valid and scalable. The expected worker count is updated and
     * persisted first (aborting the whole operation on failure), then workers are added or
     * terminated one by one. If an individual add/remove fails we continue with the others —
     * the heartbeat check will resubmit any worker that did not get scheduled.
     *
     * @param stageMetaData the stage to scale
     * @param numWorkers    desired worker count before clamping
     * @param reason        human-readable reason recorded in the status event
     * @return the actual (clamped) worker count the stage was set to
     */
    @Override
    public int scaleStage(MantisStageMetadataImpl stageMetaData, int numWorkers, String reason) {
        LOGGER.info("Scaling stage {} to {} workers", stageMetaData.getStageNum(), numWorkers);
        final int oldNumWorkers = stageMetaData.getNumWorkers();
        int max = ConfigurationProvider.getConfig().getMaxWorkersPerStage();
        int min = 0;
        if (stageMetaData.getScalingPolicy() != null) {
            max = stageMetaData.getScalingPolicy().getMax();
            min = stageMetaData.getScalingPolicy().getMin();
        }
        // sanitize input worker count to be between min and max
        int newNumWorkerCount = Math.max(Math.min(numWorkers, max), min);
        if (newNumWorkerCount != oldNumWorkers) {
            try {
                // persist the new expected count first; abort the scale on failure
                stageMetaData.unsafeSetNumWorkers(newNumWorkerCount, jobStore);
                eventPublisher.publishStatusEvent(new LifecycleEventsProto.JobStatusEvent(INFO,
                    String.format("Setting #workers to %d for stage %d, reason=%s", newNumWorkerCount,
                        stageMetaData.getStageNum(), reason), getJobId(), getJobState()));
            } catch (Exception e) {
                String error = String.format("Exception updating stage %d worker count for Job %s due to %s",
                    stageMetaData.getStageNum(), jobId, e.getMessage());
                LOGGER.warn(error);
                eventPublisher.publishStatusEvent(new LifecycleEventsProto.JobStatusEvent(WARN,
                    String.format("Scaling stage failed for stage %d reason: %s", stageMetaData.getStageNum(), e.getMessage()),
                    getJobId(), getJobState()));
                throw new RuntimeException(error);
            }
            if (newNumWorkerCount > oldNumWorkers) {
                // scale up: append workers at the next free indices
                for (int i = 0; i < newNumWorkerCount - oldNumWorkers; i++) {
                    try {
                        int newWorkerIndex = oldNumWorkers + i;
                        SchedulingInfo schedInfo = mantisJobMetaData.getJobDefinition().getSchedulingInfo();
                        IMantisWorkerMetadata workerRequest = addWorker(schedInfo, stageMetaData.getStageNum(),
                            newWorkerIndex);
                        jobStore.storeNewWorker(workerRequest);
                        markStageAssignmentsChanged(true);
                        queueTask(workerRequest);
                    } catch (Exception e) {
                        // creating a worker failed but expected no of workers was set successfully,
                        // during heartbeat check we will
                        // retry launching this worker
                        LOGGER.warn("Exception adding new worker for {}", stageMetaData.getJobId().getId(), e);
                    }
                }
            } else {
                // potential bulk removal opportunity?
                // scale down: remove workers from the highest indices first
                for (int i = 0; i < oldNumWorkers - newNumWorkerCount; i++) {
                    try {
                        final JobWorker w = stageMetaData.getWorkerByIndex(oldNumWorkers - i - 1);
                        terminateAndRemoveWorker(w.getMetadata(), WorkerState.Completed, JobCompletedReason.Killed);
                    } catch (InvalidJobException e) {
                        // deleting a worker failed but expected no of workers was set successfully,
                        // during heartbeat check we will
                        // retry killing this worker
                        LOGGER.warn("Exception terminating worker for {}", stageMetaData.getJobId().getId(), e);
                    }
                }
            }
        }
        LOGGER.info("{} Scaled stage to {} workers", stageMetaData.getJobId().getId(), newNumWorkerCount);
        return newNumWorkerCount;
    }
}
private String getResourceCluster() {
return mantisJobMetaData.getJobDefinition().getResourceCluster().map(ClusterID::getResourceID).orElse("mesos");
}
}
| 8,063 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/FilterableMantisJobMetadataWritable.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.server.master.domain.Costs;
import io.mantisrx.server.master.store.MantisJobMetadataWritable;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonFilter;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.net.URL;
import java.util.List;
/**
 * A {@link MantisJobMetadataWritable} whose serialized fields can be filtered at runtime via
 * the Jackson filter named "jobMetadata", and which additionally carries the job's
 * {@link Costs}.
 */
@JsonFilter("jobMetadata")
public class FilterableMantisJobMetadataWritable extends MantisJobMetadataWritable {
    // Calculated costs for this job; may be null for records persisted before costs existed.
    private final Costs costs;
    // NOTE(review): @JsonIgnoreProperties is conventionally a class-level annotation; its
    // placement on the constructor here may have no effect — confirm intended behavior.
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public FilterableMantisJobMetadataWritable(@JsonProperty("jobId") String jobId,
                                               @JsonProperty("name") String name,
                                               @JsonProperty("user") String user,
                                               @JsonProperty("submittedAt") long submittedAt,
                                               @JsonProperty("startedAt") long startedAt,
                                               @JsonProperty("jarUrl") URL jarUrl,
                                               @JsonProperty("numStages") int numStages,
                                               @JsonProperty("sla") JobSla sla,
                                               @JsonProperty("state") MantisJobState state,
                                               @JsonProperty("workerTimeoutSecs") long workerTimeoutSecs,
                                               @JsonProperty("heartbeatIntervalSecs") long heartbeatIntervalSecs,
                                               @JsonProperty("subscriptionTimeoutSecs") long subscriptionTimeoutSecs,
                                               @JsonProperty("parameters") List<Parameter> parameters,
                                               @JsonProperty("nextWorkerNumberToUse") int nextWorkerNumberToUse,
                                               @JsonProperty("migrationConfig") WorkerMigrationConfig migrationConfig,
                                               @JsonProperty("labels") List<Label> labels,
                                               @JsonProperty("costs") Costs costs) {
        // All base fields are handled by the parent; only costs is stored here.
        super(jobId, name, user, submittedAt, startedAt, jarUrl, numStages, sla, state, workerTimeoutSecs,
            heartbeatIntervalSecs, subscriptionTimeoutSecs, parameters, nextWorkerNumberToUse, migrationConfig, labels);
        this.costs = costs;
    }
    public Costs getCosts() {
        return costs;
    }
}
| 8,064 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/JobHelper.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import static java.util.Optional.empty;
import static java.util.Optional.ofNullable;
import io.mantisrx.master.jobcluster.job.worker.WorkerHeartbeat;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.master.jobcluster.job.worker.WorkerStatus;
import io.mantisrx.master.jobcluster.job.worker.WorkerTerminate;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import io.mantisrx.server.master.scheduler.WorkerLaunched;
import io.mantisrx.server.master.scheduler.WorkerResourceStatus;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Static utility methods shared by job-related actors. Not instantiable.
 */
public final class JobHelper {
    private static final Logger LOGGER = LoggerFactory.getLogger(JobHelper.class);

    private JobHelper() {
    }

    /**
     * Returns the list of user-visible stage numbers (1..N) for the given scheduling info.
     * The Job Master stage, when present, is a system stage and is excluded from the count.
     *
     * @param schedulingInfo scheduling info holding the stage map
     * @param hasJobMaster   whether one of the stages is the Job Master stage
     * @return stage numbers for the user stages, starting at 1
     */
    public static List<Integer> getUserStageNumbers(SchedulingInfo schedulingInfo, boolean hasJobMaster) {
        int userStageCount = schedulingInfo.getStages().size() - (hasJobMaster ? 1 : 0);
        List<Integer> stageNumbers = new ArrayList<>(Math.max(userStageCount, 0));
        for (int stage = 1; stage <= userStageCount; stage++) {
            stageNumbers.add(stage);
        }
        return stageNumbers;
    }

    /**
     * Determines whether the given worker event represents a terminal condition for the worker.
     *
     * @param workerEvent the event to classify
     * @return true if the event means the worker has terminated (or failed to start)
     */
    public static boolean isTerminalWorkerEvent(WorkerEvent workerEvent) {
        if (workerEvent instanceof WorkerTerminate) {
            return true;
        }
        if (workerEvent instanceof WorkerStatus) {
            return WorkerState.isTerminalState(((WorkerStatus) workerEvent).getState());
        }
        if (workerEvent instanceof WorkerResourceStatus) {
            WorkerResourceStatus.VMResourceState state = ((WorkerResourceStatus) workerEvent).getState();
            return WorkerResourceStatus.VMResourceState.FAILED.equals(state)
                    || WorkerResourceStatus.VMResourceState.COMPLETED.equals(state);
        }
        return false;
    }

    /**
     * Extracts the worker's hostname from the event, when the event type carries one
     * (WorkerLaunched and WorkerHeartbeat do; other event types do not).
     *
     * @param event the worker event
     * @return the hostname if present, otherwise {@link Optional#empty()}
     */
    public static Optional<String> getWorkerHostFromWorkerEvent(WorkerEvent event) {
        if (event instanceof WorkerLaunched) {
            return ofNullable(((WorkerLaunched) event).getHostname());
        }
        if (event instanceof WorkerHeartbeat) {
            return ofNullable(((WorkerHeartbeat) event).getStatus().getHostname());
        }
        LOGGER.warn("Host name unknown for workerId {}", event.getWorkerId());
        return empty();
    }

    /**
     * Calculates the remaining runtime for a job with a max-runtime limit. Called when
     * a) all workers have started and the job transitions to Launched, or
     * b) the master restarts and re-initializes an already-running job.
     *
     * @param maxRuntimeSecs configured maximum runtime in seconds; values &lt;= 0 mean no limit
     *                       and are returned unchanged
     * @param startedAt      instant the job originally started
     * @return seconds until the job should be terminated; at least 1 if the limit has
     *         already elapsed so the terminate fires promptly
     */
    public static long calculateRuntimeDuration(long maxRuntimeSecs, Instant startedAt) {
        if (maxRuntimeSecs <= 0) {
            return maxRuntimeSecs;
        }
        Instant now = Instant.now();
        if (!now.isAfter(startedAt)) {
            return maxRuntimeSecs;
        }
        // Job was already running (occurs when master was restarted): subtract elapsed time.
        long elapsedSeconds = now.getEpochSecond() - startedAt.getEpochSecond();
        long remaining = maxRuntimeSecs - elapsedSeconds;
        // If the limit has already passed, trigger termination in one second.
        return remaining > 0 ? remaining : 1;
    }
}
| 8,065 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/IMantisJobManager.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.KillJobRequest;
import io.mantisrx.master.jobcluster.proto.JobProto;
import io.mantisrx.master.jobcluster.proto.JobProto.CheckHeartBeat;
import io.mantisrx.master.jobcluster.proto.JobProto.InitJob;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.scheduler.WorkerEvent;
/**
 * Declares the behavior of the Mantis Job Manager Actor, which owns the lifecycle of a
 * single job: initialization, worker events, scaling, heartbeat monitoring and termination.
 */
public interface IMantisJobManager {
    /**
     * Returns metadata associated with this job.
     *
     * @return the {@link IMantisJobMetadata} describing this job
     */
    IMantisJobMetadata getJobDetails();
    /**
     * Process the scheduling info request from a client by responding with
     * {@link JobClusterManagerProto.GetJobSchedInfoResponse} which will stream details about the workers
     * for this job.
     *
     * @param r the scheduling-info request to answer
     */
    void onGetJobStatusSubject(JobClusterManagerProto.GetJobSchedInfoRequest r);
    /**
     * Process the discovery info request from a client by responding with
     * {@link JobClusterManagerProto.GetLatestJobDiscoveryInfoResponse} containing the latest
     * discovery info for this job.
     *
     * @param r the discovery-info request to answer
     */
    void onGetLatestJobDiscoveryInfo(JobClusterManagerProto.GetLatestJobDiscoveryInfoRequest r);
    /**
     * Process worker related events. Update worker state and transition worker to new states.
     *
     * @param e the worker event to process
     */
    void processWorkerEvent(WorkerEvent e);
    /**
     * Returns the {@link JobId} of this job.
     *
     * @return this job's id
     */
    JobId getJobId();
    /**
     * Returns the current {@link JobState} of this job.
     *
     * @return the current job state
     */
    JobState getJobState();
    /**
     * Invoked when all workers of this job have entered the Started state for the first time. This will
     * transition the Job into Launched state.
     *
     * @return true if the transition was performed — TODO confirm against implementation
     */
    boolean onAllWorkersStarted();
    /**
     * Invoked when all workers of this job have been terminated. This will trigger final clean up of this job.
     */
    void onAllWorkersCompleted();
    /**
     * Invoked when the number of automatic worker resubmits exceeds configured threshold. This will trigger
     * a job shutdown.
     *
     * @return true if the shutdown was initiated — TODO confirm against implementation
     */
    boolean onTooManyWorkerResubmits();
    /**
     * Invoked when a job termination request is received. This should tear down the job.
     *
     * @param state  terminal state to leave the job in
     * @param reason human-readable reason for the shutdown
     */
    void shutdown(JobState state, String reason);
    /**
     * If the job had been launched with a runtime limit then this method gets invoked after that limit has
     * been reached. The Job should then begin termination process.
     *
     * @param r the runtime-limit-reached notification
     */
    void onRuntimeLimitReached(JobProto.RuntimeLimitReached r);
    /**
     * Invoked by the Job Cluster Actor to commence job initialization.
     *
     * @param i the initialization request
     */
    void onJobInitialize(InitJob i);
    /**
     * Returns Job details using {@link JobClusterManagerProto.GetJobDetailsResponse}.
     *
     * @param r the details request to answer
     */
    void onGetJobDetails(GetJobDetailsRequest r);
    /**
     * Invoked at a periodic basis to make sure all workers of this job have sent heart beats within a
     * preconfigured interval.
     *
     * @param r the periodic heartbeat-check trigger
     */
    void onCheckHeartBeats(CheckHeartBeat r);
    /**
     * Invoked during Agent fleet deployment to move workers onto the new agent fleet.
     *
     * @param r the migrate-workers request
     */
    void onMigrateWorkers(JobProto.MigrateDisabledVmWorkersRequest r);
    /**
     * Invoked to trigger job termination.
     *
     * @param req the kill-job request
     */
    void onJobKill(KillJobRequest req);
    /**
     * Invoked by either Job Master or a user to change the number of workers of this job.
     *
     * @param scaleStage the stage-scaling request
     */
    void onScaleStage(ScaleStageRequest scaleStage);
    /**
     * Invoked to explicitly resubmit a particular worker.
     *
     * @param r the resubmit-worker request
     */
    void onResubmitWorker(ResubmitWorkerRequest r);
    /**
     * Returns a list of active workers for this job using {@link JobClusterManagerProto.ListWorkersResponse}.
     *
     * @param request the list-workers request to answer
     */
    void onListActiveWorkers(JobClusterManagerProto.ListWorkersRequest request);
    /**
     * Send worker assignments if there have been changes.
     *
     * @param p the periodic send-assignments trigger
     */
    void onSendWorkerAssignments(JobProto.SendWorkerAssignementsIfChanged p);
}
| 8,066 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/IMantisStageMetadata.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import io.mantisrx.master.jobcluster.job.worker.JobWorker;
import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.persistence.exceptions.InvalidJobException;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonSubTypes;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonTypeInfo;
import java.util.Collection;
import java.util.List;
/**
 * Represents Metadata associated with a Mantis Job stage: its position in the job,
 * resource configuration, scheduling constraints, scaling policy and workers.
 */
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS,
        include = JsonTypeInfo.As.PROPERTY,
        property = "type")
@JsonSubTypes({
        @JsonSubTypes.Type(value = MantisStageMetadataImpl.class)
})
public interface IMantisStageMetadata {
    /**
     * Returns the {@link JobId} associated with this stage.
     *
     * @return the owning job's id
     */
    JobId getJobId();
    /**
     * Returns the stage number of this stage.
     *
     * @return this stage's number
     */
    int getStageNum();
    /**
     * Returns the total number of stages.
     *
     * @return the total stage count of the owning job
     */
    int getNumStages();
    /**
     * Returns the {@link MachineDefinition} associated with this stage. This is the resource configuration
     * of the workers of this stage.
     *
     * @return the per-worker resource configuration
     */
    MachineDefinition getMachineDefinition();
    /**
     * Returns the total number of workers for this stage.
     *
     * @return the worker count
     */
    int getNumWorkers();
    /**
     * Returns the List of {@link JobConstraints} (mandatory) associated with this job.
     *
     * @return the hard scheduling constraints
     */
    List<JobConstraints> getHardConstraints();
    /**
     * Returns the List of {@link JobConstraints} (best effort) associated with this job.
     *
     * @return the soft scheduling constraints
     */
    List<JobConstraints> getSoftConstraints();
    /**
     * Returns the scaling policy {@link StageScalingPolicy} for this stage.
     *
     * @return the stage scaling policy
     */
    StageScalingPolicy getScalingPolicy();
    /**
     * Returns true if this stage is scalable.
     *
     * @return whether the stage's worker count can be scaled
     */
    boolean getScalable();
    /**
     * Get list of {@link JobWorker} associated with this stage.
     * Use getAllWorkers instead.
     *
     * @return the workers of this stage
     * @deprecated use {@link #getAllWorkers()} instead
     */
    @Deprecated
    Collection<JobWorker> getWorkerByIndexMetadataSet();
    /**
     * Get list of {@link JobWorker} associated with this stage.
     *
     * @return the workers of this stage
     */
    Collection<JobWorker> getAllWorkers();
    /**
     * Returns the {@link JobWorker} with the given index.
     *
     * @param workerIndex index of the worker within this stage
     * @return the matching worker
     * @throws InvalidJobException if no worker exists at that index
     */
    JobWorker getWorkerByIndex(int workerIndex) throws InvalidJobException;
    /**
     * Returns the {@link JobWorker} with the given worker number.
     *
     * @param workerNumber the job-unique worker number
     * @return the matching worker
     * @throws InvalidJobException if no worker exists with that number
     */
    JobWorker getWorkerByWorkerNumber(int workerNumber) throws InvalidJobException;
}
| 8,067 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/IMantisJobMetadata.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import io.mantisrx.common.Label;
import io.mantisrx.master.jobcluster.job.worker.JobWorker;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.server.master.domain.Costs;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.persistence.exceptions.InvalidJobException;
import java.net.URL;
import java.time.Instant;
import java.util.List;
import java.util.Map;
import java.util.Optional;
/**
 * The Metadata associated with a Mantis Job: identity, submission details, lifecycle state,
 * parameters/labels, stage and worker layout, and costs.
 */
public interface IMantisJobMetadata {
    // Sentinel "started at" value used before a job has actually started.
    long DEFAULT_STARTED_AT_EPOCH = 0;
    /**
     * Returns the {@link JobId}.
     *
     * @return this job's id
     */
    JobId getJobId();
    /**
     * Returns the Job Cluster Name for this job.
     *
     * @return the owning job cluster's name
     */
    String getClusterName();
    /**
     * Returns the submitter of this job.
     *
     * @return the submitting user
     */
    String getUser();
    /**
     * Returns the {@link Instant} this job was submitted.
     *
     * @return the submission time
     */
    Instant getSubmittedAtInstant();
    /**
     * Returns an optional Instant this job went into started state.
     *
     * @return the start time, or empty if the job has not started
     */
    Optional<Instant> getStartedAtInstant();
    /**
     * Returns an optional Instant this job completed.
     *
     * @return the completion time, or empty if the job has not ended
     */
    Optional<Instant> getEndedAtInstant();
    /**
     * Returns the artifact associated with this job.
     *
     * @return the artifact name
     */
    String getArtifactName();
    /**
     * Returns an optional {@link JobSla} for this job if it exists.
     *
     * @return the SLA, or empty if none was set
     */
    Optional<JobSla> getSla();
    /**
     * Returns the subscription timeout in seconds associated with this job.
     *
     * @return the subscription timeout in seconds
     */
    long getSubscriptionTimeoutSecs();
    /**
     * Returns the current state of this job.
     *
     * @return the current {@link JobState}
     */
    JobState getState();
    /**
     * Returns the list of {@link Parameter} associated with this job.
     *
     * @return the job parameters
     */
    List<Parameter> getParameters();
    /**
     * Returns the list of {@link Label} associated with this job.
     *
     * @return the job labels
     */
    List<Label> getLabels();
    /**
     * Returns metadata about all the stages of this job.
     *
     * @return stage metadata keyed by stage number
     */
    Map<Integer, ? extends IMantisStageMetadata> getStageMetadata();
    /**
     * Returns a count of the number of stages in this job.
     *
     * @return the total stage count
     */
    int getTotalStages();
    /**
     * Returns {@link IMantisStageMetadata} for the stage identified by the given stage number if one exists.
     *
     * @param stageNum the stage number to look up
     * @return the stage metadata, or empty if no such stage
     */
    Optional<IMantisStageMetadata> getStageMetadata(int stageNum);
    /**
     * Returns {@link JobWorker} associated with the given stage number and worker index if one exists.
     *
     * @param stageNumber the stage to look in
     * @param workerIndex the worker index within that stage
     * @return the worker, or empty if not found
     * @throws InvalidJobException if the lookup is invalid for this job
     */
    Optional<JobWorker> getWorkerByIndex(int stageNumber, int workerIndex) throws InvalidJobException;
    /**
     * Returns {@link JobWorker} associated with the given stage number and worker number if one exists.
     *
     * @param workerNumber the job-unique worker number
     * @return the worker, or empty if not found
     * @throws InvalidJobException if the lookup is invalid for this job
     */
    Optional<JobWorker> getWorkerByNumber(int workerNumber) throws InvalidJobException;
    /**
     * Worker numbers are assigned in an incremental fashion. This method returns the next number to use.
     *
     * @return the next worker number to assign
     */
    int getNextWorkerNumberToUse();
    /**
     * Returns the {@link SchedulingInfo} associated with this job.
     *
     * @return the job's scheduling info
     */
    SchedulingInfo getSchedulingInfo();
    /**
     * Returns the min runtime in seconds associated with this job (defaults to -1).
     *
     * @return the minimum runtime in seconds
     */
    long getMinRuntimeSecs();
    /**
     * Returns a {@link URL} pointing to the artifact used by this job. In reality this is not interpreted
     * as a URL. The trailing portion of this is used to identify the artifact.
     *
     * @return the artifact locator
     */
    URL getJobJarUrl();
    /**
     * Returns the {@link JobDefinition} associated with this Job.
     *
     * @return the job definition
     */
    JobDefinition getJobDefinition();
    /**
     * Returns the costs associated with this job.
     *
     * @return Costs
     */
    Costs getJobCosts();
    /**
     * Job level heartbeat configuration.
     *
     * @return the heartbeat interval in seconds
     */
    long getHeartbeatIntervalSecs();
    /**
     * Job level timeout interval for worker.
     * This resubmits a worker if existing worker
     * is past timeout secs.
     *
     * @return the worker timeout in seconds
     */
    long getWorkerTimeoutSecs();
}
| 8,068 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/FilterableMantisWorkerMetadataWritable.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import io.mantisrx.server.master.store.MantisWorkerMetadataWritable;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonFilter;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
// Unknown JSON fields must be tolerated when reading persisted worker metadata. Jackson only
// honors @JsonIgnoreProperties at the class (or property) level, so the annotation is placed on
// the type; on the @JsonCreator constructor (its previous location) it had no effect.
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonFilter("workerMetadataList")
public class FilterableMantisWorkerMetadataWritable extends MantisWorkerMetadataWritable {

    /**
     * Deserialization constructor; delegates all fields to the parent writable.
     *
     * @param workerIndex   worker index within the stage
     * @param workerNumber  worker number within the job
     * @param jobId         id of the owning job
     * @param stageNum      stage this worker belongs to
     * @param numberOfPorts number of ports for this worker
     */
    @JsonCreator
    public FilterableMantisWorkerMetadataWritable(@JsonProperty("workerIndex") int workerIndex,
                                                  @JsonProperty("workerNumber") int workerNumber,
                                                  @JsonProperty("jobId") String jobId,
                                                  @JsonProperty("stageNum") int stageNum,
                                                  @JsonProperty("numberOfPorts") int numberOfPorts) {
        super(workerIndex, workerNumber, jobId, stageNum, numberOfPorts);
    }
}
| 8,069 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/JobState.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.Map;
/**
 * Declares the states a Job can be in, the legal transitions between them, and a roll-up of
 * each state into a coarser active/terminal {@link MetaState}.
 */
public enum JobState {
    /** The initial state of a newly submitted job. */
    Accepted,
    /** Indicates the job is running (scheduled and sent to an agent). */
    Launched,
    /** The job is in the process of terminating due to an error. */
    Terminating_abnormal,
    /** The job is in the process of terminating for normal reasons. */
    Terminating_normal,
    /** Terminal state via abnormal termination; OK to handle as a resubmit. */
    Failed,
    /** Terminal state via normal termination; not necessarily successful. */
    Completed,
    /** Placeholder state, internal use only. */
    Noop;

    // Shared empty array for states with no outgoing transitions.
    private static final JobState[] NO_TRANSITIONS = new JobState[] {};
    // Legal target states keyed by current state; EnumMap gives compact array-backed lookup
    // for enum keys. Noop deliberately has no entry (no transitions are valid from it).
    private static final Map<JobState, JobState[]> VALID_CHANGES;
    // Roll-up of each state into Active/Terminal. Noop has no mapping.
    private static final Map<JobState, MetaState> META_STATES;

    static {
        VALID_CHANGES = new EnumMap<>(JobState.class);
        VALID_CHANGES.put(Accepted, new JobState[] {
            Accepted, Launched, Terminating_abnormal, Terminating_normal,
            Failed, Completed
        });
        VALID_CHANGES.put(Launched, new JobState[] {
            Launched, Terminating_abnormal, Terminating_normal,
            Failed, Completed
        });
        VALID_CHANGES.put(Terminating_abnormal, new JobState[] {
            Terminating_abnormal, Failed
        });
        VALID_CHANGES.put(Terminating_normal, new JobState[] {
            Terminating_normal, Completed
        });
        VALID_CHANGES.put(Failed, NO_TRANSITIONS);
        VALID_CHANGES.put(Completed, NO_TRANSITIONS);

        META_STATES = new EnumMap<>(JobState.class);
        META_STATES.put(Accepted, MetaState.Active);
        META_STATES.put(Launched, MetaState.Active);
        META_STATES.put(Failed, MetaState.Terminal);
        META_STATES.put(Completed, MetaState.Terminal);
        META_STATES.put(Terminating_abnormal, MetaState.Terminal);
        META_STATES.put(Terminating_normal, MetaState.Terminal);
    }

    /**
     * A higher level roll up of states indicating active or terminal status of the job.
     */
    public enum MetaState {
        /** Indicates the job is active. */
        Active,
        /** Indicates the job is completed. */
        Terminal
    }

    /**
     * Rolls up given {@link JobState} to a {@link MetaState}.
     *
     * @param state the state to roll up
     * @return the corresponding meta state, or null for Noop which has no mapping
     */
    public static MetaState toMetaState(JobState state) {
        return META_STATES.get(state);
    }

    /**
     * Checks if the transition to the given state is valid from current state.
     * States with no registered transitions (including Noop, which previously caused an NPE
     * here) report every transition as invalid.
     *
     * @param newState the proposed target state
     * @return true if the transition is allowed
     */
    public boolean isValidStateChgTo(JobState newState) {
        for (JobState validState : VALID_CHANGES.getOrDefault(this, NO_TRANSITIONS)) {
            if (validState == newState) {
                return true;
            }
        }
        return false;
    }

    /**
     * Returns true if the given state is terminal (or terminating).
     *
     * @param state the state to test
     * @return true for Failed, Completed and both Terminating states
     */
    public static boolean isTerminalState(JobState state) {
        switch (state) {
        case Failed:
        case Completed:
        case Terminating_normal:
        case Terminating_abnormal:
            return true;
        default:
            return false;
        }
    }

    /**
     * Returns true if this state is terminal (or terminating).
     *
     * @return whether this state is terminal
     */
    public boolean isTerminal() {
        return isTerminalState(this);
    }

    /**
     * Returns true if the given state represents the abnormal (error) termination path.
     *
     * @param started the state to test
     * @return true for Failed and Terminating_abnormal
     */
    public static boolean isErrorState(JobState started) {
        switch (started) {
        case Failed:
        case Terminating_abnormal:
            return true;
        default:
            return false;
        }
    }

    /**
     * Returns true if the job is actively running in the given state.
     *
     * @param state the state to test
     * @return true only for Launched
     */
    public static boolean isRunningState(JobState state) {
        return state == Launched;
    }
}
| 8,070 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/CostsCalculator.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import io.mantisrx.server.master.domain.Costs;
/**
 * Calculates the cost of a job.
 */
@FunctionalInterface
public interface CostsCalculator {
    /**
     * Computes the {@link Costs} for the given job.
     *
     * @param jobMetadata metadata of the job to compute costs for
     * @return the computed costs
     */
    Costs calculateCosts(IMantisJobMetadata jobMetadata);
    /**
     * Returns a calculator backed by {@link NoopCostsCalculator} for deployments that do not
     * track costs.
     *
     * @return a no-op costs calculator
     */
    static CostsCalculator noop() {
        return new NoopCostsCalculator();
    }
}
| 8,071 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/worker/JobWorker.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job.worker;
import static io.mantisrx.master.events.LifecycleEventsProto.StatusEvent;
import static io.mantisrx.master.events.LifecycleEventsProto.WorkerStatusEvent;
import static java.util.Optional.ofNullable;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.metrics.spectator.MetricGroupId;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.jobcluster.job.IMantisWorkerEventProcessor;
import io.mantisrx.master.jobcluster.job.JobActor;
import io.mantisrx.master.scheduler.WorkerStateAdapter;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.StatusPayloads;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.persistence.exceptions.InvalidWorkerStateChangeException;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.scheduler.*;
import java.io.IOException;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class encapsulates information about a worker of a job.
*/
public class JobWorker implements IMantisWorkerEventProcessor {
private static final Logger LOGGER = LoggerFactory.getLogger(JobWorker.class);
private final IMantisWorkerMetadata metadata;
private final LifecycleEventPublisher eventPublisher;
private final Metrics metrics;
private final MetricGroupId metricsGroupId;
private final Counter numWorkerLaunched;
private final Counter numWorkerTerminated;
private final Counter numWorkerLaunchFailed;
private final Counter numWorkerUnschedulable;
private final Counter numWorkersDisabledVM;
private final Counter numHeartBeatsReceived;
private final Gauge lastWorkerLaunchToStartMillis;
    /**
     * Creates an instance of JobWorker.
     * @param metadata The {@link IMantisWorkerMetadata} for this worker.
     * @param eventPublisher A {@link LifecycleEventPublisher} where lifecycle events are to be sent.
     */
    public JobWorker(final IMantisWorkerMetadata metadata,
                     final LifecycleEventPublisher eventPublisher) {
        Preconditions.checkNotNull(metadata, "metadata");
        this.metadata = metadata;
        this.eventPublisher = eventPublisher;
        // Metrics are grouped per jobId so each job's worker metrics are tagged separately.
        this.metricsGroupId = new MetricGroupId("JobWorker", new BasicTag("jobId", this.metadata.getJobId()));
        Metrics m = new Metrics.Builder()
                .id(metricsGroupId)
                .addCounter("numWorkerLaunched")
                .addCounter("numWorkerTerminated")
                .addCounter("numWorkerLaunchFailed")
                .addCounter("numWorkerUnschedulable")
                .addCounter("numWorkersDisabledVM")
                .addCounter("numHeartBeatsReceived")
                .addGauge("lastWorkerLaunchToStartMillis")
                .build();
        // Register the group and fetch counter/gauge handles from the registered instance
        // (registerAndGet may return a previously registered group for the same id).
        this.metrics = MetricsRegistry.getInstance().registerAndGet(m);
        this.numWorkerLaunched = metrics.getCounter("numWorkerLaunched");
        this.numWorkerTerminated = metrics.getCounter("numWorkerTerminated");
        this.numWorkerLaunchFailed = metrics.getCounter("numWorkerLaunchFailed");
        this.numWorkerUnschedulable = metrics.getCounter("numWorkerUnschedulable");
        this.numWorkersDisabledVM = metrics.getCounter("numWorkersDisabledVM");
        this.numHeartBeatsReceived = metrics.getCounter("numHeartBeatsReceived");
        this.lastWorkerLaunchToStartMillis = metrics.getGauge("lastWorkerLaunchToStartMillis");
    }
    /**
     * Returns the metadata backing this worker.
     *
     * @return the {@link IMantisWorkerMetadata} for this worker
     */
    public IMantisWorkerMetadata getMetadata() {
        return metadata;
    }
// Setters on mutable metadata
private MantisWorkerMetadataImpl mutableMetadata() {
if (metadata instanceof MantisWorkerMetadataImpl) {
return (MantisWorkerMetadataImpl) metadata;
} else {
throw new IllegalStateException();
}
}
    /**
     * Transitions this worker to the given state at the given time.
     *
     * @param newState target {@link WorkerState}
     * @param when     event time in epoch millis
     * @param reason   completion reason recorded with the transition
     * @throws InvalidWorkerStateChangeException if the transition is illegal from the current state
     */
    private void setState(WorkerState newState, long when, JobCompletedReason reason)
            throws InvalidWorkerStateChangeException {
        mutableMetadata().setState(newState, when, reason);
    }
    /** Records the time (epoch millis) of the most recent heartbeat from this worker. */
    private void setLastHeartbeatAt(long lastHeartbeatAt) {
        mutableMetadata().setLastHeartbeatAt(lastHeartbeatAt);
    }
    /** Records the hostname of the agent ("slave") this worker was placed on. */
    private void setSlave(String slave) {
        mutableMetadata().setSlave(slave);
    }
    /** Records the id of the agent VM this worker was placed on. */
    private void setSlaveID(String slaveID) {
        mutableMetadata().setSlaveID(slaveID);
    }
    /** Records the (optional) name of the cluster this worker was launched into. */
    private void setCluster(Optional<String> cluster) {
        mutableMetadata().setCluster(cluster);
    }
    /** Records the resource cluster {@link ClusterID} this worker was launched into. */
    private void setResourceCluster(ClusterID clusterID) {
        mutableMetadata().setResourceCluster(clusterID);
    }
    /**
     * Marks the worker as being subscribed.
     * @param isSub true if at least one downstream consumer is subscribed to this worker
     */
    void setIsSubscribed(boolean isSub) {
        mutableMetadata().setIsSubscribed(isSub);
    }
    /**
     * Adds the associated ports data.
     * @param ports the {@link WorkerPorts} allocated to this worker at launch
     */
    void addPorts(final WorkerPorts ports) {
        mutableMetadata().addPorts(ports);
    }
// Worker Event handlers
/**
* All events associated to this worker are processed in this method.
*
* @param workerEvent The {@link WorkerEvent} associated with this worker.
*
* @return boolean indicating whether to a change worth persisting occurred.
*
* @throws InvalidWorkerStateChangeException thrown if the worker event lead to an invalid state transition.
*/
public boolean processEvent(WorkerEvent workerEvent) throws InvalidWorkerStateChangeException {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Processing event {} for worker {}", workerEvent, metadata.getWorkerId());
}
boolean persistStateRequired = false;
if (workerEvent instanceof WorkerLaunched) {
persistStateRequired = onWorkerLaunched((WorkerLaunched) workerEvent);
} else if (workerEvent instanceof WorkerLaunchFailed) {
persistStateRequired = onWorkerLaunchFailed((WorkerLaunchFailed) workerEvent);
} else if (workerEvent instanceof WorkerUnscheduleable) {
persistStateRequired = onWorkerUnscheduleable((WorkerUnscheduleable) workerEvent);
} else if (workerEvent instanceof WorkerResourceStatus) {
persistStateRequired = onWorkerResourceStatus((WorkerResourceStatus) workerEvent);
} else if (workerEvent instanceof WorkerHeartbeat) {
persistStateRequired = onHeartBeat((WorkerHeartbeat) workerEvent);
} else if (workerEvent instanceof WorkerTerminate) {
persistStateRequired = onTerminate((WorkerTerminate) workerEvent);
} else if (workerEvent instanceof WorkerOnDisabledVM) {
persistStateRequired = onDisabledVM((WorkerOnDisabledVM) workerEvent);
} else if (workerEvent instanceof WorkerStatus) {
persistStateRequired = onWorkerStatus((WorkerStatus) workerEvent);
}
return persistStateRequired;
}
    /**
     * Handles a {@link WorkerStatus} update from the worker runtime. Running and terminal
     * states (StartInitiated, Started, Completed, Failed) are applied to this worker and a
     * lifecycle status event is published; scheduling-phase states (Launched, Accepted, Noop,
     * Unknown) are not expected in this event type and are logged and ignored.
     *
     * @param workerEvent the status event
     * @return true if the worker state changed and should be persisted
     * @throws InvalidWorkerStateChangeException if the transition is illegal from the current state
     */
    private boolean onWorkerStatus(WorkerStatus workerEvent) throws InvalidWorkerStateChangeException {
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("on WorkerStatus for {}", workerEvent);
        }
        switch (workerEvent.getState()) {
        // Intentional fall-through: all four states get the same update-and-publish handling.
        case StartInitiated:
        case Started:
        case Completed:
        case Failed:
            setState(workerEvent.getState(), workerEvent.getEventTimeMs(), workerEvent.getStatus().getReason());
            eventPublisher.publishStatusEvent(new WorkerStatusEvent(
                    StatusEvent.StatusEventType.INFO,
                    "worker status update", metadata.getStageNum(), workerEvent.getWorkerId(),
                    workerEvent.getState()));
            return true;
        case Launched:
        case Accepted:
        case Noop:
        case Unknown:
        default:
            LOGGER.warn("unexpected worker state {} in WorkerStatus update", workerEvent.getState().name());
            break;
        }
        return false;
    }
    /**
     * Handles notification that this worker is running on a disabled VM. Only bumps a metric
     * and logs; any migration off the VM is presumably driven elsewhere — TODO confirm.
     *
     * @param workerEvent the disabled-VM event
     * @return false — no metadata change to persist
     */
    private boolean onDisabledVM(WorkerOnDisabledVM workerEvent) {
        numWorkersDisabledVM.increment();
        LOGGER.info("on WorkerDisabledVM for {}", workerEvent);
        return false;
    }
/**
 * Applies a {@link WorkerTerminate} event: moves the worker into the event's final state
 * and publishes a lifecycle status event.
 *
 * @param workerEvent the terminate event carrying the final state and reason.
 * @return always {@code true}; the state change must be persisted.
 * @throws InvalidWorkerStateChangeException if the transition to the final state is not legal.
 */
private boolean onTerminate(WorkerTerminate workerEvent) throws InvalidWorkerStateChangeException {
    numWorkerTerminated.increment();
    // May throw; the status event below is only published after a successful transition.
    setState(workerEvent.getFinalState(), workerEvent.getEventTimeMs(), workerEvent.getReason());
    final WorkerStatusEvent statusEvent = new WorkerStatusEvent(
            StatusEvent.StatusEventType.INFO,
            "worker terminated", -1, workerEvent.getWorkerId(), WorkerState.Failed,
            ofNullable(metadata.getSlave()));
    eventPublisher.publishStatusEvent(statusEvent);
    return true;
}
/**
 * Updates this {@link JobWorker}'s metadata from a {@link WorkerLaunched} event received by Mesos.
 * Placement details (host, ports, slave id, cluster) are recorded first, then the worker's state
 * is moved to Launched via {@link JobWorker#setState(WorkerState, long, JobCompletedReason)}.
 * If the state transition is invalid, an {@link InvalidWorkerStateChangeException} propagates to
 * the corresponding {@link JobActor} to handle.
 *
 * @param workerEvent an event received by Mesos with worker metadata after it was launched.
 *
 * @return always {@code true}: metadata changed and must be persisted. A failure to serialize
 *         the port list for the published status event is only logged, not propagated.
 *
 * @throws InvalidWorkerStateChangeException if moving to Launched is not a legal transition.
 */
private boolean onWorkerLaunched(WorkerLaunched workerEvent) throws InvalidWorkerStateChangeException {
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("Processing for worker {} with id {}", workerEvent, metadata.getWorkerId());
    }
    // Record placement details before flipping the state to Launched.
    setSlave(workerEvent.getHostname());
    addPorts(workerEvent.getPorts());
    setSlaveID(workerEvent.getVmId());
    setCluster(workerEvent.getClusterName());
    workerEvent.getResourceCluster().ifPresent(this::setResourceCluster);
    setState(WorkerState.Launched, workerEvent.getEventTimeMs(), JobCompletedReason.Normal);
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("Worker {} state changed to Launched", workerEvent.getWorkerId());
    }
    numWorkerLaunched.increment();
    try {
        // Best effort: publishing the status event must not fail the launch handling.
        eventPublisher.publishStatusEvent(new WorkerStatusEvent(
            StatusEvent.StatusEventType.INFO,
            "scheduled on " + workerEvent.getHostname() + " with ports "
                + Jackson.toJson(workerEvent.getPorts()), workerEvent.getStageNum(),
            workerEvent.getWorkerId(), WorkerState.Launched));
    } catch (IOException e) {
        LOGGER.warn("Error publishing status event for worker {} launch", workerEvent.getWorkerId(), e);
    }
    return true;
}
/**
 * Handles a worker resource status update from Mesos, reconciling it against our recorded state.
 *
 * @param workerEvent the resource status update from Mesos.
 * @return {@code true} if our state was updated (resource reported terminal while we thought the
 *         worker was running), {@code false} otherwise.
 * @throws InvalidWorkerStateChangeException if the induced state transition is not legal.
 */
private boolean onWorkerResourceStatus(final WorkerResourceStatus workerEvent)
    throws InvalidWorkerStateChangeException {
    WorkerState workerStateFromEvent = WorkerStateAdapter.from(workerEvent.getState());
    // if worker current state is terminated, but we get a resource update from Mesos
    // saying worker is still running, terminate the task
    if (WorkerState.isRunningState(workerStateFromEvent)) {
        if (WorkerState.isTerminalState(metadata.getState())) {
            numWorkerTerminated.increment();
            // kill worker
            // NOTE(review): only the metric is incremented here; no kill request is issued from
            // this method. Confirm the actual task kill is triggered elsewhere (e.g. by the
            // JobActor or scheduler reconciliation).
        }
    }
    // Resource status is terminal but our metadata shows worker as running => update our worker state
    // based on event and
    if (WorkerState.isTerminalState(workerStateFromEvent)) {
        if (!WorkerState.isTerminalState(metadata.getState())) {
            LOGGER.info("Worker {} state changed to {}", this, workerEvent.getState());
            setState(workerStateFromEvent, workerEvent.getEventTimeMs(), JobCompletedReason.Normal);
            eventPublisher.publishStatusEvent(new WorkerStatusEvent(
                StatusEvent.StatusEventType.INFO,
                "worker resource state " + workerEvent.getMessage(), -1,
                workerEvent.getWorkerId(), workerStateFromEvent, ofNullable(metadata.getSlave())));
            return true;
        }
    }
    return false;
}
/**
 * Handles a {@link WorkerHeartbeat} event.
 *
 * Assumptions:
 *
 * 1. Heartbeats from workers of terminated jobs are ignored at a higher level.
 *
 * The first heartbeat moves a not-yet-Started worker to Started; every heartbeat refreshes the
 * last-heartbeat timestamp and may update the subscription flag from the heartbeat payload.
 *
 * @param workerEvent a {@link WorkerHeartbeat} event.
 *
 * @return {@code true} if the worker transitioned to Started or its subscription flag changed
 *         (i.e. durable state must be persisted), {@code false} otherwise.
 *
 * @throws InvalidWorkerStateChangeException if it fails to persist worker state
 * via {@link JobWorker#setState(WorkerState, long, JobCompletedReason)}.
 */
private boolean onHeartBeat(WorkerHeartbeat workerEvent) throws InvalidWorkerStateChangeException {
    numHeartBeatsReceived.increment();
    if (LOGGER.isTraceEnabled()) {
        LOGGER.trace("Job {} Processing onHeartBeat for {}", this.metadata.getJobId(),
            metadata.getWorkerId());
    }
    WorkerState workerState = metadata.getState();
    // Refresh liveness regardless of whether the state changes below.
    setLastHeartbeatAt(workerEvent.getEventTimeMs());
    boolean persistStateRequired = false;
    if (workerState != WorkerState.Started) {
        // First heartbeat seen in a non-Started state: promote to Started.
        setState(WorkerState.Started, workerEvent.getEventTimeMs(), JobCompletedReason.Normal);
        persistStateRequired = true;
        // Track launch-to-start latency; a non-positive delta indicates clock skew or a
        // missing/out-of-order launch timestamp.
        final long startLatency = workerEvent.getEventTimeMs() - metadata.getLaunchedAt();
        if (startLatency > 0) {
            lastWorkerLaunchToStartMillis.set(startLatency);
        } else {
            LOGGER.info("Unexpected error when computing startlatency for {} start time {} launch time {}",
                workerEvent.getWorkerId().getId(), workerEvent.getEventTimeMs(), metadata.getLaunchedAt());
        }
        LOGGER.info("Job {} Worker {} started ", metadata.getJobId(), metadata.getWorkerId());
        eventPublisher.publishStatusEvent(new WorkerStatusEvent(
            StatusEvent.StatusEventType.INFO,
            "setting worker Started on heartbeat", workerEvent.getStatus().getStageNum(),
            workerEvent.getWorkerId(), WorkerState.Started, ofNullable(metadata.getSlave())));
    }
    // A heartbeat may carry a SubscriptionState payload ("true"/"false"); persist only on change.
    List<Status.Payload> payloads = workerEvent.getStatus().getPayloads();
    for (Status.Payload payload : payloads) {
        if (payload.getType().equals(StatusPayloads.Type.SubscriptionState.toString())) {
            String data = payload.getData();
            try {
                boolean subscriptionStatus = Boolean.parseBoolean(data);
                if (getMetadata().getIsSubscribed() != subscriptionStatus) {
                    setIsSubscribed(subscriptionStatus);
                    persistStateRequired = true;
                }
            } catch (Exception e) {
                // could not parse subscriptionstatus; keep the previous flag.
                LOGGER.warn("Exception parsing subscription payload", e);
            }
        }
    }
    return persistStateRequired;
}
/**
 * Marks this worker Failed in response to a launch failure and publishes an ERROR status event.
 *
 * @param workerEvent the launch-failure event, including the scheduler's error message.
 * @return always {@code true}; the Failed state must be persisted.
 * @throws InvalidWorkerStateChangeException if the transition to Failed is not legal.
 */
private boolean onWorkerLaunchFailed(WorkerLaunchFailed workerEvent) throws InvalidWorkerStateChangeException {
    numWorkerLaunchFailed.increment();
    setState(WorkerState.Failed, workerEvent.getEventTimeMs(), JobCompletedReason.Error);
    final String message = "worker launch failed, reason: " + workerEvent.getErrorMessage();
    eventPublisher.publishStatusEvent(new WorkerStatusEvent(
            StatusEvent.StatusEventType.ERROR,
            message, workerEvent.getStageNum(),
            workerEvent.getWorkerId(), WorkerState.Failed));
    return true;
}
/**
 * Counts an unschedulable notification for this worker. The readyAt adjustment for this event
 * is performed by the owning JobActor when it receives the same event, so this method only
 * updates the metric.
 *
 * @param workerEvent the unschedulable notification.
 * @return always {@code true}.
 */
private boolean onWorkerUnscheduleable(WorkerUnscheduleable workerEvent) {
    numWorkerUnschedulable.increment();
    return true;
}
/**
 * Processes a {@link WorkerEvent} and, when the event changed durable state, saves/updates the
 * worker metadata in the {@link MantisJobStore}. Events addressed to a different worker id
 * (likely an older incarnation of this worker index) are logged and dropped.
 *
 * @param event a worker event which can be one of many event types such as launched, heartbeat, etc.
 * @param jobStore a place to persist metadata.
 *
 * @throws InvalidWorkerStateChangeException if a worker failed to persist its state.
 * @throws IOException if the job store failed to update the worker metadata.
 */
@Override
public void processEvent(final WorkerEvent event, final MantisJobStore jobStore)
    throws InvalidWorkerStateChangeException, IOException {
    if (!event.getWorkerId().equals(this.metadata.getWorkerId())) {
        // Probably an event from an old worker number; ignore it.
        LOGGER.warn("Current workerId is " + this.metadata.getWorkerId()
            + " event received from workerId " + event.getWorkerId() + " ignoring");
        return;
    }
    if (processEvent(event)) {
        jobStore.updateWorker(this.metadata);
    }
}
/**
 * Builder to enable fluid creation of a {@link JobWorker}.
 */
public static class Builder {
    // Sentinel marking required numeric fields that were never set.
    private static final int INVALID_VALUE = -1;
    private int workerIndex = INVALID_VALUE;
    private int workerNumber = INVALID_VALUE;
    private String jobId = null;
    private int stageNum = INVALID_VALUE;
    private int numberOfPorts = INVALID_VALUE;
    private WorkerPorts workerPorts = null;
    // New workers start in Accepted unless overridden.
    private WorkerState state = WorkerState.Accepted;
    private String slave = null;
    private String slaveID = null;
    private long acceptedAt = System.currentTimeMillis();
    private long launchedAt = -1;
    private long startingAt = -1;
    private long startedAt = -1;
    private long completedAt = -1;
    private JobCompletedReason reason = JobCompletedReason.Normal;
    private int resubmitOf = 0;
    private int totalResubmitCount = 0;
    private Optional<String> preferredCluster = Optional.empty();
    private Optional<ClusterID> resourceCluster = Optional.empty();
    private IMantisWorkerMetadata metadata;
    private LifecycleEventPublisher eventPublisher;

    /**
     * Default constructor.
     */
    public Builder() {
    }

    /**
     * Required. WorkerIndex of this worker.
     * @param ind the worker index; must be non-negative.
     * @return this builder for chaining.
     */
    public JobWorker.Builder withWorkerIndex(int ind) {
        this.workerIndex = ind;
        return this;
    }

    /**
     * Required. Worker number associated with this worker.
     * @param num the worker number; must be non-negative.
     * @return this builder for chaining.
     */
    public JobWorker.Builder withWorkerNumber(int num) {
        this.workerNumber = num;
        return this;
    }

    /**
     * Optional. Resubmit count associated with this workerIndex.
     * @param c the total resubmit count; must be non-negative.
     * @return this builder for chaining.
     */
    public JobWorker.Builder withResubmitCount(int c) {
        this.totalResubmitCount = c;
        return this;
    }

    /**
     * Optional. If this is a resubmit of an old worker then the Worker Number of the old worker.
     * @param r the worker number of the worker this one replaces.
     * @return this builder for chaining.
     */
    public JobWorker.Builder withResubmitOf(int r) {
        this.resubmitOf = r;
        return this;
    }

    /**
     * Required. Job id for this worker.
     * @param jid the job id string.
     * @return this builder for chaining.
     */
    public JobWorker.Builder withJobId(String jid) {
        this.jobId = jid;
        return this;
    }

    /**
     * Required (if String version not used). {@link JobId} of the job of this worker.
     * @param jid the job id object; its string form is stored.
     * @return this builder for chaining.
     */
    public JobWorker.Builder withJobId(JobId jid) {
        this.jobId = jid.getId();
        return this;
    }

    /**
     * Required. Stage number for this worker.
     * @param num the stage number; must be non-negative.
     * @return this builder for chaining.
     */
    public JobWorker.Builder withStageNum(int num) {
        this.stageNum = num;
        return this;
    }

    /**
     * Required. Number of ports to be assigned to this worker.
     * @param portNums the port count; must be non-negative.
     * @return this builder for chaining.
     */
    public JobWorker.Builder withNumberOfPorts(int portNums) {
        this.numberOfPorts = portNums;
        return this;
    }

    /**
     * Required. Details of the ports assigned to this worker.
     * @param workerP the assigned worker ports.
     * @return this builder for chaining.
     */
    public JobWorker.Builder withWorkerPorts(WorkerPorts workerP) {
        this.workerPorts = workerP;
        return this;
    }

    /**
     * Optional. The {@link WorkerState} associated with this worker.
     * @param state the initial state; defaults to Accepted.
     * @return this builder for chaining.
     */
    public JobWorker.Builder withState(WorkerState state) {
        this.state = state;
        return this;
    }

    /**
     * (Optional) Mesos Slave on which this worker is executing.
     * @param slave the slave hostname.
     * @return this builder for chaining.
     */
    public JobWorker.Builder withSlave(String slave) {
        this.slave = slave;
        return this;
    }

    /**
     * (Optional) Mesos slave Id on which this worker is executing.
     * @param slaveid the slave id.
     * @return this builder for chaining.
     */
    public JobWorker.Builder withSlaveID(String slaveid) {
        this.slaveID = slaveid;
        return this;
    }

    /**
     * (Optional) The timestamp this worker went into accepted state.
     * @param acc epoch millis; defaults to the builder's creation time.
     * @return this builder for chaining.
     */
    public JobWorker.Builder withAcceptedAt(long acc) {
        this.acceptedAt = acc;
        return this;
    }

    /**
     * (Optional) The timestamp this worker went into launched state.
     * @param la epoch millis.
     * @return this builder for chaining.
     */
    public JobWorker.Builder withLaunchedAt(long la) {
        this.launchedAt = la;
        return this;
    }

    /**
     * (Optional) The timestamp this worker went into starting state.
     * @param sa epoch millis.
     * @return this builder for chaining.
     */
    public JobWorker.Builder withStartingAt(long sa) {
        this.startingAt = sa;
        return this;
    }

    /**
     * (Optional) The timestamp this worker went into started state.
     * @param sa epoch millis.
     * @return this builder for chaining.
     */
    public JobWorker.Builder withStartedAt(long sa) {
        this.startedAt = sa;
        return this;
    }

    /**
     * (Optional) The timestamp this worker went into terminal state.
     * @param ca epoch millis.
     * @return this builder for chaining.
     */
    public JobWorker.Builder withCompletedAt(long ca) {
        this.completedAt = ca;
        return this;
    }

    /**
     * (Optional) The preferred cluster where this worker should be scheduled.
     * @param preferredCluster the preferred cluster, or empty for no preference.
     * @return this builder for chaining.
     */
    public JobWorker.Builder withPreferredCluster(Optional<String> preferredCluster) {
        this.preferredCluster = preferredCluster;
        return this;
    }

    /**
     * (Optional) The resource cluster this worker belongs to.
     * @param resourceCluster the resource cluster id; must be non-null.
     * @return this builder for chaining.
     */
    public JobWorker.Builder withResourceCluster(ClusterID resourceCluster) {
        this.resourceCluster = Optional.of(resourceCluster);
        return this;
    }

    /**
     * (Optional) The reason for worker termination.
     * @param reason the completion reason; defaults to Normal.
     * @return this builder for chaining.
     */
    public JobWorker.Builder withJobCompletedReason(JobCompletedReason reason) {
        this.reason = reason;
        return this;
    }

    /**
     * (Required) The listener where worker lifecycle events are published.
     * @param publisher the lifecycle event publisher; must be non-null at build time.
     * @return this builder for chaining.
     */
    public JobWorker.Builder withLifecycleEventsPublisher(LifecycleEventPublisher publisher) {
        this.eventPublisher = publisher;
        return this;
    }

    /**
     * Helper builder which clones from an instance of {@link IMantisWorkerMetadata} object.
     * Copies every metadata field; the event publisher must still be set separately.
     * @param cloneFrom the metadata instance to copy fields from.
     * @return this builder for chaining.
     */
    public JobWorker.Builder from(IMantisWorkerMetadata cloneFrom) {
        workerIndex = cloneFrom.getWorkerIndex();
        workerNumber = cloneFrom.getWorkerNumber();
        jobId = cloneFrom.getJobId();
        stageNum = cloneFrom.getStageNum();
        numberOfPorts = cloneFrom.getNumberOfPorts();
        if (cloneFrom.getPorts().isPresent()) {
            workerPorts = cloneFrom.getPorts().get();
        }
        state = cloneFrom.getState();
        slave = cloneFrom.getSlave();
        slaveID = cloneFrom.getSlaveID();
        acceptedAt = cloneFrom.getAcceptedAt();
        launchedAt = cloneFrom.getLaunchedAt();
        startingAt = cloneFrom.getStartingAt();
        startedAt = cloneFrom.getStartedAt();
        completedAt = cloneFrom.getCompletedAt();
        reason = cloneFrom.getReason();
        resubmitOf = cloneFrom.getResubmitOf();
        totalResubmitCount = cloneFrom.getTotalResubmitCount();
        preferredCluster = cloneFrom.getPreferredClusterOptional();
        resourceCluster = cloneFrom.getResourceCluster();
        return this;
    }

    /**
     * Creates and returns an instance of {@link JobWorker}.
     * Validates all required fields before constructing the metadata.
     * @return a new JobWorker built from this builder's fields.
     * @throws IllegalArgumentException if any required field is missing or invalid.
     * @throws NullPointerException if the job id was never set.
     */
    public JobWorker build() {
        Objects.requireNonNull(jobId, "Job Id cannot be null");
        if (workerIndex <= INVALID_VALUE) {
            IllegalArgumentException ex = new IllegalArgumentException(
                String.format("Invalid workerIndex %d specified", workerIndex));
            LOGGER.error("Invalid worker index specified {}", workerIndex, ex);
            throw ex;
        }
        if (workerNumber <= INVALID_VALUE) {
            LOGGER.error("Invalid worker number specified {}", workerNumber);
            throw new IllegalArgumentException(String.format("Invalid workerNumber %d specified", workerNumber));
        }
        if (stageNum <= INVALID_VALUE) {
            LOGGER.error("Invalid stage num specified {}", stageNum);
            throw new IllegalArgumentException(String.format("Invalid stageNum %d specified", stageNum));
        }
        if (numberOfPorts <= INVALID_VALUE) {
            LOGGER.error("Invalid num ports specified {}", numberOfPorts);
            throw new IllegalArgumentException(String.format("Invalid no of Ports %d specified", numberOfPorts));
        }
        if (totalResubmitCount < 0) {
            LOGGER.error("Invalid resubmit count specified {}", totalResubmitCount);
            throw new IllegalArgumentException(
                String.format("Invalid resubmit Count %d specified", totalResubmitCount));
        }
        if (eventPublisher == null) {
            IllegalArgumentException ex = new IllegalArgumentException(
                "lifecycle event publisher cannot be null");
            LOGGER.error("lifecycle event publisher is null", ex);
            throw ex;
        }
        this.metadata = new MantisWorkerMetadataImpl(workerIndex,
            workerNumber,
            jobId,
            stageNum,
            numberOfPorts,
            workerPorts,
            state,
            slave,
            slaveID,
            acceptedAt,
            launchedAt,
            startingAt,
            startedAt,
            completedAt,
            reason,
            resubmitOf,
            totalResubmitCount,
            preferredCluster,
            resourceCluster
        );
        return new JobWorker(this.metadata, this.eventPublisher);
    }
}
/** Equality is based on the worker metadata and the lifecycle event publisher. */
@Override
public boolean equals(final Object o) {
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    final JobWorker that = (JobWorker) o;
    return Objects.equals(metadata, that.metadata)
        && Objects.equals(eventPublisher, that.eventPublisher);
}
/** Hash code consistent with {@link #equals(Object)}: derived from metadata and publisher. */
@Override
public int hashCode() {
    return Objects.hash(metadata, eventPublisher);
}
/** Only the metadata is rendered; the event publisher carries no identifying state. */
@Override
public String toString() {
    return "JobWorker{" + "metadata=" + metadata + '}';
}
}
| 8,072 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/worker/WorkerHeartbeat.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job.worker;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import java.time.Instant;
/**
 * A WorkerHeartbeat object encapsulates the heart beat data sent by the worker to the master.
 */
public class WorkerHeartbeat implements WorkerEvent {
    private final WorkerId workerId;
    private final Status heartBeat;
    private final WorkerState workerState;
    private final long time;

    /**
     * Creates an instance of this class from a {@link Status} object, using the status's own
     * timestamp as the event time.
     *
     * @param hb the heartbeat status reported by the worker; must carry a worker id.
     */
    public WorkerHeartbeat(Status hb) {
        this(hb, Instant.ofEpochMilli(hb.getTimestamp()));
    }

    /**
     * For testing only: allows the event time to be injected.
     *
     * @param hb the heartbeat status reported by the worker; must carry a worker id.
     * @param time the event time to record for this heartbeat.
     * @throws IllegalArgumentException if the status carries no worker id.
     */
    public WorkerHeartbeat(Status hb, Instant time) {
        this.heartBeat = hb;
        this.time = time.toEpochMilli();
        // Fail fast with a descriptive message instead of an unchecked Optional.get().
        this.workerId = hb.getWorkerId()
            .orElseThrow(() -> new IllegalArgumentException("heartbeat status has no workerId: " + hb));
        this.workerState = toWorkerState(heartBeat.getState());
    }

    /**
     * Maps the job-side {@link MantisJobState} to the master-side {@link WorkerState};
     * any unrecognized state maps to {@link WorkerState#Unknown}.
     */
    private static WorkerState toWorkerState(MantisJobState state) {
        switch (state) {
            case Launched:
                return WorkerState.Launched;
            case Started:
                return WorkerState.Started;
            case StartInitiated:
                return WorkerState.StartInitiated;
            case Completed:
                return WorkerState.Completed;
            case Failed:
                return WorkerState.Failed;
            case Noop:
                return WorkerState.Noop;
            default:
                return WorkerState.Unknown;
        }
    }

    @Override
    public WorkerId getWorkerId() {
        return this.workerId;
    }

    /** @return the raw {@link Status} this heartbeat was built from. */
    public Status getStatus() {
        return this.heartBeat;
    }

    /** @return the worker state derived from the heartbeat's job state. */
    public WorkerState getState() {
        return workerState;
    }

    @Override
    public long getEventTimeMs() {
        return this.time;
    }

    @Override
    public String toString() {
        return "WorkerHeartbeat [workerId=" + workerId + ", heartBeat="
            + heartBeat + ", workerState=" + workerState
            + ", time=" + time + "]";
    }
}
| 8,073 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/worker/WorkerStatus.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job.worker;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import java.time.Instant;
/**
 * Encapsulates the status data sent by the worker to the master.
 */
public class WorkerStatus implements WorkerEvent {
    private final WorkerId workerId;
    private final Status heartBeat;
    private final WorkerState workerState;
    private final long time;

    /**
     * Creates an instance using the given {@link Status}, taking the event time from the
     * status's own timestamp.
     *
     * @param hb the status reported by the worker; must carry a worker id.
     */
    public WorkerStatus(Status hb) {
        this(hb, Instant.ofEpochMilli(hb.getTimestamp()));
    }

    /**
     * Used for testing.
     *
     * @param hb the status reported by the worker; must carry a worker id.
     * @param time artificially inject time
     * @throws IllegalArgumentException if the status carries no worker id.
     */
    public WorkerStatus(Status hb, Instant time) {
        this.heartBeat = hb;
        this.time = time.toEpochMilli();
        // Fail fast with a descriptive message instead of an unchecked Optional.get().
        this.workerId = hb.getWorkerId()
            .orElseThrow(() -> new IllegalArgumentException("status has no workerId: " + hb));
        this.workerState = toWorkerState(heartBeat.getState());
    }

    /**
     * Maps the job-side {@link MantisJobState} to the master-side {@link WorkerState};
     * any unrecognized state maps to {@link WorkerState#Unknown}.
     */
    private static WorkerState toWorkerState(MantisJobState state) {
        switch (state) {
            case Launched:
                return WorkerState.Launched;
            case Started:
                return WorkerState.Started;
            case StartInitiated:
                return WorkerState.StartInitiated;
            case Completed:
                return WorkerState.Completed;
            case Failed:
                return WorkerState.Failed;
            case Noop:
                return WorkerState.Noop;
            default:
                return WorkerState.Unknown;
        }
    }

    @Override
    public WorkerId getWorkerId() {
        return this.workerId;
    }

    /** @return the raw {@link Status} this event was built from. */
    public Status getStatus() {
        return this.heartBeat;
    }

    /** @return the worker state derived from the status's job state. */
    public WorkerState getState() {
        return workerState;
    }

    @Override
    public long getEventTimeMs() {
        return this.time;
    }

    @Override
    public String toString() {
        // Bug fix: previously identified itself as "WorkerHeartbeat" (copy-paste error).
        return "WorkerStatus [workerId=" + workerId + ", heartBeat=" + heartBeat + ", workerState=" + workerState
            + ", time=" + time + "]";
    }
}
| 8,074 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/worker/IMantisWorkerMetadata.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job.worker;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonSubTypes;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonTypeInfo;
import java.time.Instant;
import java.util.Optional;
/**
 * Metadata object for a Mantis worker. Modification operations do not perform locking.
 * NOTE(review): the original doc referenced an {@code obtainLock()} method, but no such method
 * is declared on this interface -- confirm whether locking is provided by an implementation.
 */
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS,
    include = JsonTypeInfo.As.PROPERTY,
    property = "type")
@JsonSubTypes({
    @JsonSubTypes.Type(value = MantisWorkerMetadataImpl.class)
})
public interface IMantisWorkerMetadata {
    /**
     * Index assigned to this worker.
     * @return the worker index.
     */
    int getWorkerIndex();

    /**
     * Number assigned to this worker.
     * @return the worker number.
     */
    int getWorkerNumber();

    /**
     * JobId of the Job this worker belongs to.
     * @return the job id string.
     */
    String getJobId();

    /**
     * Returns the {@link JobId} for this worker.
     * @return the job id object.
     */
    @JsonIgnore
    JobId getJobIdObject();

    /**
     * Returns the {@link WorkerId} associated with this worker.
     * @return the worker id.
     */
    WorkerId getWorkerId();

    /**
     * The stage number this worker belongs to.
     * @return the stage number.
     */
    int getStageNum();

    /**
     * @return the {@link WorkerPorts} for this worker.
     */
    WorkerPorts getWorkerPorts();

    /**
     * The port on which Metrics stream is served.
     * @return the metrics port.
     */
    int getMetricsPort();

    /**
     * The port which can be used to connect jconsole to.
     * @return the debug port.
     */
    int getDebugPort();

    /**
     * A custom port associated with Netflix Admin console if enabled.
     * @return the console port.
     */
    int getConsolePort();

    /**
     * A free form port to be used by the job for any purpose.
     * @return the custom port.
     */
    int getCustomPort();

    /**
     * The port which can be used to connect to other workers.
     *
     * @return the sink port.
     */
    int getSinkPort();

    /**
     * The AWS cluster on which the worker was launched.
     * Used to maintain affinity during deploys.
     * @return the cluster, or empty if unknown.
     */
    Optional<String> getCluster();

    /**
     * The resource cluster this worker belongs to, if any.
     * @return the resource cluster id, or empty.
     */
    Optional<ClusterID> getResourceCluster();

    /**
     * Get number of ports for this worker, including the metrics port.
     * @return The number of ports
     */
    int getNumberOfPorts();

    /**
     * Returns an optional of {@link WorkerPorts} associated with this worker.
     * @return the worker ports, or empty if not yet assigned.
     */
    Optional<WorkerPorts> getPorts();

    /**
     * A count of the number of times this worker has been resubmitted.
     * @return the total resubmit count.
     */
    int getTotalResubmitCount();

    /**
     * Get the worker number (not index) of which this is a resubmission of.
     * @return the replaced worker's number.
     */
    int getResubmitOf();

    /**
     * Returns the current {@link WorkerState} of this worker.
     * @return the current worker state.
     */
    WorkerState getState();

    /**
     * Returns the mesos slave on which this worker is executing.
     * @return the slave hostname.
     */
    String getSlave();

    /**
     * Returns the mesos slaveId on which this worker is executing.
     * @return the slave id.
     */
    String getSlaveID();

    /**
     * Returns whether a listener exists that is streaming the results computed by this worker.
     * @return {@code true} if subscribed.
     */
    boolean getIsSubscribed();

    /**
     * The timestamp at which this worker went into Accepted state.
     * @return epoch millis.
     */
    long getAcceptedAt();

    /**
     * The timestamp at which this worker landed on a mesos slave.
     * @return epoch millis.
     */
    long getLaunchedAt();

    /**
     * The timestamp at which this worker started initialization.
     * @return epoch millis.
     */
    long getStartingAt();

    /**
     * The timestamp the worker reported as running.
     * @return epoch millis.
     */
    long getStartedAt();

    /**
     * The timestamp the worker was marked for termination.
     * @return epoch millis.
     */
    long getCompletedAt();

    /**
     * If in terminal state returns the reason for completion.
     * @return the completion reason.
     */
    JobCompletedReason getReason();

    /**
     * The preferred AWS cluster on which to schedule this worker.
     * @return the preferred cluster, or empty.
     */
    Optional<String> getPreferredClusterOptional();

    /**
     * The last time a heartbeat was received from this worker.
     * @return the last heartbeat instant, or empty if none received yet.
     */
    Optional<Instant> getLastHeartbeatAt();
}
| 8,075 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/worker/MantisWorkerMetadataImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job.worker;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.persistence.exceptions.InvalidWorkerStateChangeException;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonFilter;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.time.Instant;
import java.util.Objects;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Holds metadata related to a Mantis Worker.
*/
@JsonFilter("topLevelFilter")
public class MantisWorkerMetadataImpl implements IMantisWorkerMetadata {
private static final Logger LOGGER = LoggerFactory.getLogger(MantisWorkerMetadataImpl.class);
/**
 * metrics, debug, console and custom port.
 */
@JsonIgnore
public static final int MANTIS_SYSTEM_ALLOCATED_NUM_PORTS = 4;
// Immutable identity fields, fixed at construction.
private final int workerIndex;
private int workerNumber;
private String jobId;
@JsonIgnore
private JobId jobIdObj;
private final int stageNum;
private final int numberOfPorts;
@JsonIgnore
private final WorkerId workerId;
private WorkerPorts workerPorts;
// Mutable lifecycle fields; volatile for cross-thread visibility of single writes.
private volatile WorkerState state;
private volatile String slave;
private volatile String slaveID;
// Transition timestamps (epoch millis); 0 means the state was never reached.
private volatile long acceptedAt = 0;
private volatile long launchedAt = 0;
private volatile long startingAt = 0;
private volatile long startedAt = 0;
private volatile long completedAt = 0;
private volatile JobCompletedReason reason;
// -1 means this worker is not a resubmission of another worker.
private volatile int resubmitOf = -1;
private volatile int totalResubmitCount = 0;
// Not persisted: rebuilt from heartbeats after a master failover.
@JsonIgnore
private volatile Optional<Instant> lastHeartbeatAt = Optional.empty();
private volatile boolean subscribed;
private volatile Optional<String> preferredCluster;
private volatile Optional<ClusterID> resourceCluster;
/**
 * Creates an instance of this class. Also used by Jackson to deserialize persisted metadata.
 *
 * @param workerIndex index of the worker within its stage.
 * @param workerNumber unique worker number within the job.
 * @param jobId id of the owning job; must parse as a valid {@link JobId}.
 * @param stageNum stage this worker belongs to.
 * @param numberOfPorts number of ports assigned, including system ports.
 * @param workerPorts assigned port details (may be null before launch).
 * @param state current worker state.
 * @param slave hostname of the slave running the worker.
 * @param slaveID id of the slave running the worker.
 * @param acceptedAt epoch millis the worker was accepted.
 * @param launchedAt epoch millis the worker was launched.
 * @param startingAt epoch millis the worker began initializing.
 * @param startedAt epoch millis the worker reported running.
 * @param completedAt epoch millis the worker reached a terminal state.
 * @param reason completion reason if terminal.
 * @param resubmitOf worker number this worker is a resubmission of, or -1.
 * @param totalResubmitCount total resubmissions for this worker index.
 * @param preferredCluster preferred cluster for scheduling, or empty.
 * @param resourceCluster owning resource cluster, or empty.
 * @throws IllegalArgumentException if {@code jobId} is not a valid job id.
 */
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public MantisWorkerMetadataImpl(@JsonProperty("workerIndex") int workerIndex,
                                @JsonProperty("workerNumber") int workerNumber,
                                @JsonProperty("jobId") String jobId,
                                @JsonProperty("stageNum") int stageNum,
                                @JsonProperty("numberOfPorts") int numberOfPorts,
                                @JsonProperty("workerPorts") WorkerPorts workerPorts,
                                @JsonProperty("state") WorkerState state,
                                @JsonProperty("slave") String slave,
                                @JsonProperty("slaveID") String slaveID,
                                @JsonProperty("acceptedAt") long acceptedAt,
                                @JsonProperty("launchedAt") long launchedAt,
                                @JsonProperty("startingAt") long startingAt,
                                @JsonProperty("startedAt") long startedAt,
                                @JsonProperty("completedAt") long completedAt,
                                @JsonProperty("reason") JobCompletedReason reason,
                                @JsonProperty("resubmitOf") int resubmitOf,
                                @JsonProperty("totalResubmitCount") int totalResubmitCount,
                                @JsonProperty("preferredCluster") Optional<String> preferredCluster,
                                @JsonProperty("resourceCluster") Optional<ClusterID> resourceCluster
) {
    this.workerIndex = workerIndex;
    this.workerNumber = workerNumber;
    this.jobId = jobId;
    this.jobIdObj = JobId.fromId(jobId).orElseThrow(() -> new IllegalArgumentException(
        "jobId format is invalid" + jobId));
    this.workerId = new WorkerId(jobId, workerIndex, workerNumber);
    this.stageNum = stageNum;
    this.numberOfPorts = numberOfPorts;
    this.workerPorts = workerPorts;
    // Duplicate assignments of state and totalResubmitCount removed (were set twice).
    this.state = state;
    this.slave = slave;
    this.slaveID = slaveID;
    this.acceptedAt = acceptedAt;
    this.launchedAt = launchedAt;
    this.completedAt = completedAt;
    this.startedAt = startedAt;
    this.startingAt = startingAt;
    this.reason = reason;
    this.resubmitOf = resubmitOf;
    this.totalResubmitCount = totalResubmitCount;
    this.preferredCluster = preferredCluster;
    this.resourceCluster = resourceCluster;
}
// Worker index within the stage.
public int getWorkerIndex() {
    return workerIndex;
}

// Unique worker number within the job.
public int getWorkerNumber() {
    return workerNumber;
}

// Id of the owning job.
public String getJobId() {
    return jobId;
}

// Parsed form of the job id; not serialized.
public JobId getJobIdObject() {
    return jobIdObj;
}

// Composite id (jobId + index + number); not serialized.
public WorkerId getWorkerId() {
    return workerId;
}

// Stage this worker belongs to.
public int getStageNum() {
    return stageNum;
}

// Total ports assigned, including the system-allocated ones.
public int getNumberOfPorts() {
    return numberOfPorts;
}

// Ports as an Optional; empty until ports are assigned at launch.
public Optional<WorkerPorts> getPorts() {
    return Optional.ofNullable(workerPorts);
}

// Raw ports object; may be null before launch.
public WorkerPorts getWorkerPorts() {
    return this.workerPorts;
}

// Package-private: records the ports assigned when the worker is launched.
void addPorts(final WorkerPorts ports) {
    this.workerPorts = (ports);
}

// Total number of resubmissions for this worker index.
public int getTotalResubmitCount() {
    return totalResubmitCount;
}

// The port accessors below return -1 when ports have not been assigned yet.
public int getMetricsPort() {
    return workerPorts == null ? -1 : workerPorts.getMetricsPort();
}

public int getDebugPort() {
    return workerPorts == null ? -1 : workerPorts.getDebugPort();
}

public int getConsolePort() {
    return workerPorts == null ? -1 : workerPorts.getConsolePort();
}

public int getCustomPort() {
    return workerPorts == null ? -1 : workerPorts.getCustomPort();
}

public int getSinkPort() {
    return workerPorts == null ? -1 : workerPorts.getSinkPort();
}

// Worker number this worker replaces, or -1 if not a resubmission.
public int getResubmitOf() {
    return resubmitOf;
}

// NOTE(review): private and unused within this view -- confirm whether it is still needed.
@JsonIgnore
private void setResubmitInfo(int resubmitOf, int totalCount) {
    this.resubmitOf = resubmitOf;
    this.totalResubmitCount = totalCount;
}

// Last heartbeat time; empty until the first heartbeat after (re)construction.
@JsonIgnore
public Optional<Instant> getLastHeartbeatAt() {
    return lastHeartbeatAt;
}

// Package-private: refreshed on every heartbeat (epoch millis).
@JsonIgnore
void setLastHeartbeatAt(long lastHeartbeatAt) {
    this.lastHeartbeatAt = Optional.of(Instant.ofEpochMilli(lastHeartbeatAt));
}
/**
 * Verifies the transition from the current state to {@code newState} is legal
 * per the WorkerState state machine; throws if it is not.
 */
private void validateStateChange(WorkerState newState) throws InvalidWorkerStateChangeException {
    if (!WorkerState.isValidStateChgTo(state, newState))
        throw new InvalidWorkerStateChangeException(jobId, workerId, state, newState);
}

/**
 * Updates the state of the worker, recording the timestamp of the transition
 * in the per-state field (acceptedAt, launchedAt, startingAt, startedAt or
 * completedAt). For terminal states a completion reason is also recorded,
 * defaulting to Lost (Failed) or Normal (Completed) when none is supplied.
 *
 * @param newState the state to transition to; must be a legal successor of the current state
 * @param when transition time in epoch millis
 * @param reason completion reason; only consulted for Failed/Completed, may be null
 * @throws InvalidWorkerStateChangeException if the transition is not allowed
 */
void setState(WorkerState newState, long when, JobCompletedReason reason) throws InvalidWorkerStateChangeException {
    WorkerState previousState = this.state;
    // Validate before mutating so an illegal request leaves state untouched.
    validateStateChange(newState);
    this.state = newState;
    LOGGER.info("Worker {} State changed from {} to {}", this.workerId, previousState, state);
    switch (state) {
    case Accepted:
        this.acceptedAt = when;
        break;
    case Launched:
        this.launchedAt = when;
        break;
    case StartInitiated:
        this.startingAt = when;
        break;
    case Started:
        this.startedAt = when;
        break;
    case Failed:
        this.completedAt = when;
        LOGGER.info("Worker {} failedAt {}", this.workerId, when);
        // Missing reason on failure is treated as a lost worker.
        this.reason = reason == null ? JobCompletedReason.Lost : reason;
        break;
    case Completed:
        this.completedAt = when;
        LOGGER.info("Worker {} completedAt {}", this.workerId, when);
        this.reason = reason == null ? JobCompletedReason.Normal : reason;
        break;
    default:
        // Noop/Unknown are never valid targets here; validateStateChange should
        // have rejected them already.
        assert false : "Unexpected job state to set";
    }
}
/** Returns the current state of this worker. */
public WorkerState getState() {
    return state;
}

/** Records the hostname of the slave this worker was placed on. */
void setSlave(String slave) {
    this.slave = slave;
}

/** Returns the hostname of the slave this worker runs on, or null if unassigned. */
public String getSlave() {
    return slave;
}

/** Records the id of the slave this worker was placed on. */
void setSlaveID(String slaveID) {
    this.slaveID = slaveID;
}

/** Records the preferred agent cluster for this worker, if any. */
void setCluster(Optional<String> cluster) {
    this.preferredCluster = cluster;
}

/** Records the resource cluster this worker was scheduled onto. */
void setResourceCluster(ClusterID resourceCluster) {
    this.resourceCluster = Optional.of(resourceCluster);
}

/** Returns the id of the slave this worker runs on, or null if unassigned. */
public String getSlaveID() {
    return slaveID;
}

/** Returns the time (epoch millis) the worker entered the Accepted state. */
public long getAcceptedAt() {
    return acceptedAt;
}

/** Returns the time (epoch millis) the worker entered the Launched state. */
public long getLaunchedAt() {
    return launchedAt;
}

/** Returns the time (epoch millis) the worker entered the StartInitiated state. */
public long getStartingAt() {
    return startingAt;
}

/** Returns the time (epoch millis) the worker entered the Started state. */
public long getStartedAt() {
    return startedAt;
}

/** Returns the time (epoch millis) the worker reached a terminal state. */
public long getCompletedAt() {
    return completedAt;
}

/** Records whether a downstream consumer is subscribed to this worker's sink. */
void setIsSubscribed(boolean isSub) {
    this.subscribed = isSub;
}

/** Returns whether a downstream consumer is subscribed to this worker's sink. */
public boolean getIsSubscribed() {
    return this.subscribed;
}

/** Returns the completion reason; only meaningful once the worker is terminal. */
public JobCompletedReason getReason() {
    return reason;
}

/** Returns the preferred agent cluster for this worker, if one was set. */
@Override
public Optional<String> getPreferredClusterOptional() {
    return this.preferredCluster;
}
/**
 * Equality is based on the identity and lifecycle fields of the worker.
 * The mutable tracking fields {@code lastHeartbeatAt} and {@code subscribed}
 * are deliberately excluded.
 */
@Override
public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    MantisWorkerMetadataImpl that = (MantisWorkerMetadataImpl) o;
    return workerIndex == that.workerIndex
        && workerNumber == that.workerNumber
        && stageNum == that.stageNum
        && numberOfPorts == that.numberOfPorts
        && acceptedAt == that.acceptedAt
        && launchedAt == that.launchedAt
        && startingAt == that.startingAt
        && startedAt == that.startedAt
        && completedAt == that.completedAt
        && resubmitOf == that.resubmitOf
        && totalResubmitCount == that.totalResubmitCount
        && Objects.equals(jobId, that.jobId)
        && Objects.equals(workerId, that.workerId)
        && Objects.equals(workerPorts, that.workerPorts)
        && state == that.state
        && Objects.equals(slave, that.slave)
        && Objects.equals(slaveID, that.slaveID)
        && reason == that.reason
        && Objects.equals(preferredCluster, that.preferredCluster)
        && Objects.equals(resourceCluster, that.resourceCluster);
}
/**
 * Hash consistent with {@code equals}. Previously {@code resourceCluster} was
 * compared in equals but omitted here; it is now hashed as well so the two
 * methods cover the same field set. The mutable tracking fields
 * ({@code lastHeartbeatAt}, {@code subscribed}) remain excluded, matching equals.
 */
@Override
public int hashCode() {
    return Objects.hash(workerIndex, workerNumber, jobId, stageNum, numberOfPorts,
        workerId, workerPorts, state, slave, slaveID, acceptedAt, launchedAt,
        startingAt, startedAt, completedAt, reason, resubmitOf, totalResubmitCount,
        preferredCluster, resourceCluster);
}
/** Human-readable dump of all fields, including the non-equality tracking fields. */
@Override
public String toString() {
    return "MantisWorkerMetadataImpl{"
        + "workerIndex=" + workerIndex
        + ", workerNumber=" + workerNumber
        + ", jobId=" + jobId
        + ", stageNum=" + stageNum
        + ", numberOfPorts=" + numberOfPorts
        + ", workerId=" + workerId
        + ", workerPorts=" + workerPorts
        + ", state=" + state
        + ", slave='" + slave + '\''
        + ", slaveID='" + slaveID + '\''
        + ", acceptedAt=" + acceptedAt
        + ", launchedAt=" + launchedAt
        + ", startingAt=" + startingAt
        + ", startedAt=" + startedAt
        + ", completedAt=" + completedAt
        + ", reason=" + reason
        + ", resubmitOf=" + resubmitOf
        + ", totalResubmitCount=" + totalResubmitCount
        + ", lastHeartbeatAt=" + lastHeartbeatAt
        + ", subscribed=" + subscribed
        + ", preferredCluster=" + preferredCluster
        + ", resourceCluster=" + resourceCluster
        + '}';
}
/** Returns the preferred agent cluster, if any (same value as getPreferredClusterOptional). */
@Override
public Optional<String> getCluster() {
    return this.preferredCluster;
}

/** Returns the resource cluster this worker was scheduled onto, if known. */
@Override
public Optional<ClusterID> getResourceCluster() {
    return this.resourceCluster;
}
}
| 8,076 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/worker/WorkerState.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job.worker;
import java.util.HashMap;
import java.util.Map;
/**
 * Enumeration of all the states a Worker can be in.
 *
 * <p>Worker state machine (happy path):
 * <pre>
 *   (resource assignment)    (worker startup)       (worker running)
 *   [Accepted] --&gt; [Launched] --&gt; [StartInitiated] --&gt; [Started]
 * </pre>
 * Any non-terminal state may also transition to [Failed] or [Completed],
 * both of which are terminal. {@link #Noop} and {@link #Unknown} are
 * bookkeeping values with no registered transitions.
 */
public enum WorkerState {
    /**
     * Indicates a worker submission has been received by the Master.
     */
    Accepted,
    /**
     * Indicates the worker has been scheduled onto a Mesos slave.
     */
    Launched,
    /**
     * Indicates the worker is in the process of starting up.
     */
    StartInitiated,
    /**
     * Indicates the worker is running.
     */
    Started,
    /**
     * Indicates the worker has encountered a fatal error. Terminal.
     */
    Failed,
    /**
     * Indicates the worker has completed execution. Terminal.
     */
    Completed,
    /**
     * A place holder state. Has no registered transitions or meta state.
     */
    Noop,
    /**
     * Indicates that the actual state of the worker is unknown.
     * Has no registered transitions or meta state.
     */
    Unknown;

    /**
     * A rollup worker state indicating whether worker is running or terminal.
     */
    public enum MetaState {
        /**
         * Indicates the worker is not in a final state.
         */
        Active,
        /**
         * Indicates worker is dead.
         */
        Terminal
    }

    // Legal successor states for each state. Noop and Unknown intentionally
    // have no entry: they have no outgoing transitions.
    private static final Map<WorkerState, WorkerState[]> STATE_TRANSITION_MAP;
    // Rollup of each state to Active/Terminal. Noop and Unknown have no entry.
    private static final Map<WorkerState, MetaState> META_STATES;

    static {
        STATE_TRANSITION_MAP = new HashMap<>();
        STATE_TRANSITION_MAP.put(WorkerState.Accepted, new WorkerState[]
            {WorkerState.Launched, WorkerState.Failed, WorkerState.Completed});
        STATE_TRANSITION_MAP.put(WorkerState.Launched, new WorkerState[] {
            WorkerState.StartInitiated, WorkerState.Started, WorkerState.Failed, WorkerState.Completed});
        STATE_TRANSITION_MAP.put(WorkerState.StartInitiated, new WorkerState[] {WorkerState.StartInitiated,
            WorkerState.Started, WorkerState.Failed, WorkerState.Completed});
        STATE_TRANSITION_MAP.put(WorkerState.Started, new WorkerState[] {WorkerState.Started,
            WorkerState.Failed, WorkerState.Completed});
        STATE_TRANSITION_MAP.put(WorkerState.Failed, new WorkerState[] {WorkerState.Failed});
        STATE_TRANSITION_MAP.put(WorkerState.Completed, new WorkerState[] {});

        META_STATES = new HashMap<>();
        META_STATES.put(WorkerState.Accepted, MetaState.Active);
        META_STATES.put(WorkerState.Launched, MetaState.Active);
        META_STATES.put(WorkerState.StartInitiated, MetaState.Active);
        META_STATES.put(WorkerState.Started, MetaState.Active);
        META_STATES.put(WorkerState.Failed, MetaState.Terminal);
        META_STATES.put(WorkerState.Completed, MetaState.Terminal);
    }

    /**
     * Returns true if the worker is in a state that indicates it is on Mesos slave.
     * @param state the state to test
     * @return true for StartInitiated and Started, false otherwise
     */
    public static boolean isWorkerOnSlave(WorkerState state) {
        switch (state) {
        case StartInitiated:
        case Started:
            return true;
        default:
            return false;
        }
    }

    /**
     * Returns true if the worker is any valid non terminal state
     * (other than Accepted, which is pre-launch).
     * @param state the state to test
     * @return true for Launched, StartInitiated and Started, false otherwise
     */
    public static boolean isRunningState(WorkerState state) {
        switch (state) {
        case Launched:
        case StartInitiated:
        case Started:
            return true;
        default:
            return false;
        }
    }

    /**
     * Returns true if the currentState -&gt; newState transition is valid.
     * States with no registered transitions (Noop, Unknown) never have a valid
     * successor; previously those states caused a NullPointerException here.
     * @param currentState the state to transition from
     * @param newState the proposed state to transition to
     * @return true if the transition is allowed by the state machine
     */
    public static boolean isValidStateChgTo(WorkerState currentState, WorkerState newState) {
        WorkerState[] validStates = STATE_TRANSITION_MAP.get(currentState);
        if (validStates == null) {
            // No outgoing transitions registered for this state (Noop/Unknown).
            return false;
        }
        for (WorkerState validState : validStates)
            if (validState == newState)
                return true;
        return false;
    }

    /**
     * Returns true if the worker is in a terminal state.
     * @param state the state to test
     * @return true for Completed and Failed, false otherwise
     */
    public static boolean isTerminalState(WorkerState state) {
        switch (state) {
        case Completed:
        case Failed:
            return true;
        default:
            return false;
        }
    }

    /**
     * Returns true if the worker is in error state.
     * @param state the state to test
     * @return true only for Failed
     */
    public static boolean isErrorState(WorkerState state) {
        switch (state) {
        case Failed:
            return true;
        default:
            return false;
        }
    }

    /**
     * Translates the given {@link WorkerState} to a MetaState.
     * NOTE(review): returns null for Noop and Unknown, which have no registered
     * meta state — callers should be prepared for that.
     * @param state the state to roll up
     * @return Active, Terminal, or null for states with no registered meta state
     */
    public static MetaState toMetaState(WorkerState state) {
        return META_STATES.get(state);
    }
}
| 8,077 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/jobcluster/job/worker/WorkerTerminate.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job.worker;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.scheduler.WorkerEvent;
/**
 * Worker event signaling that a worker has reached a terminal state,
 * carrying the final state, the completion reason and the event time.
 */
public class WorkerTerminate implements WorkerEvent {

    private final JobCompletedReason reason;
    private final WorkerId workerId;
    private final long eventTime;
    private final WorkerState finalState;

    /**
     * Creates a termination event with an explicit timestamp.
     *
     * @param workerId id of the terminated worker
     * @param state final state of the worker
     * @param reason why the worker terminated
     * @param time event time in epoch millis
     */
    public WorkerTerminate(WorkerId workerId, WorkerState state, JobCompletedReason reason, long time) {
        this.workerId = workerId;
        this.reason = reason;
        this.finalState = state;
        this.eventTime = time;
    }

    /**
     * Creates a termination event stamped with the current wall-clock time.
     *
     * @param workerId id of the terminated worker
     * @param state final state of the worker
     * @param reason why the worker terminated
     */
    public WorkerTerminate(WorkerId workerId, WorkerState state, JobCompletedReason reason) {
        this(workerId, state, reason, System.currentTimeMillis());
    }

    @Override
    public WorkerId getWorkerId() {
        return workerId;
    }

    @Override
    public long getEventTimeMs() {
        return this.eventTime;
    }

    /** Returns why the worker terminated. */
    public JobCompletedReason getReason() {
        return reason;
    }

    /** Returns the terminal state the worker ended in. */
    public WorkerState getFinalState() {
        return finalState;
    }

    @Override
    public int hashCode() {
        // Same 31-based fold (and field order) as the original hand-rolled version,
        // so hash values are unchanged.
        int hash = 1;
        hash = 31 * hash + (int) (eventTime ^ (eventTime >>> 32));
        hash = 31 * hash + (finalState == null ? 0 : finalState.hashCode());
        hash = 31 * hash + (reason == null ? 0 : reason.hashCode());
        hash = 31 * hash + (workerId == null ? 0 : workerId.hashCode());
        return hash;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        final WorkerTerminate that = (WorkerTerminate) obj;
        return eventTime == that.eventTime
            && finalState == that.finalState
            && reason == that.reason
            && (workerId == null ? that.workerId == null : workerId.equals(that.workerId));
    }

    @Override
    public String toString() {
        return "WorkerTerminate [reason=" + reason + ", workerId=" + workerId
            + ", eventTime=" + eventTime + ", finalState=" + finalState + "]";
    }
}
| 8,078 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/vm/AgentClusterOperationsImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.vm;
import com.netflix.fenzo.AutoScaleRule;
import com.netflix.fenzo.TaskRequest;
import com.netflix.fenzo.VirtualMachineCurrentState;
import com.netflix.fenzo.VirtualMachineLease;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.util.DateTimeExt;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventsProto;
import io.mantisrx.server.core.BaseService;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.AgentClustersAutoScaler;
import io.mantisrx.server.master.persistence.IMantisPersistenceProvider;
import io.mantisrx.server.master.scheduler.JobMessageRouter;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.WorkerOnDisabledVM;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.google.common.collect.ImmutableSet;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import org.apache.mesos.Protos;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.schedulers.Schedulers;
/**
 * Implements {@link AgentClusterOperations}: tracks which agent-cluster attribute
 * values are "active", persists that list, feeds it to the Fenzo-backed scheduler,
 * and periodically evicts workers that are running on VMs no longer active.
 */
public class AgentClusterOperationsImpl extends BaseService implements AgentClusterOperations {

    /** JSON-serializable holder for the attribute values marking VMs as active. */
    static class ActiveVmAttributeValues {
        private final List<String> values;

        @JsonCreator
        ActiveVmAttributeValues(@JsonProperty("values") List<String> values) {
            this.values = values;
        }

        List<String> getValues() {
            return values;
        }

        boolean isEmpty() {
            return values == null || values.isEmpty();
        }
    }

    private static final Logger logger = LoggerFactory.getLogger(AgentClusterOperationsImpl.class);
    private final IMantisPersistenceProvider storageProvider;
    private final JobMessageRouter jobMessageRouter;
    private final MantisScheduler scheduler;
    private final LifecycleEventPublisher lifecycleEventPublisher;
    // Written by start() and setActiveVMsAttributeValues(); read from scheduler
    // and periodic-check threads, hence volatile. Readers must snapshot it once
    // into a local before dereferencing (see isActive).
    private volatile ActiveVmAttributeValues activeVmAttributeValues = null;
    // Single-slot map (key "0") used as a thread-safe holder for the latest VM states.
    private final ConcurrentMap<String, List<VirtualMachineCurrentState>> vmStatesMap;
    private final AgentClustersAutoScaler agentClustersAutoScaler;
    // Name of the VM attribute whose value identifies a VM's cluster/group.
    private final String attrName;
    private final Counter listJobsOnVMsCount;

    public AgentClusterOperationsImpl(final IMantisPersistenceProvider storageProvider,
                                      final JobMessageRouter jobMessageRouter,
                                      final MantisScheduler scheduler,
                                      final LifecycleEventPublisher lifecycleEventPublisher,
                                      final String activeSlaveAttributeName) {
        super(true);
        Preconditions.checkNotNull(storageProvider, "storageProvider");
        Preconditions.checkNotNull(jobMessageRouter, "jobMessageRouter");
        Preconditions.checkNotNull(scheduler, "scheduler");
        Preconditions.checkNotNull(lifecycleEventPublisher, "lifecycleEventPublisher");
        Preconditions.checkNotNull(activeSlaveAttributeName, "activeSlaveAttributeName");
        this.storageProvider = storageProvider;
        this.jobMessageRouter = jobMessageRouter;
        this.scheduler = scheduler;
        this.lifecycleEventPublisher = lifecycleEventPublisher;
        this.vmStatesMap = new ConcurrentHashMap<>();
        this.agentClustersAutoScaler = AgentClustersAutoScaler.get();
        this.attrName = activeSlaveAttributeName;
        Metrics metrics = new Metrics.Builder()
            .id("AgentClusterOperations")
            .addCounter("listJobsOnVMsCount")
            .build();
        this.listJobsOnVMsCount = metrics.getCounter("listJobsOnVMsCount");
    }

    /**
     * On leader activation: loads the persisted active-VM list, pushes it to the
     * scheduler, and schedules a periodic (30s) sweep for workers on inactive VMs.
     */
    @Override
    public void start() {
        super.awaitActiveModeAndStart(() -> {
            try {
                Schedulers.computation().createWorker().schedulePeriodically(
                    () -> checkInactiveVMs(scheduler.getCurrentVMState()),
                    1,
                    30,
                    TimeUnit.SECONDS);
                List<String> activeVmGroups = storageProvider.initActiveVmAttributeValuesList();
                activeVmAttributeValues = new ActiveVmAttributeValues(activeVmGroups);
                scheduler.setActiveVmGroups(activeVmAttributeValues.getValues());
                // activeVmAttributeValues was assigned just above; the previous
                // null-guard in this log line was dead code.
                logger.info("Initialized activeVmAttributeValues=" + activeVmAttributeValues.getValues());
            } catch (IOException e) {
                // Keep the stack trace: the original logged only e.getMessage().
                logger.error("Can't initialize activeVM attribute values list: " + e.getMessage(), e);
            }
        });
    }

    /** Persists and applies a new active-VM attribute-value list, then publishes an audit event. */
    @Override
    public void setActiveVMsAttributeValues(List<String> values) throws IOException {
        logger.info("setting active VMs to {}", values);
        // Persist first so a crash after this point does not lose the setting.
        storageProvider.setActiveVmAttributeValuesList(values);
        activeVmAttributeValues = new ActiveVmAttributeValues(values);
        List<String> activeVMGroups = activeVmAttributeValues.getValues();
        scheduler.setActiveVmGroups(activeVMGroups);
        lifecycleEventPublisher.publishAuditEvent(
            new LifecycleEventsProto.AuditEvent(LifecycleEventsProto.AuditEvent.AuditEventType.CLUSTER_ACTIVE_VMS,
                "ActiveVMs", String.join(", ", values))
        );
    }

    /**
     * Returns the current active attribute values, or null if none were ever set.
     * Snapshots the volatile field once to avoid a check-then-act race with a
     * concurrent setActiveVMsAttributeValues()/start().
     */
    @Override
    public Set<String> getActiveVMsAttributeValues() {
        final ActiveVmAttributeValues snapshot = activeVmAttributeValues;
        return snapshot == null ?
            null :
            ImmutableSet.copyOf(snapshot.getValues());
    }

    // Builds, per VM that carries the active-VM attribute, the list of worker
    // tasks currently running on it (stage number is not recoverable from the
    // task id here, so it is reported as -1).
    private List<JobsOnVMStatus> getJobsOnVMStatus() {
        List<AgentClusterOperations.JobsOnVMStatus> result = new ArrayList<>();
        final List<VirtualMachineCurrentState> vmCurrentStates = scheduler.getCurrentVMState();
        if (vmCurrentStates != null && !vmCurrentStates.isEmpty()) {
            for (VirtualMachineCurrentState currentState : vmCurrentStates) {
                final VirtualMachineLease currAvailableResources = currentState.getCurrAvailableResources();
                if (currAvailableResources != null) {
                    final Protos.Attribute attribute = currAvailableResources.getAttributeMap().get(attrName);
                    if (attribute != null) {
                        AgentClusterOperations.JobsOnVMStatus s =
                            new AgentClusterOperations.JobsOnVMStatus(currAvailableResources.hostname(),
                                attribute.getText().getValue());
                        for (TaskRequest r : currentState.getRunningTasks()) {
                            final Optional<WorkerId> workerId = WorkerId.fromId(r.getId());
                            s.addJob(new AgentClusterOperations.JobOnVMInfo(
                                workerId.map(w -> w.getJobId()).orElse("InvalidJobId"),
                                -1,
                                workerId.map(w -> w.getWorkerIndex()).orElse(-1),
                                workerId.map(w -> w.getWorkerNum()).orElse(-1)));
                        }
                        result.add(s);
                    }
                }
            }
        }
        return result;
    }

    /** Groups the per-VM job status by the VM's active-attribute value. */
    @Override
    public Map<String, List<JobsOnVMStatus>> getJobsOnVMs() {
        listJobsOnVMsCount.increment();
        Map<String, List<JobsOnVMStatus>> result = new HashMap<>();
        final List<JobsOnVMStatus> statusList = getJobsOnVMStatus();
        if (!statusList.isEmpty()) {
            for (JobsOnVMStatus status : statusList) {
                List<JobsOnVMStatus> jobsOnVMStatuses = result.computeIfAbsent(status.getAttributeValue(), k -> new ArrayList<>());
                jobsOnVMStatuses.add(status);
            }
        }
        return result;
    }

    private boolean isIn(String name, Set<String> activeVMs) {
        return activeVMs.contains(name);
    }

    /**
     * Returns true if the given attribute value is considered active. An unset
     * or empty active list means "everything is active". Snapshots the volatile
     * field once so a concurrent update cannot NPE between check and use.
     */
    @Override
    public boolean isActive(String name) {
        final ActiveVmAttributeValues snapshot = activeVmAttributeValues;
        return snapshot == null || snapshot.isEmpty() ||
            isIn(name, ImmutableSet.copyOf(snapshot.getValues()));
    }

    /** Stores the latest VM state snapshot (single-slot holder, key "0"). */
    @Override
    public void setAgentInfos(List<VirtualMachineCurrentState> vmStates) {
        vmStatesMap.put("0", vmStates);
    }

    /** Converts the latest VM state snapshot into flattened per-agent info DTOs. */
    @Override
    public List<AgentInfo> getAgentInfos() {
        List<VirtualMachineCurrentState> vmStates = vmStatesMap.get("0");
        List<AgentInfo> agentInfos = new ArrayList<>();
        if (vmStates != null && !vmStates.isEmpty()) {
            for (VirtualMachineCurrentState s : vmStates) {
                List<VirtualMachineLease.Range> ranges = s.getCurrAvailableResources().portRanges();
                int ports = 0;
                if (ranges != null && !ranges.isEmpty())
                    for (VirtualMachineLease.Range r : ranges)
                        // NOTE(review): counts end - beg per range; if Fenzo ranges
                        // are inclusive on both ends this undercounts by one per
                        // range — confirm against VirtualMachineLease.Range semantics.
                        ports += r.getEnd() - r.getBeg();
                Map<String, Protos.Attribute> attributeMap = s.getCurrAvailableResources().getAttributeMap();
                Map<String, String> attributes = new HashMap<>();
                if (attributeMap != null && !attributeMap.isEmpty()) {
                    for (Map.Entry<String, Protos.Attribute> entry : attributeMap.entrySet()) {
                        attributes.put(entry.getKey(), entry.getValue().getText().getValue());
                    }
                }
                agentInfos.add(new AgentInfo(
                    s.getHostname(), s.getCurrAvailableResources().cpuCores(),
                    s.getCurrAvailableResources().memoryMB(), s.getCurrAvailableResources().diskMB(),
                    ports, s.getCurrAvailableResources().getScalarValues(), attributes, s.getResourceSets().keySet(),
                    getTimeString(s.getDisabledUntil())
                ));
            }
        }
        return agentInfos;
    }

    /** Exposes the agent-cluster autoscale rules as DTOs keyed by rule name. */
    @Override
    public Map<String, AgentClusterAutoScaleRule> getAgentClusterAutoScaleRules() {
        final Set<AutoScaleRule> agentAutoscaleRules = agentClustersAutoScaler.getRules();
        final Map<String, AgentClusterAutoScaleRule> result = new HashMap<>();
        if (agentAutoscaleRules != null && !agentAutoscaleRules.isEmpty()) {
            for (AutoScaleRule r : agentAutoscaleRules) {
                result.put(r.getRuleName(),
                    new AgentClusterOperations.AgentClusterAutoScaleRule(
                        r.getRuleName(),
                        r.getCoolDownSecs(),
                        r.getMinIdleHostsToKeep(),
                        r.getMaxIdleHostsToKeep(),
                        r.getMinSize(),
                        r.getMaxSize()));
            }
        }
        return result;
    }

    // Returns the UTC time string for a disabled-until timestamp still in the
    // future, or null if the VM is no longer disabled.
    private String getTimeString(long disabledUntil) {
        if (System.currentTimeMillis() > disabledUntil)
            return null;
        return DateTimeExt.toUtcDateTimeString(disabledUntil);
    }

    /**
     * Identifies VMs whose attribute value is no longer in the active set and
     * routes a WorkerOnDisabledVM event for each worker running on them.
     * An unset/empty active list is treated as "all VMs are active".
     *
     * @return hostnames of VMs found to be inactive (possibly empty, never null)
     */
    List<String> manageActiveVMs(final List<VirtualMachineCurrentState> currentStates) {
        List<String> inactiveVMs = new ArrayList<>();
        if (currentStates != null && !currentStates.isEmpty()) {
            final Set<String> values = getActiveVMsAttributeValues();
            if (values == null || values.isEmpty())
                return Collections.emptyList(); // treat no valid active VMs attribute value as all are active
            for (VirtualMachineCurrentState currentState : currentStates) {
                final VirtualMachineLease lease = currentState.getCurrAvailableResources();
                if (lease != null) {
                    final Collection<TaskRequest> runningTasks = currentState.getRunningTasks();
                    if (runningTasks != null && !runningTasks.isEmpty()) {
                        final Map<String, Protos.Attribute> attributeMap = lease.getAttributeMap();
                        if (attributeMap != null && !attributeMap.isEmpty()) {
                            final Protos.Attribute attribute = attributeMap.get(attrName);
                            if (attribute != null && attribute.hasText()) {
                                if (!isIn(attribute.getText().getValue(), values)) {
                                    inactiveVMs.add(lease.hostname());
                                    for (TaskRequest t : runningTasks) {
                                        Optional<WorkerId> workerIdO = WorkerId.fromId(t.getId());
                                        workerIdO.ifPresent(workerId -> jobMessageRouter.routeWorkerEvent(new WorkerOnDisabledVM(workerId)));
                                    }
                                }
                            } else
                                logger.warn("No attribute value for " + attrName + " found on VM " + lease.hostname() +
                                    " that has " + runningTasks.size() + " tasks on it");
                        } else
                            logger.warn("No attributes found on VM " + lease.hostname() + " that has " + runningTasks.size() + " tasks on it");
                    }
                }
            }
        }
        return inactiveVMs;
    }

    // Periodic sweep body: rescinds offers for any VM found to be inactive so the
    // scheduler stops placing work there.
    private void checkInactiveVMs(List<VirtualMachineCurrentState> vmCurrentStates) {
        logger.debug("Checking on any workers on VMs that are not active anymore");
        final List<String> inactiveVMs = manageActiveVMs(vmCurrentStates);
        if (inactiveVMs != null && !inactiveVMs.isEmpty()) {
            for (String vm : inactiveVMs) {
                logger.info("expiring all leases of inactive vm " + vm);
                scheduler.rescindOffers(vm);
            }
        }
    }
}
| 8,079 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/vm/AgentClusterOperations.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.vm;
import com.netflix.fenzo.VirtualMachineCurrentState;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * Operations for managing "active" agent VM clusters: which attribute values are
 * active, what jobs run on which VMs, and the cluster autoscale rules.
 */
public interface AgentClusterOperations {

    /** Persists and applies a new list of active-VM attribute values. */
    void setActiveVMsAttributeValues(List<String> values) throws IOException;

    /** Returns the current active attribute values; may be null if never set. */
    Set<String> getActiveVMsAttributeValues();

    /** Returns true if the given attribute value is considered active. */
    boolean isActive(String name);

    /** Stores the latest scheduler VM state snapshot. */
    void setAgentInfos(List<VirtualMachineCurrentState> agentInfos);

    /** Returns per-agent resource/attribute info derived from the latest snapshot. */
    List<AgentInfo> getAgentInfos();

    /**
     * Get all current jobs assigned to VMs. This produces a map with key as the value for VM attribute used to
     * set active VMs. The values of the map are the list of jobs on VM status objects.
     * @return current jobs assigned to VMs.
     */
    Map<String, List<JobsOnVMStatus>> getJobsOnVMs();

    /** Returns the agent-cluster autoscale rules keyed by rule name. */
    Map<String, AgentClusterAutoScaleRule> getAgentClusterAutoScaleRules();

    /** Immutable JSON DTO describing one worker task running on a VM. */
    class JobOnVMInfo {
        private final String jobId;
        private final int stage;
        private final int workerIndex;
        private final int workerNumber;

        @JsonCreator
        public JobOnVMInfo(@JsonProperty("jobId") String jobId,
                           @JsonProperty("stage") int stage,
                           @JsonProperty("workerIndex") int workerIndex,
                           @JsonProperty("workerNumber") int workerNumber) {
            this.jobId = jobId;
            this.stage = stage;
            this.workerIndex = workerIndex;
            this.workerNumber = workerNumber;
        }

        public String getJobId() {
            return jobId;
        }

        public int getStage() {
            return stage;
        }

        public int getWorkerIndex() {
            return workerIndex;
        }

        public int getWorkerNumber() {
            return workerNumber;
        }
    }

    /** JSON DTO: the jobs currently on a single VM, keyed by its active-attribute value. */
    class JobsOnVMStatus {
        private final String hostname;
        private final String attributeValue;
        private final List<JobOnVMInfo> jobs;

        @JsonCreator
        public JobsOnVMStatus(@JsonProperty("hostname") String hostname,
                              @JsonProperty("attributeValue") String attributeValue) {
            this.hostname = hostname;
            this.attributeValue = attributeValue;
            this.jobs = new ArrayList<>();
        }

        // Builder-style mutator used while assembling the status; not serialized.
        @JsonIgnore
        void addJob(JobOnVMInfo job) {
            jobs.add(job);
        }

        public String getHostname() {
            return hostname;
        }

        public String getAttributeValue() {
            return attributeValue;
        }

        public List<JobOnVMInfo> getJobs() {
            return jobs;
        }
    }

    /** Immutable JSON DTO of an agent's available resources and attributes. */
    class AgentInfo {
        private final String name;
        private final double availableCpus;
        private final double availableMemory;
        private final double availableDisk;
        private final int availableNumPorts;
        private final Map<String, Double> scalars;
        private final Map<String, String> attributes;
        private final Set<String> resourceSets;
        // UTC time string until which the agent is disabled, or null if enabled.
        private final String disabledUntil;

        @JsonCreator
        public AgentInfo(@JsonProperty("name") String name,
                         @JsonProperty("availableCpus") double availableCpus,
                         @JsonProperty("availableMemory") double availableMemory,
                         @JsonProperty("availableDisk") double availableDisk,
                         @JsonProperty("availableNumPorts") int availableNumPorts,
                         @JsonProperty("scalars") Map<String, Double> scalars,
                         @JsonProperty("attributes") Map<String, String> attributes,
                         @JsonProperty("resourceSets") Set<String> resourceSets,
                         @JsonProperty("disabledUntil") String disabledUntil) {
            this.name = name;
            this.availableCpus = availableCpus;
            this.availableMemory = availableMemory;
            this.availableDisk = availableDisk;
            this.availableNumPorts = availableNumPorts;
            this.scalars = scalars;
            this.attributes = attributes;
            this.resourceSets = resourceSets;
            this.disabledUntil = disabledUntil;
        }

        public String getName() {
            return name;
        }

        public double getAvailableCpus() {
            return availableCpus;
        }

        public double getAvailableMemory() {
            return availableMemory;
        }

        public double getAvailableDisk() {
            return availableDisk;
        }

        public int getAvailableNumPorts() {
            return availableNumPorts;
        }

        public Map<String, Double> getScalars() {
            return scalars;
        }

        public Map<String, String> getAttributes() {
            return attributes;
        }

        public Set<String> getResourceSets() {
            return resourceSets;
        }

        public String getDisabledUntil() {
            return disabledUntil;
        }
    }

    /** Immutable JSON DTO of one agent-cluster autoscale rule. */
    class AgentClusterAutoScaleRule {
        private final String name;
        private final long cooldownSecs;
        private final int minIdle;
        private final int maxIdle;
        private final int minSize;
        private final int maxSize;

        @JsonCreator
        public AgentClusterAutoScaleRule(@JsonProperty("name") final String name,
                                         @JsonProperty("cooldownSecs") final long cooldownSecs,
                                         @JsonProperty("minIdle") final int minIdle,
                                         @JsonProperty("maxIdle") final int maxIdle,
                                         @JsonProperty("minSize") final int minSize,
                                         @JsonProperty("maxSize") final int maxSize) {
            this.name = name;
            this.cooldownSecs = cooldownSecs;
            this.minIdle = minIdle;
            this.maxIdle = maxIdle;
            this.minSize = minSize;
            this.maxSize = maxSize;
        }

        public String getName() {
            return name;
        }

        public long getCooldownSecs() {
            return cooldownSecs;
        }

        public int getMinIdle() {
            return minIdle;
        }

        public int getMaxIdle() {
            return maxIdle;
        }

        public int getMinSize() {
            return minSize;
        }

        public int getMaxSize() {
            return maxSize;
        }
    }
}
| 8,080 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/ResourceClustersAkkaImpl.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.pattern.Patterns;
import io.mantisrx.server.master.config.ConfigurationFactory;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.persistence.IMantisPersistenceProvider;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.ResourceCluster;
import io.mantisrx.server.master.resourcecluster.ResourceClusters;
import io.mantisrx.server.master.scheduler.JobMessageRouter;
import java.time.Clock;
import java.time.Duration;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Supplier;
import lombok.AccessLevel;
import lombok.RequiredArgsConstructor;
import org.apache.flink.runtime.rpc.RpcService;
/**
 * This class is an implementation of {@link ResourceClusters} that uses the Akka actor implementation under the hood.
 * You can think of this class as a more java typed-way of sharing the functionalities of the akka actor.
 */
@RequiredArgsConstructor(access = AccessLevel.PRIVATE)
public class ResourceClustersAkkaImpl implements ResourceClusters {

    private final ActorRef resourceClustersManagerActor;
    private final Duration askTimeout;
    private final Supplier<Integer> rateLimitPerSecond;

    // One lazily-created ResourceCluster facade per cluster id; entries are never evicted.
    private final ConcurrentMap<ClusterID, ResourceCluster> cache =
        new ConcurrentHashMap<>();

    /**
     * Returns the (cached) typed facade for the given cluster, creating it on first access.
     */
    @Override
    public ResourceCluster getClusterFor(ClusterID clusterID) {
        // computeIfAbsent already returns the mapped value; the previous extra
        // cache.get(clusterID) was a redundant second lookup.
        return cache.computeIfAbsent(
            clusterID,
            dontCare ->
                new ResourceClusterAkkaImpl(
                    resourceClustersManagerActor,
                    askTimeout,
                    clusterID,
                    rateLimitPerSecond));
    }

    /**
     * Asks the manager actor for the set of clusters that currently have actors running.
     */
    @Override
    public CompletableFuture<Set<ClusterID>> listActiveClusters() {
        return
            Patterns.ask(resourceClustersManagerActor,
                    new ResourceClustersManagerActor.ListActiveClusters(), askTimeout)
                .toCompletableFuture()
                .thenApply(ResourceClustersManagerActor.ClusterIdSet.class::cast)
                .thenApply(clusterIdSet -> clusterIdSet.getClusterIDS());
    }

    /**
     * Wires up the {@link ResourceClustersManagerActor} and returns the typed facade over it.
     */
    public static ResourceClusters load(
        ConfigurationFactory masterConfiguration,
        RpcService rpcService,
        ActorSystem actorSystem,
        MantisJobStore mantisJobStore,
        JobMessageRouter jobMessageRouter,
        ActorRef resourceClusterHostActorRef,
        IMantisPersistenceProvider persistenceProvider) {
        final ActorRef resourceClusterManagerActor =
            actorSystem.actorOf(
                ResourceClustersManagerActor.props(masterConfiguration.getConfig(), Clock.systemDefaultZone(),
                    rpcService, mantisJobStore, resourceClusterHostActorRef, persistenceProvider,
                    jobMessageRouter));
        final Duration askTimeout = java.time.Duration.ofMillis(
            ConfigurationProvider.getConfig().getMasterApiAskTimeoutMs());
        // Re-read the limit from config on each query so updates take effect without restart.
        final Supplier<Integer> rateLimitPerSecond = () -> masterConfiguration.getConfig().getResourceClusterActionsPermitsPerSecond();
        return new ResourceClustersAkkaImpl(resourceClusterManagerActor, askTimeout, rateLimitPerSecond);
    }
}
| 8,081 |
/*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorRegistration;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.time.Instant;
import java.util.Map;
import java.util.Optional;
import java.util.TreeMap;
import javax.annotation.Nullable;
import javax.xml.bind.DatatypeConverter;
import lombok.Value;
/**
 * Request to disable task executors, addressed either at one specific executor (by id)
 * or at every executor whose registration contains a matching set of attributes.
 */
@Value
public class DisableTaskExecutorsRequest {
    // Attribute key/value pairs a registration must contain for this request to apply.
    Map<String, String> attributes;

    ClusterID clusterID;

    // Point in time after which this request no longer applies.
    Instant expiry;

    // When present, the request targets exactly this executor instead of attribute matching.
    Optional<TaskExecutorID> taskExecutorID;

    boolean isRequestByAttributes() {
        // Idiomatic emptiness check (was attributes.size() > 0).
        return !attributes.isEmpty();
    }

    boolean isExpired(Instant now) {
        // Expired when expiry <= now.
        return expiry.compareTo(now) <= 0;
    }

    /** True when both requests match by attributes and this one's attributes subsume the other's. */
    boolean targetsSameTaskExecutorsAs(DisableTaskExecutorsRequest another) {
        return this.isRequestByAttributes() && another.isRequestByAttributes() && this.attributes.entrySet().containsAll(another.attributes.entrySet());
    }

    /** True when this attribute-based request applies to the given (non-null) registration. */
    boolean covers(@Nullable TaskExecutorRegistration registration) {
        return this.isRequestByAttributes() && registration != null && registration.containsAttributes(this.attributes);
    }

    /**
     * Stable identity hash of this request (cluster id + optional executor id + attributes).
     * MD5 is used purely as a non-cryptographic fingerprint here.
     */
    public String getHash() {
        try {
            MessageDigest messageDigest = MessageDigest.getInstance("MD5");
            messageDigest.update(clusterID.getResourceID().getBytes(StandardCharsets.UTF_8));
            taskExecutorID.ifPresent(executorID -> messageDigest.update(executorID.getResourceId().getBytes(StandardCharsets.UTF_8)));
            // TreeMap gives deterministic sorted iteration, so equal maps hash identically.
            new TreeMap<>(attributes).forEach((key, value) -> {
                messageDigest.update(key.getBytes(StandardCharsets.UTF_8));
                messageDigest.update(value.getBytes(StandardCharsets.UTF_8));
            });
            return DatatypeConverter.printHexBinary(messageDigest.digest());
        } catch (NoSuchAlgorithmException exception) {
            // MD5 is required to be present in every JRE; treat absence as fatal.
            throw new RuntimeException(exception);
        }
    }
}
| 8,082 |
/*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster;
import akka.actor.ActorRef;
import akka.pattern.Patterns;
import com.spotify.futures.CompletableFutures;
import io.mantisrx.common.Ack;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.server.master.resourcecluster.RequestThrottledException;
import io.mantisrx.server.master.resourcecluster.ResourceClusterGateway;
import io.mantisrx.server.master.resourcecluster.TaskExecutorDisconnection;
import io.mantisrx.server.master.resourcecluster.TaskExecutorHeartbeat;
import io.mantisrx.server.master.resourcecluster.TaskExecutorRegistration;
import io.mantisrx.server.master.resourcecluster.TaskExecutorStatusChange;
import io.mantisrx.shaded.com.google.common.util.concurrent.RateLimiter;
import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.function.Supplier;
import lombok.extern.slf4j.Slf4j;
/**
 * {@link ResourceClusterGateway} implementation that forwards task-executor lifecycle
 * messages (registration, heartbeat, status change, disconnection) to the resource
 * cluster manager actor, applying a shared rate limit to protect the actor.
 */
@Slf4j
class ResourceClusterGatewayAkkaImpl implements ResourceClusterGateway {
    protected final ActorRef resourceClusterManagerActor;
    protected final Duration askTimeout;

    private final Counter registrationCounter;
    private final Counter heartbeatCounter;
    private final Counter disconnectionCounter;
    private final Counter throttledCounter;

    // Shared limiter across all throttled gateway calls; rate is refreshed periodically below.
    private final RateLimiter rateLimiter;

    // todo: cleanup scheduler on service shutdown.
    private final ScheduledExecutorService semaphoreResetScheduler = Executors.newScheduledThreadPool(1);

    ResourceClusterGatewayAkkaImpl(
        ActorRef resourceClusterManagerActor,
        Duration askTimeout,
        Supplier<Integer> maxConcurrentRequestCount) {
        this.resourceClusterManagerActor = resourceClusterManagerActor;
        this.askTimeout = askTimeout;
        // Log the current value, not the Supplier instance (which only prints a lambda identity).
        log.info("Setting maxConcurrentRequestCount for resourceCluster gateway {}", maxConcurrentRequestCount.get());
        this.rateLimiter = RateLimiter.create(maxConcurrentRequestCount.get());
        // Re-read the configured rate every minute so config changes apply without restart.
        semaphoreResetScheduler.scheduleAtFixedRate(() -> {
            int newRate = maxConcurrentRequestCount.get();
            log.info("Setting the rate limiter rate to {}", newRate);
            rateLimiter.setRate(newRate);
        }, 1, 1, TimeUnit.MINUTES);
        Metrics m = new Metrics.Builder()
            .id("ResourceClusterGatewayAkkaImpl")
            .addCounter("registrationCounter")
            .addCounter("heartbeatCounter")
            .addCounter("disconnectionCounter")
            .addCounter("throttledCounter")
            .build();
        Metrics metrics = MetricsRegistry.getInstance().registerAndGet(m);
        this.registrationCounter = metrics.getCounter("registrationCounter");
        this.heartbeatCounter = metrics.getCounter("heartbeatCounter");
        this.disconnectionCounter = metrics.getCounter("disconnectionCounter");
        this.throttledCounter = metrics.getCounter("throttledCounter");
    }

    /**
     * Wraps a call so that it is only executed when a rate-limiter permit is available;
     * otherwise the returned future fails with {@link RequestThrottledException}.
     */
    private <In, Out> Function<In, CompletableFuture<Out>> withThrottle(Function<In, CompletableFuture<Out>> func) {
        return in -> {
            if (rateLimiter.tryAcquire()) {
                return func.apply(in);
            } else {
                this.throttledCounter.increment();
                return CompletableFutures.exceptionallyCompletedFuture(
                    new RequestThrottledException("Throttled req: " + in.getClass().getSimpleName())
                );
            }
        };
    }

    @Override
    public CompletableFuture<Ack> registerTaskExecutor(TaskExecutorRegistration registration) {
        return withThrottle(this::registerTaskExecutorImpl).apply(registration);
    }

    private CompletableFuture<Ack> registerTaskExecutorImpl(TaskExecutorRegistration registration) {
        this.registrationCounter.increment();
        return Patterns
            .ask(resourceClusterManagerActor, registration, askTimeout)
            .thenApply(Ack.class::cast)
            .toCompletableFuture();
    }

    @Override
    public CompletableFuture<Ack> heartBeatFromTaskExecutor(TaskExecutorHeartbeat heartbeat) {
        return withThrottle(this::heartBeatFromTaskExecutorImpl).apply(heartbeat);
    }

    private CompletableFuture<Ack> heartBeatFromTaskExecutorImpl(TaskExecutorHeartbeat heartbeat) {
        this.heartbeatCounter.increment();
        return
            Patterns
                .ask(resourceClusterManagerActor, heartbeat, askTimeout)
                .thenApply(Ack.class::cast)
                .toCompletableFuture();
    }

    @Override
    public CompletableFuture<Ack> notifyTaskExecutorStatusChange(TaskExecutorStatusChange statusChange) {
        // NOTE(review): status changes are not throttled, unlike the other gateway calls —
        // confirm this asymmetry is deliberate.
        return
            Patterns
                .ask(resourceClusterManagerActor, statusChange, askTimeout)
                .thenApply(Ack.class::cast)
                .toCompletableFuture();
    }

    @Override
    public CompletableFuture<Ack> disconnectTaskExecutor(
        TaskExecutorDisconnection taskExecutorDisconnection) {
        return withThrottle(this::disconnectTaskExecutorImpl).apply(taskExecutorDisconnection);
    }

    CompletableFuture<Ack> disconnectTaskExecutorImpl(
        TaskExecutorDisconnection taskExecutorDisconnection) {
        // Count only admitted requests, matching the registration/heartbeat counters.
        // (Previously this counter was incremented before the throttle check, so
        // throttled disconnections were counted as well.)
        this.disconnectionCounter.increment();
        return
            Patterns.ask(resourceClusterManagerActor, taskExecutorDisconnection, askTimeout)
                .thenApply(Ack.class::cast)
                .toCompletableFuture();
    }
}
| 8,083 |
/*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster;
import akka.actor.AbstractActor;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.actor.SupervisorStrategy;
import akka.japi.pf.ReceiveBuilder;
import io.mantisrx.master.akka.MantisActorSupervisorStrategy;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.AddNewJobArtifactsToCacheRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetActiveJobsRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetAssignedTaskExecutorRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetAvailableTaskExecutorsRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetBusyTaskExecutorsRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetJobArtifactsToCacheRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetRegisteredTaskExecutorsRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetTaskExecutorStatusRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetUnregisteredTaskExecutorsRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.RemoveJobArtifactsToCacheRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.ResourceOverviewRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.TaskExecutorAssignmentRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.TaskExecutorGatewayRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.TaskExecutorInfoRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterScalerActor.TriggerClusterRuleRefreshRequest;
import io.mantisrx.master.resourcecluster.proto.SetResourceClusterScalerStatusRequest;
import io.mantisrx.server.master.config.MasterConfiguration;
import io.mantisrx.server.master.persistence.IMantisPersistenceProvider;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorDisconnection;
import io.mantisrx.server.master.resourcecluster.TaskExecutorHeartbeat;
import io.mantisrx.server.master.resourcecluster.TaskExecutorRegistration;
import io.mantisrx.server.master.resourcecluster.TaskExecutorStatusChange;
import io.mantisrx.server.master.scheduler.JobMessageRouter;
import java.time.Clock;
import java.time.Duration;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import lombok.Builder;
import lombok.Value;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.runtime.rpc.RpcService;
/**
 * Supervisor actor responsible for creating/deleting/listing all resource clusters in the system.
 * <p>
 * Routes every cluster-scoped message to a per-cluster {@link ResourceClusterActor} (and its
 * companion {@link ResourceClusterScalerActor}), creating both lazily on first use.
 */
@Slf4j
class ResourceClustersManagerActor extends AbstractActor {
    private final MasterConfiguration masterConfiguration;
    private final Clock clock;
    private final RpcService rpcService;
    private final MantisJobStore mantisJobStore;

    // Cluster Id to <ResourceClusterActor, ResourceClusterScalerActor> map.
    private final Map<ClusterID, ActorHolder> resourceClusterActorMap;

    private final ActorRef resourceClusterHostActor;
    private final IMantisPersistenceProvider mantisPersistenceProvider;
    private final JobMessageRouter jobMessageRouter;

    /** Creates the {@link Props} for this supervisor actor. */
    public static Props props(
        MasterConfiguration masterConfiguration,
        Clock clock,
        RpcService rpcService,
        MantisJobStore mantisJobStore,
        ActorRef resourceClusterHostActorRef,
        IMantisPersistenceProvider mantisPersistenceProvider,
        JobMessageRouter jobMessageRouter) {
        return Props.create(
            ResourceClustersManagerActor.class,
            masterConfiguration,
            clock,
            rpcService,
            mantisJobStore,
            resourceClusterHostActorRef,
            mantisPersistenceProvider,
            jobMessageRouter);
    }

    public ResourceClustersManagerActor(
        MasterConfiguration masterConfiguration, Clock clock,
        RpcService rpcService,
        MantisJobStore mantisJobStore,
        ActorRef resourceClusterHostActorRef,
        IMantisPersistenceProvider mantisPersistenceProvider,
        JobMessageRouter jobMessageRouter) {
        this.masterConfiguration = masterConfiguration;
        this.clock = clock;
        this.rpcService = rpcService;
        this.mantisJobStore = mantisJobStore;
        this.resourceClusterHostActor = resourceClusterHostActorRef;
        this.mantisPersistenceProvider = mantisPersistenceProvider;
        this.jobMessageRouter = jobMessageRouter;
        // Plain HashMap is fine: actors process one message at a time.
        this.resourceClusterActorMap = new HashMap<>();
    }

    /**
     * Routing table: every cluster-scoped request is forwarded to the corresponding
     * per-cluster actor (or scaler actor), preserving the original sender.
     */
    @Override
    public Receive createReceive() {
        return
            ReceiveBuilder
                .create()
                .match(ListActiveClusters.class, req -> sender().tell(getActiveClusters(), self()))
                .match(GetRegisteredTaskExecutorsRequest.class, req -> getRCActor(req.getClusterID()).forward(req, context()))
                .match(GetBusyTaskExecutorsRequest.class, req -> getRCActor(req.getClusterID()).forward(req, context()))
                .match(GetAvailableTaskExecutorsRequest.class, req -> getRCActor(req.getClusterID()).forward(req, context()))
                .match(GetUnregisteredTaskExecutorsRequest.class, req -> getRCActor(req.getClusterID()).forward(req, context()))
                .match(GetTaskExecutorStatusRequest.class, req -> getRCActor(req.getClusterID()).forward(req, context()))
                .match(GetActiveJobsRequest.class, req -> getRCActor(req.getClusterID()).forward(req, context()))
                .match(GetAssignedTaskExecutorRequest.class, req -> getRCActor(req.getClusterID()).forward(req, context()))
                .match(TaskExecutorRegistration.class, registration ->
                    getRCActor(registration.getClusterID()).forward(registration, context()))
                .match(TaskExecutorHeartbeat.class, heartbeat ->
                    getRCActor(heartbeat.getClusterID()).forward(heartbeat, context()))
                .match(TaskExecutorStatusChange.class, statusChange ->
                    getRCActor(statusChange.getClusterID()).forward(statusChange, context()))
                .match(TaskExecutorDisconnection.class, disconnection ->
                    getRCActor(disconnection.getClusterID()).forward(disconnection, context()))
                .match(TaskExecutorAssignmentRequest.class, req ->
                    getRCActor(req.getClusterID()).forward(req, context()))
                .match(ResourceOverviewRequest.class, req ->
                    getRCActor(req.getClusterID()).forward(req, context()))
                .match(TaskExecutorInfoRequest.class, req ->
                    getRCActor(req.getClusterID()).forward(req, context()))
                .match(TaskExecutorGatewayRequest.class, req ->
                    getRCActor(req.getClusterID()).forward(req, context()))
                .match(DisableTaskExecutorsRequest.class, req ->
                    getRCActor(req.getClusterID()).forward(req, context()))
                .match(AddNewJobArtifactsToCacheRequest.class, req ->
                    getRCActor(req.getClusterID()).forward(req, context()))
                .match(RemoveJobArtifactsToCacheRequest.class, req ->
                    getRCActor(req.getClusterID()).forward(req, context()))
                .match(GetJobArtifactsToCacheRequest.class, req ->
                    getRCActor(req.getClusterID()).forward(req, context()))
                .match(TriggerClusterRuleRefreshRequest.class, req ->
                    getRCScalerActor(req.getClusterID()).forward(req, context()))
                .match(SetResourceClusterScalerStatusRequest.class, req ->
                    getRCScalerActor(req.getClusterID()).forward(req, context()))
                .build();
    }

    /** Spawns the per-cluster resource-cluster actor as a child of this actor. */
    private ActorRef createResourceClusterActorFor(ClusterID clusterID) {
        log.info("Creating resource cluster actor for {}", clusterID);
        ActorRef clusterActor =
            getContext().actorOf(
                ResourceClusterActor.props(
                    clusterID,
                    Duration.ofMillis(masterConfiguration.getHeartbeatIntervalInMs()),
                    Duration.ofMillis(masterConfiguration.getAssignmentIntervalInMs()),
                    Duration.ofMillis(masterConfiguration.getAssignmentIntervalInMs()),
                    clock,
                    rpcService,
                    mantisJobStore,
                    jobMessageRouter,
                    masterConfiguration.getMaxJobArtifactsToCache(),
                    masterConfiguration.getJobClustersWithArtifactCachingEnabled(),
                    masterConfiguration.isJobArtifactCachingEnabled()),
                "ResourceClusterActor-" + clusterID.getResourceID());
        log.info("Created resource cluster actor for {}", clusterID);
        return clusterActor;
    }

    /** Spawns the companion scaler actor for the given cluster actor. */
    private ActorRef createResourceClusterScalerActorFor(ClusterID clusterID, ActorRef rcActor) {
        log.info("Creating resource cluster scaler actor for {}", clusterID);
        ActorRef clusterScalerActor =
            getContext().actorOf(
                ResourceClusterScalerActor.props(
                    clusterID,
                    clock,
                    Duration.ofSeconds(masterConfiguration.getScalerTriggerThresholdInSecs()),
                    Duration.ofSeconds(masterConfiguration.getScalerRuleSetRefreshThresholdInSecs()),
                    this.mantisPersistenceProvider,
                    this.resourceClusterHostActor,
                    rcActor
                ),
                "ResourceClusterScalerActor-" + clusterID.getResourceID());
        log.info("Created resource cluster scaler actor for {}", clusterID);
        return clusterScalerActor;
    }

    private ActorRef getRCActor(ClusterID clusterID) {
        return getOrCreateRCActors(clusterID).getResourceClusterActor();
    }

    private ActorRef getRCScalerActor(ClusterID clusterID) {
        return getOrCreateRCActors(clusterID).getResourceClusterScalerActor();
    }

    /** Returns the actor pair for the cluster, creating (and watching) both on first use. */
    private ActorHolder getOrCreateRCActors(ClusterID clusterID) {
        // computeIfAbsent covers both the hit and the miss path; the previous explicit
        // get()-null pre-check performed a redundant second lookup.
        return resourceClusterActorMap.computeIfAbsent(clusterID, (dontCare) -> {
            ActorRef rcActorRef = createResourceClusterActorFor(clusterID);
            getContext().watch(rcActorRef);
            ActorRef scalerActorRef = createResourceClusterScalerActorFor(clusterID, rcActorRef);
            getContext().watch(scalerActorRef);
            return ActorHolder.builder()
                .resourceClusterActor(rcActorRef)
                .resourceClusterScalerActor(scalerActorRef)
                .build();
        });
    }

    /** Snapshot of every cluster id that currently has actors instantiated. */
    private ClusterIdSet getActiveClusters() {
        return new ClusterIdSet(resourceClusterActorMap.keySet());
    }

    /** Marker message requesting the set of active cluster ids. */
    @Value
    static class ListActiveClusters {
    }

    /** Reply carrying the set of active cluster ids. */
    @Value
    static class ClusterIdSet {
        Set<ClusterID> clusterIDS;
    }

    @Override
    public SupervisorStrategy supervisorStrategy() {
        return MantisActorSupervisorStrategy.getInstance().create();
    }

    /** Pair of per-cluster actors (resource cluster actor + its scaler). */
    @Value
    @Builder
    static class ActorHolder {
        ActorRef resourceClusterActor;
        ActorRef resourceClusterScalerActor;
    }
}
| 8,084 |
/*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster;
import static akka.pattern.Patterns.pipe;
import akka.actor.AbstractActorWithTimers;
import akka.actor.Props;
import akka.japi.pf.ReceiveBuilder;
import io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode;
import io.mantisrx.master.resourcecluster.proto.GetResourceClusterSpecRequest;
import io.mantisrx.master.resourcecluster.proto.ListResourceClusterRequest;
import io.mantisrx.master.resourcecluster.proto.MantisResourceClusterSpec.SkuTypeSpec;
import io.mantisrx.master.resourcecluster.proto.ProvisionResourceClusterRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterAPIProto.DeleteResourceClusterRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterAPIProto.DeleteResourceClusterResponse;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterAPIProto.GetResourceClusterResponse;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterAPIProto.ListResourceClustersResponse;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterProvisionSubmissionResponse;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleRuleProto;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleRuleProto.CreateAllResourceClusterScaleRulesRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleRuleProto.CreateResourceClusterScaleRuleRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleRuleProto.GetResourceClusterScaleRulesRequest;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleRuleProto.GetResourceClusterScaleRulesResponse;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleSpec;
import io.mantisrx.master.resourcecluster.proto.ScaleResourceRequest;
import io.mantisrx.master.resourcecluster.proto.UpgradeClusterContainersRequest;
import io.mantisrx.master.resourcecluster.proto.UpgradeClusterContainersResponse;
import io.mantisrx.master.resourcecluster.resourceprovider.ResourceClusterProvider;
import io.mantisrx.master.resourcecluster.resourceprovider.ResourceClusterProviderUpgradeRequest;
import io.mantisrx.master.resourcecluster.writable.RegisteredResourceClustersWritable;
import io.mantisrx.master.resourcecluster.writable.ResourceClusterScaleRulesWritable;
import io.mantisrx.master.resourcecluster.writable.ResourceClusterScaleRulesWritable.ResourceClusterScaleRulesWritableBuilder;
import io.mantisrx.master.resourcecluster.writable.ResourceClusterSpecWritable;
import io.mantisrx.server.master.persistence.IMantisPersistenceProvider;
import io.mantisrx.shaded.com.google.common.base.Strings;
import java.io.IOException;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
/**
* This actor is responsible to translate requests for resource cluster related operations from API server and other
* actors to binded resource cluster provider implementation.
*/
@Slf4j
public class ResourceClustersHostManagerActor extends AbstractActorWithTimers {
    /**
     * Creates the actor {@link Props} for a {@link ResourceClustersHostManagerActor}.
     *
     * @param resourceClusterProvider provider implementation that performs the underlying
     *                                provision/scale/upgrade operations
     * @param persistenceProvider     persistence layer storing cluster specs and scale rules
     */
    public static Props props(
        final ResourceClusterProvider resourceClusterProvider,
        final IMantisPersistenceProvider persistenceProvider) {
        // TODO(andyz): investigate atlas metered-mailbox.
        return Props.create(ResourceClustersHostManagerActor.class, resourceClusterProvider, persistenceProvider);
    }
private final ResourceClusterProvider resourceClusterProvider;
private final IMantisPersistenceProvider resourceClusterStorageProvider;
    /**
     * @param resourceClusterProvider provider used for provisioning/scaling cluster hosts
     * @param resourceStorageProvider persistence layer for cluster specs and scale rules
     */
    public ResourceClustersHostManagerActor(
        final ResourceClusterProvider resourceClusterProvider,
        final IMantisPersistenceProvider resourceStorageProvider) {
        this.resourceClusterProvider = resourceClusterProvider;
        this.resourceClusterStorageProvider = resourceStorageProvider;
    }
    /** Message routing table: each request type is dispatched to its handler method below. */
    @Override
    public Receive createReceive() {
        return ReceiveBuilder.create()
            .match(ProvisionResourceClusterRequest.class, this::onProvisionResourceClusterRequest)
            .match(ListResourceClusterRequest.class, this::onListResourceClusterRequest)
            .match(GetResourceClusterSpecRequest.class, this::onGetResourceClusterSpecRequest)
            .match(DeleteResourceClusterRequest.class, this::onDeleteResourceCluster)
            // Scale rule section
            .match(CreateAllResourceClusterScaleRulesRequest.class, this::onCreateAllResourceClusterScaleRulesRequest)
            .match(CreateResourceClusterScaleRuleRequest.class, this::onCreateResourceClusterScaleRuleRequest)
            .match(GetResourceClusterScaleRulesRequest.class, this::onGetResourceClusterScaleRulesRequest)
            .match(ResourceClusterProvisionSubmissionResponse.class, this::onResourceClusterProvisionResponse)
            .match(ScaleResourceRequest.class, this::onScaleResourceClusterRequest)
            // Upgrade section
            .match(UpgradeClusterContainersRequest.class, this::onUpgradeClusterContainersRequest)
            .build();
    }
private void onCreateResourceClusterScaleRuleRequest(CreateResourceClusterScaleRuleRequest req) {
try {
ResourceClusterScaleSpec ruleSpec = ResourceClusterScaleSpec.builder()
.maxSize(req.getRule().getMaxSize())
.minSize(req.getRule().getMinSize())
.minIdleToKeep(req.getRule().getMinIdleToKeep())
.maxIdleToKeep(req.getRule().getMaxIdleToKeep())
.coolDownSecs(req.getRule().getCoolDownSecs())
.skuId(req.getRule().getSkuId())
.clusterId(req.getRule().getClusterId())
.build();
getSender().tell(toGetResourceClusterScaleRulesResponse(resourceClusterStorageProvider.registerResourceClusterScaleRule(ruleSpec)), getSelf());
} catch (Exception err) {
log.error("Error from registerResourceClusterScaleRule: {}", req, err);
GetResourceClusterScaleRulesResponse response =
GetResourceClusterScaleRulesResponse
.builder()
.message(err.getMessage())
.responseCode(ResponseCode.SERVER_ERROR)
.build();
getSender().tell(response, getSelf());
}
}
private void onGetResourceClusterScaleRulesRequest(GetResourceClusterScaleRulesRequest req) {
try {
final GetResourceClusterScaleRulesResponse response =
toGetResourceClusterScaleRulesResponse(
resourceClusterStorageProvider.getResourceClusterScaleRules(req.getClusterId()));
getSender().tell(response, getSelf());
} catch (IOException e) {
log.error("Error from getResourceClusterScaleRules: {}", req, e);
GetResourceClusterScaleRulesResponse errorResponse =
GetResourceClusterScaleRulesResponse
.builder()
.message(e.getMessage())
.responseCode(ResponseCode.SERVER_ERROR)
.build();
getSender().tell(errorResponse, getSelf());
}
}
private void onCreateAllResourceClusterScaleRulesRequest(CreateAllResourceClusterScaleRulesRequest req) {
ResourceClusterScaleRulesWritableBuilder rulesBuilder = ResourceClusterScaleRulesWritable.builder()
.clusterId(req.getClusterId());
req.getRules().forEach(r -> rulesBuilder.scaleRule(
r.getSkuId().getResourceID(),
ResourceClusterScaleSpec.builder()
.maxSize(r.getMaxSize())
.minSize(r.getMinSize())
.minIdleToKeep(r.getMinIdleToKeep())
.maxIdleToKeep(r.getMaxIdleToKeep())
.coolDownSecs(r.getCoolDownSecs())
.skuId(r.getSkuId())
.clusterId(r.getClusterId())
.build()));
GetResourceClusterScaleRulesResponse response;
try {
response =
toGetResourceClusterScaleRulesResponse(
resourceClusterStorageProvider.registerResourceClusterScaleRule(rulesBuilder.build()));
} catch (IOException e) {
log.error("Error from registerResourceClusterScaleRule: {}", req, e);
response =
GetResourceClusterScaleRulesResponse.builder()
.message(e.getMessage())
.responseCode(ResponseCode.SERVER_ERROR)
.build();
}
sender().tell(response, self());
}
private static ResourceClusterScaleRuleProto.GetResourceClusterScaleRulesResponse toGetResourceClusterScaleRulesResponse(
ResourceClusterScaleRulesWritable rules) {
return GetResourceClusterScaleRulesResponse.builder()
.responseCode(ResponseCode.SUCCESS)
.clusterId(rules.getClusterId())
.rules(rules.getScaleRules().entrySet().stream().map(kv ->
ResourceClusterScaleRuleProto.ResourceClusterScaleRule.builder()
.clusterId(kv.getValue().getClusterId())
.coolDownSecs(kv.getValue().getCoolDownSecs())
.maxIdleToKeep(kv.getValue().getMaxIdleToKeep())
.minIdleToKeep(kv.getValue().getMinIdleToKeep())
.maxSize(kv.getValue().getMaxSize())
.minSize(kv.getValue().getMinSize())
.skuId(kv.getValue().getSkuId())
.build())
.collect(Collectors.toList()))
.build();
}
    /** Hands async provision results back to the provider's own response handler. */
    private void onResourceClusterProvisionResponse(ResourceClusterProvisionSubmissionResponse resp) {
        this.resourceClusterProvider.getResponseHandler().handleProvisionResponse(resp);
    }
private void onDeleteResourceCluster(DeleteResourceClusterRequest req) {
/*
Proper cluster deletion requires handling various cleanups e.g.:
* Migrate existing jobs.
* Un-provision cluster resources (nodes, network, storage e.g.).
* Update internal tracking state and persistent data.
For now this API will only serve the persistence layer update.
*/
try {
this.resourceClusterStorageProvider.deregisterCluster(req.getClusterId());
DeleteResourceClusterResponse response =
DeleteResourceClusterResponse
.builder()
.responseCode(ResponseCode.SUCCESS)
.build();
getSender().tell(response, getSelf());
} catch (IOException err) {
DeleteResourceClusterResponse response =
DeleteResourceClusterResponse
.builder()
.message(err.getMessage())
.responseCode(ResponseCode.SERVER_ERROR)
.build();
getSender().tell(response, getSelf());
}
}
private void onListResourceClusterRequest(ListResourceClusterRequest req) {
try {
RegisteredResourceClustersWritable clustersW =
this.resourceClusterStorageProvider.getRegisteredResourceClustersWritable();
ListResourceClustersResponse response =
ListResourceClustersResponse.builder()
.responseCode(ResponseCode.SUCCESS)
.registeredResourceClusters(clustersW.getClusters().entrySet().stream().map(
kv -> ListResourceClustersResponse.RegisteredResourceCluster.builder()
.id(kv.getValue().getClusterId())
.version(kv.getValue().getVersion())
.build())
.collect(Collectors.toList()))
.build();
getSender().tell(response, getSelf());
} catch (IOException err) {
ListResourceClustersResponse response =
ListResourceClustersResponse
.builder()
.message(err.getMessage())
.responseCode(ResponseCode.SERVER_ERROR)
.build();
getSender().tell(response, getSelf());
}
}
private void onGetResourceClusterSpecRequest(GetResourceClusterSpecRequest req) {
try {
ResourceClusterSpecWritable specW =
this.resourceClusterStorageProvider.getResourceClusterSpecWritable(req.getId());
final GetResourceClusterResponse response;
if (specW == null) {
response = GetResourceClusterResponse.builder()
.responseCode(ResponseCode.CLIENT_ERROR_NOT_FOUND)
.build();
} else {
response = GetResourceClusterResponse.builder()
.responseCode(ResponseCode.SUCCESS)
.clusterSpec(specW.getClusterSpec())
.build();
}
getSender().tell(response, getSelf());
} catch (IOException err) {
GetResourceClusterResponse response = GetResourceClusterResponse.builder()
.responseCode(ResponseCode.SERVER_ERROR)
.message(err.getMessage())
.build();
getSender().tell(response, getSelf());
}
}
private void onProvisionResourceClusterRequest(ProvisionResourceClusterRequest req) {
/*
For a provision request, the following steps will be taken:
1. Persist the cluster request with spec to the resource storage provider.
2. Once persisted, reply to sender (e.g. http server route) to confirm the accepted request.
3. Queue the long-running provision task via resource cluster provider and register callback to self.
4. Handle provision callback and error handling.
(only logging for now as agent registration will happen directly inside agent).
*/
log.info("Entering onProvisionResourceClusterRequest: " + req);
// For now only full spec is supported during provision stage.
Optional<String> validationResultO = validateClusterSpec(req);
if (validationResultO.isPresent()) {
pipe(
CompletableFuture.completedFuture(GetResourceClusterResponse.builder()
.responseCode(ResponseCode.CLIENT_ERROR)
.message(validationResultO.get())
.build()),
getContext().dispatcher())
.to(getSender());
log.info("Invalid cluster spec, return client error. Req: {}", req.getClusterId());
log.debug("Full invalid Req: {}", req);
return;
}
ResourceClusterSpecWritable specWritable = ResourceClusterSpecWritable.builder()
.clusterSpec(req.getClusterSpec())
.version("")
.id(req.getClusterId())
.build();
// Cluster spec is returned for API request.
GetResourceClusterResponse response;
try {
ResourceClusterSpecWritable specW = this.resourceClusterStorageProvider.registerAndUpdateClusterSpec(specWritable);
response =
GetResourceClusterResponse
.builder()
.responseCode(ResponseCode.SUCCESS)
.clusterSpec(specW.getClusterSpec())
.build();
} catch (IOException err) {
response = GetResourceClusterResponse.builder()
.responseCode(ResponseCode.SERVER_ERROR)
.message(err.getMessage())
.build();
}
getSender().tell(response, getSelf());
if (response.responseCode.equals(ResponseCode.SUCCESS)) {
// Provision response is directed back to this actor to handle its submission result.
CompletionStage<ResourceClusterProvisionSubmissionResponse> provisionFut =
this.resourceClusterProvider
.provisionClusterIfNotPresent(req)
.exceptionally(err -> ResourceClusterProvisionSubmissionResponse.builder().error(err).build());
pipe(provisionFut, getContext().dispatcher()).to(getSelf());
}
}
private void onScaleResourceClusterRequest(ScaleResourceRequest req) {
log.info("Entering onScaleResourceClusterRequest: " + req);
// [Notes] for scaling-up the request can go straight into provider to increase desire size.
// FOr scaling-down the decision requires getting idle hosts first.
pipe(this.resourceClusterProvider.scaleResource(req), getContext().dispatcher()).to(getSender());
}
private void onUpgradeClusterContainersRequest(UpgradeClusterContainersRequest req) {
log.info("Entering onScaleResourceClusterRequest: " + req);
// [Notes] for scaling-up the request can go straight into provider to increase desire size.
// For scaling-down the decision requires getting idle hosts first.
// if enableSkuSpecUpgrade is true, first fetch the latest spec to override the sku spec during upgrade
// workflow.
CompletionStage<UpgradeClusterContainersResponse> upgradeFut;
if (req.isEnableSkuSpecUpgrade()) {
try {
ResourceClusterSpecWritable specW = this.resourceClusterStorageProvider.getResourceClusterSpecWritable(req.getClusterId());
if (specW == null) {
upgradeFut =
CompletableFuture.completedFuture(UpgradeClusterContainersResponse
.builder()
.responseCode(ResponseCode.CLIENT_ERROR_NOT_FOUND)
.build());
} else {
ResourceClusterProviderUpgradeRequest enrichedReq =
ResourceClusterProviderUpgradeRequest.from(req, specW.getClusterSpec());
upgradeFut = this.resourceClusterProvider.upgradeContainerResource(enrichedReq);
}
} catch (IOException err) {
upgradeFut = CompletableFuture.completedFuture(
UpgradeClusterContainersResponse
.builder()
.responseCode(ResponseCode.SERVER_ERROR)
.message(err.getMessage())
.build());
}
}
else {
log.info("Upgrading cluster image only: {}", req.getClusterId());
upgradeFut =
this.resourceClusterProvider.upgradeContainerResource(ResourceClusterProviderUpgradeRequest.from(req));
}
pipe(upgradeFut, getContext().dispatcher()).to(getSender());
}
private static Optional<String> validateClusterSpec(ProvisionResourceClusterRequest req) {
if (req.getClusterSpec() == null) {
log.error("Empty request without cluster spec: {}", req.getClusterId());
return Optional.of("cluster spec cannot be null");
}
if (!req.getClusterId().equals(req.getClusterSpec().getId())) {
log.error("Mismatch cluster id: {}, {}", req.getClusterId(), req.getClusterSpec().getId());
return Optional.of("cluster spec id doesn't match cluster id");
}
Optional<SkuTypeSpec> invalidSku = req.getClusterSpec().getSkuSpecs().stream().filter(sku ->
sku.getSkuId() == null || sku.getCapacity() == null || sku.getCpuCoreCount() < 1 ||
sku.getDiskSizeInMB() < 1 || sku.getMemorySizeInMB() < 1 || sku.getNetworkMbps() < 1 ||
Strings.isNullOrEmpty(sku.getImageId()))
.findAny();
if (invalidSku.isPresent()) {
log.error("Invalid request for cluster spec: {}, {}", req.getClusterId(), invalidSku.get());
return Optional.of("Invalid sku definition");
}
Optional<SkuTypeSpec> invalidSkuNameSpec = req.getClusterSpec().getSkuSpecs().stream().filter(sku ->
Character.isDigit(sku.getSkuId().getResourceID().charAt(sku.getSkuId().getResourceID().length() - 1)))
.findAny();
if (invalidSkuNameSpec.isPresent()) {
log.error("Invalid request for cluster spec sku id (cannot end with number): {}, {}", req.getClusterId(),
invalidSkuNameSpec.get());
return Optional.of("Invalid skuID (cannot end with number): " + invalidSkuNameSpec.get().getSkuId());
}
return Optional.empty();
}
}
| 8,085 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/ResourceClusterActor.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster;
import static java.util.stream.Collectors.groupingBy;
import akka.actor.AbstractActorWithTimers;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.actor.Status;
import akka.japi.pf.ReceiveBuilder;
import com.netflix.spectator.api.TagList;
import io.mantisrx.common.Ack;
import io.mantisrx.common.WorkerConstants;
import io.mantisrx.master.resourcecluster.proto.GetClusterIdleInstancesRequest;
import io.mantisrx.master.resourcecluster.proto.GetClusterIdleInstancesResponse;
import io.mantisrx.server.core.CacheJobArtifactsRequest;
import io.mantisrx.server.core.domain.ArtifactID;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.PagedActiveJobOverview;
import io.mantisrx.server.master.resourcecluster.ResourceCluster.NoResourceAvailableException;
import io.mantisrx.server.master.resourcecluster.ResourceCluster.ResourceOverview;
import io.mantisrx.server.master.resourcecluster.ResourceCluster.TaskExecutorNotFoundException;
import io.mantisrx.server.master.resourcecluster.ResourceCluster.TaskExecutorStatus;
import io.mantisrx.server.master.resourcecluster.TaskExecutorAllocationRequest;
import io.mantisrx.server.master.resourcecluster.TaskExecutorDisconnection;
import io.mantisrx.server.master.resourcecluster.TaskExecutorHeartbeat;
import io.mantisrx.server.master.resourcecluster.TaskExecutorID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorRegistration;
import io.mantisrx.server.master.resourcecluster.TaskExecutorReport;
import io.mantisrx.server.master.resourcecluster.TaskExecutorReport.Available;
import io.mantisrx.server.master.resourcecluster.TaskExecutorReport.Occupied;
import io.mantisrx.server.master.resourcecluster.TaskExecutorStatusChange;
import io.mantisrx.server.master.scheduler.JobMessageRouter;
import io.mantisrx.server.worker.TaskExecutorGateway.TaskNotFoundException;
import io.mantisrx.shaded.com.google.common.base.Preconditions;
import io.mantisrx.shaded.com.google.common.collect.Comparators;
import io.mantisrx.shaded.com.google.common.collect.ImmutableList;
import io.mantisrx.shaded.com.google.common.collect.ImmutableMap;
import io.vavr.Tuple;
import java.io.IOException;
import java.net.URI;
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.ToString;
import lombok.Value;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.flink.runtime.rpc.RpcService;
/**
* Akka actor implementation of ResourceCluster.
* The actor is not directly exposed to other classes. Instead, the actor is exposed via {@link ResourceClusterGatewayAkkaImpl} and
* {@link ResourceClusterAkkaImpl} classes, which pass the corresponding messages to the actor on method invocation and wait for the response
* returned by the actor. This essentially converts the actor behavior to a request/response style pattern while still
* keeping the benefits of the actor paradigm such as non-shared mutable data.
*/
@ToString(of = {"clusterID"})
@Slf4j
class ResourceClusterActor extends AbstractActorWithTimers {
private final Duration heartbeatTimeout;
private final Duration assignmentTimeout;
private final Duration disabledTaskExecutorsCheckInterval;
private final ExecutorStateManager executorStateManager;
private final Clock clock;
private final RpcService rpcService;
private final ClusterID clusterID;
private final MantisJobStore mantisJobStore;
private final Set<DisableTaskExecutorsRequest> activeDisableTaskExecutorsByAttributesRequests;
private final Set<TaskExecutorID> disabledTaskExecutors;
private final JobMessageRouter jobMessageRouter;
private final ResourceClusterActorMetrics metrics;
private final HashSet<ArtifactID> jobArtifactsToCache = new HashSet<>();
private final int maxJobArtifactsToCache;
private final String jobClustersWithArtifactCachingEnabled;
private final boolean isJobArtifactCachingEnabled;
static Props props(final ClusterID clusterID, final Duration heartbeatTimeout, Duration assignmentTimeout, Duration disabledTaskExecutorsCheckInterval, Clock clock, RpcService rpcService, MantisJobStore mantisJobStore, JobMessageRouter jobMessageRouter, int maxJobArtifactsToCache, String jobClustersWithArtifactCachingEnabled, boolean isJobArtifactCachingEnabled) {
return Props.create(ResourceClusterActor.class, clusterID, heartbeatTimeout, assignmentTimeout, disabledTaskExecutorsCheckInterval, clock, rpcService, mantisJobStore, jobMessageRouter, maxJobArtifactsToCache, jobClustersWithArtifactCachingEnabled, isJobArtifactCachingEnabled)
.withMailbox("akka.actor.metered-mailbox");
}
ResourceClusterActor(
ClusterID clusterID,
Duration heartbeatTimeout,
Duration assignmentTimeout,
Duration disabledTaskExecutorsCheckInterval,
Clock clock,
RpcService rpcService,
MantisJobStore mantisJobStore,
JobMessageRouter jobMessageRouter,
int maxJobArtifactsToCache,
String jobClustersWithArtifactCachingEnabled,
boolean isJobArtifactCachingEnabled) {
this.clusterID = clusterID;
this.heartbeatTimeout = heartbeatTimeout;
this.assignmentTimeout = assignmentTimeout;
this.disabledTaskExecutorsCheckInterval = disabledTaskExecutorsCheckInterval;
this.isJobArtifactCachingEnabled = isJobArtifactCachingEnabled;
this.clock = clock;
this.rpcService = rpcService;
this.jobMessageRouter = jobMessageRouter;
this.mantisJobStore = mantisJobStore;
this.activeDisableTaskExecutorsByAttributesRequests = new HashSet<>();
this.disabledTaskExecutors = new HashSet<>();
this.maxJobArtifactsToCache = maxJobArtifactsToCache;
this.jobClustersWithArtifactCachingEnabled = jobClustersWithArtifactCachingEnabled;
this.executorStateManager = new ExecutorStateManagerImpl();
this.metrics = new ResourceClusterActorMetrics();
}
@Override
public void preStart() throws Exception {
super.preStart();
fetchJobArtifactsToCache();
List<DisableTaskExecutorsRequest> activeRequests =
mantisJobStore.loadAllDisableTaskExecutorsRequests(clusterID);
for (DisableTaskExecutorsRequest request : activeRequests) {
onNewDisableTaskExecutorsRequest(request);
}
timers().startTimerWithFixedDelay(
String.format("periodic-disabled-task-executors-test-for-%s", clusterID.getResourceID()),
new CheckDisabledTaskExecutors("periodic"),
disabledTaskExecutorsCheckInterval);
timers().startTimerWithFixedDelay(
"periodic-resource-overview-metrics-publisher",
new PublishResourceOverviewMetricsRequest(),
Duration.ofMinutes(1));
}
@Override
public Receive createReceive() {
return
ReceiveBuilder
.create()
.match(GetRegisteredTaskExecutorsRequest.class,
req -> {
sender().tell(getTaskExecutors(filterByAttrs(req).and(ExecutorStateManager.isRegistered)), self());
})
.match(GetBusyTaskExecutorsRequest.class, req -> sender().tell(getTaskExecutors(filterByAttrs(req).and(ExecutorStateManager.isBusy)), self()))
.match(GetAvailableTaskExecutorsRequest.class, req -> sender().tell(getTaskExecutors(filterByAttrs(req).and(ExecutorStateManager.isAvailable)), self()))
.match(GetDisabledTaskExecutorsRequest.class, req -> sender().tell(getTaskExecutors(filterByAttrs(req).and(ExecutorStateManager.isDisabled)), self()))
.match(GetUnregisteredTaskExecutorsRequest.class, req -> sender().tell(getTaskExecutors(filterByAttrs(req).and(ExecutorStateManager.unregistered)), self()))
.match(GetActiveJobsRequest.class, this::getActiveJobs)
.match(GetTaskExecutorStatusRequest.class, this::getTaskExecutorStatus)
.match(GetClusterUsageRequest.class,
metrics.withTracking(req ->
sender().tell(this.executorStateManager.getClusterUsage(req), self())))
.match(GetClusterIdleInstancesRequest.class,
metrics.withTracking(req ->
sender().tell(onGetClusterIdleInstancesRequest(req), self())))
.match(GetAssignedTaskExecutorRequest.class, this::onAssignedTaskExecutorRequest)
.match(Ack.class, ack -> log.info("Received ack from {}", sender()))
.match(TaskExecutorAssignmentTimeout.class, this::onTaskExecutorAssignmentTimeout)
.match(TaskExecutorRegistration.class, metrics.withTracking(this::onTaskExecutorRegistration))
.match(InitializeTaskExecutorRequest.class, metrics.withTracking(this::onTaskExecutorInitialization))
.match(TaskExecutorHeartbeat.class, metrics.withTracking(this::onHeartbeat))
.match(TaskExecutorStatusChange.class, this::onTaskExecutorStatusChange)
.match(TaskExecutorDisconnection.class, metrics.withTracking(this::onTaskExecutorDisconnection))
.match(HeartbeatTimeout.class, metrics.withTracking(this::onTaskExecutorHeartbeatTimeout))
.match(TaskExecutorAssignmentRequest.class, metrics.withTracking(this::onTaskExecutorAssignmentRequest))
.match(ResourceOverviewRequest.class, this::onResourceOverviewRequest)
.match(TaskExecutorInfoRequest.class, this::onTaskExecutorInfoRequest)
.match(TaskExecutorGatewayRequest.class, metrics.withTracking(this::onTaskExecutorGatewayRequest))
.match(DisableTaskExecutorsRequest.class, this::onNewDisableTaskExecutorsRequest)
.match(CheckDisabledTaskExecutors.class, this::findAndMarkDisabledTaskExecutors)
.match(ExpireDisableTaskExecutorsRequest.class, this::onDisableTaskExecutorsRequestExpiry)
.match(GetTaskExecutorWorkerMappingRequest.class, req -> sender().tell(getTaskExecutorWorkerMapping(req.getAttributes()), self()))
.match(PublishResourceOverviewMetricsRequest.class, this::onPublishResourceOverviewMetricsRequest)
.match(CacheJobArtifactsOnTaskExecutorRequest.class, metrics.withTracking(this::onCacheJobArtifactsOnTaskExecutorRequest))
.match(AddNewJobArtifactsToCacheRequest.class, this::onAddNewJobArtifactsToCacheRequest)
.match(RemoveJobArtifactsToCacheRequest.class, this::onRemoveJobArtifactsToCacheRequest)
.match(GetJobArtifactsToCacheRequest.class, req -> sender().tell(new ArtifactList(new ArrayList<>(jobArtifactsToCache)), self()))
.build();
}
private void onAddNewJobArtifactsToCacheRequest(AddNewJobArtifactsToCacheRequest req) {
try {
Set<ArtifactID> newArtifacts = new HashSet<>(req.artifacts);
newArtifacts.removeAll(jobArtifactsToCache);
if (!newArtifacts.isEmpty()) {
if(jobArtifactsToCache.size() < maxJobArtifactsToCache) {
log.info("Storing and caching new artifacts: {}", newArtifacts);
jobArtifactsToCache.addAll(newArtifacts);
mantisJobStore.addNewJobArtifactsToCache(req.getClusterID(), ImmutableList.copyOf(jobArtifactsToCache));
refreshTaskExecutorJobArtifactCache();
} else {
log.warn("Cannot enable caching for artifacts {}. Max number ({}) of job artifacts to cache reached.", newArtifacts, maxJobArtifactsToCache);
metrics.incrementCounter(
ResourceClusterActorMetrics.MAX_JOB_ARTIFACTS_TO_CACHE_REACHED,
TagList.create(ImmutableMap.of(
"resourceCluster",
clusterID.getResourceID())));
}
}
sender().tell(Ack.getInstance(), self());
} catch (IOException e) {
log.warn("Cannot add new job artifacts {} to cache in cluster: {}", req.getArtifacts(), req.getClusterID(), e);
}
}
private void refreshTaskExecutorJobArtifactCache() {
// TODO: implement rate control to confirm we are not overwhelming the TEs with excessive caching requests
getTaskExecutors(ExecutorStateManager.isAvailable).getTaskExecutors().forEach(taskExecutorID ->
self().tell(new CacheJobArtifactsOnTaskExecutorRequest(taskExecutorID, clusterID), self()));
}
private void onRemoveJobArtifactsToCacheRequest(RemoveJobArtifactsToCacheRequest req) {
try {
mantisJobStore.removeJobArtifactsToCache(req.getClusterID(), req.getArtifacts());
req.artifacts.forEach(jobArtifactsToCache::remove);
sender().tell(Ack.getInstance(), self());
} catch (IOException e) {
log.warn("Cannot remove job artifacts {} to cache in cluster: {}", req.getArtifacts(), req.getClusterID(), e);
}
}
private void fetchJobArtifactsToCache() {
try {
mantisJobStore.getJobArtifactsToCache(clusterID)
.stream()
.map(ArtifactID::of)
.forEach(jobArtifactsToCache::add);
} catch (IOException e) {
log.warn("Cannot refresh job artifacts to cache in cluster: {}", clusterID, e);
}
}
private GetClusterIdleInstancesResponse onGetClusterIdleInstancesRequest(GetClusterIdleInstancesRequest req) {
log.info("Computing idle instance list: {}", req);
if (!req.getClusterID().equals(this.clusterID)) {
throw new RuntimeException(String.format("Mismatch cluster ids %s, %s", req.getClusterID(), this.clusterID));
}
List<TaskExecutorID> instanceList = this.executorStateManager.getIdleInstanceList(req);
GetClusterIdleInstancesResponse res = GetClusterIdleInstancesResponse.builder()
.instanceIds(instanceList)
.clusterId(this.clusterID)
.skuId(req.getSkuId())
.build();
log.info("Return idle instance list: {}", res);
return res;
}
private TaskExecutorsList getTaskExecutors(Predicate<Entry<TaskExecutorID, TaskExecutorState>> predicate) {
return new TaskExecutorsList(this.executorStateManager.getTaskExecutors(predicate));
}
private void getActiveJobs(GetActiveJobsRequest req) {
List<String> pagedList = this.executorStateManager.getActiveJobs(req);
PagedActiveJobOverview res =
new PagedActiveJobOverview(
pagedList,
req.getStartingIndex().orElse(0) + pagedList.size()
);
log.info("Returning getActiveJobs res starting at {}: {}", req.getStartingIndex(), res.getActiveJobs().size());
sender().tell(res, self());
}
private void onTaskExecutorInfoRequest(TaskExecutorInfoRequest request) {
if (request.getTaskExecutorID() != null) {
TaskExecutorState state =
this.executorStateManager.getIncludeArchived(request.getTaskExecutorID());
if (state != null && state.getRegistration() != null) {
sender().tell(state.getRegistration(), self());
} else {
sender().tell(new Status.Failure(new Exception(String.format("No task executor state for %s",
request.getTaskExecutorID()))), self());
}
} else {
Optional<TaskExecutorRegistration> taskExecutorRegistration =
this.executorStateManager
.findFirst(
kv -> kv.getValue().getRegistration() != null &&
kv.getValue().getRegistration().getHostname().equals(request.getHostName()))
.map(Entry::getValue)
.map(TaskExecutorState::getRegistration);
if (taskExecutorRegistration.isPresent()) {
sender().tell(taskExecutorRegistration.get(), self());
} else {
sender().tell(new Status.Failure(new Exception(String.format("Unknown task executor for hostname %s", request.getHostName()))), self());
}
}
}
private void onAssignedTaskExecutorRequest(GetAssignedTaskExecutorRequest request) {
Optional<TaskExecutorID> matchedTaskExecutor =
this.executorStateManager.findFirst(
e -> e.getValue().isRunningOrAssigned(request.getWorkerId())).map(Entry::getKey);
if (matchedTaskExecutor.isPresent()) {
sender().tell(matchedTaskExecutor.get(), self());
} else {
sender().tell(new Status.Failure(new TaskNotFoundException(request.getWorkerId())),
self());
}
}
private void onTaskExecutorGatewayRequest(TaskExecutorGatewayRequest request) {
TaskExecutorState state = this.executorStateManager.get(request.getTaskExecutorID());
if (state == null) {
sender().tell(new NullPointerException("Null TaskExecutorState for: " + request.getTaskExecutorID()), self());
} else {
try {
if (state.isRegistered()) {
sender().tell(state.getGatewayAsync(), self());
} else {
sender().tell(
new Status.Failure(new IllegalStateException("Unregistered TaskExecutor: " + request.getTaskExecutorID())),
self());
}
} catch (Exception e) {
log.error("onTaskExecutorGatewayRequest error: {}", request, e);
metrics.incrementCounter(
ResourceClusterActorMetrics.TE_CONNECTION_FAILURE,
TagList.create(ImmutableMap.of(
"resourceCluster",
clusterID.getResourceID(),
"taskExecutor",
request.getTaskExecutorID().getResourceId())));
}
}
}
// custom equals function to check if the existing set already has the request under consideration.
private boolean addNewDisableTaskExecutorsRequest(DisableTaskExecutorsRequest newRequest) {
if (newRequest.isRequestByAttributes()) {
log.info("Req with attributes {}", newRequest);
for (DisableTaskExecutorsRequest existing: activeDisableTaskExecutorsByAttributesRequests) {
if (existing.targetsSameTaskExecutorsAs(newRequest)) {
return false;
}
}
Preconditions.checkState(activeDisableTaskExecutorsByAttributesRequests.add(newRequest), "activeDisableTaskExecutorRequests cannot contain %s", newRequest);
return true;
} else if (newRequest.getTaskExecutorID().isPresent() && !disabledTaskExecutors.contains(newRequest.getTaskExecutorID().get())) {
log.info("Req with id {}", newRequest);
disabledTaskExecutors.add(newRequest.getTaskExecutorID().get());
return true;
}
log.info("No Req {}", newRequest);
return false;
}
private void onNewDisableTaskExecutorsRequest(DisableTaskExecutorsRequest request) {
ActorRef sender = sender();
if (addNewDisableTaskExecutorsRequest(request)) {
try {
log.info("New req to add {}", request);
// store the request in a persistent store in order to retrieve it if the node goes down
mantisJobStore.storeNewDisabledTaskExecutorsRequest(request);
// figure out the time to expire the current request
Duration toExpiry = Comparators.max(Duration.between(clock.instant(), request.getExpiry()), Duration.ZERO);
// setup a timer to clear it after a given period
getTimers().startSingleTimer(
getExpiryKeyFor(request),
new ExpireDisableTaskExecutorsRequest(request),
toExpiry);
findAndMarkDisabledTaskExecutorsFor(request);
sender.tell(Ack.getInstance(), self());
} catch (IOException e) {
sender().tell(new Status.Failure(e), self());
}
} else {
sender.tell(Ack.getInstance(), self());
}
}
private String getExpiryKeyFor(DisableTaskExecutorsRequest request) {
return "ExpireDisableTaskExecutorsRequest-" + request;
}
private void findAndMarkDisabledTaskExecutorsFor(DisableTaskExecutorsRequest request) {
if (request.isRequestByAttributes()) {
findAndMarkDisabledTaskExecutors(new CheckDisabledTaskExecutors("new_request"));
} else if (request.getTaskExecutorID().isPresent()) {
final TaskExecutorID taskExecutorID = request.getTaskExecutorID().get();
final TaskExecutorState state = this.executorStateManager.get(taskExecutorID);
if (state == null) {
// If the TE is unknown by mantis, delete it from state
disabledTaskExecutors.remove(taskExecutorID);
self().tell(new ExpireDisableTaskExecutorsRequest(request), self());
} else {
log.info("Marking task executor {} as disabled", taskExecutorID);
state.onNodeDisabled();
}
}
}
private void findAndMarkDisabledTaskExecutors(CheckDisabledTaskExecutors r) {
log.info("Checking disabled task executors for Cluster {} because of {}", clusterID.getResourceID(), r.getReason());
final Instant now = clock.instant();
for (DisableTaskExecutorsRequest request : activeDisableTaskExecutorsByAttributesRequests) {
if (request.isExpired(now)) {
self().tell(new ExpireDisableTaskExecutorsRequest(request), self());
} else {
// go and mark all task executors that match the filter as disabled
this.executorStateManager.getActiveExecutorEntry().forEach(idAndState -> {
if (request.covers(idAndState.getValue().getRegistration())) {
if (idAndState.getValue().onNodeDisabled()) {
log.info("Marking task executor {} as disabled", idAndState.getKey());
}
}
});
}
}
}
private void onDisableTaskExecutorsRequestExpiry(ExpireDisableTaskExecutorsRequest request) {
try {
log.info("Expiring Disable Task Executors Request {}", request.getRequest());
getTimers().cancel(getExpiryKeyFor(request.getRequest()));
if (activeDisableTaskExecutorsByAttributesRequests.remove(request.getRequest()) || (request.getRequest().getTaskExecutorID().isPresent() && disabledTaskExecutors.remove(request.getRequest().getTaskExecutorID().get()))) {
mantisJobStore.deleteExpiredDisableTaskExecutorsRequest(request.getRequest());
}
} catch (Exception e) {
log.error("Failed to delete expired {}", request.getRequest());
}
}
private Map<TaskExecutorID, WorkerId> getTaskExecutorWorkerMapping(Map<String, String> attributes) {
final Map<TaskExecutorID, WorkerId> result = new HashMap<>();
this.executorStateManager.getActiveExecutorEntry().forEach(idAndState -> {
if (idAndState.getValue().getRegistration() != null && idAndState.getValue().getRegistration().containsAttributes(attributes)) {
if (idAndState.getValue().isRunningTask()) {
result.put(idAndState.getKey(), idAndState.getValue().getWorkerId());
}
}
});
return result;
}
private void onTaskExecutorInitialization(InitializeTaskExecutorRequest request) {
log.info("Initializing taskExecutor {} for the resource cluster {}", request.getTaskExecutorID(), this);
ActorRef sender = sender();
try {
TaskExecutorRegistration registration =
mantisJobStore.getTaskExecutor(request.getTaskExecutorID());
setupTaskExecutorStateIfNecessary(request.getTaskExecutorID());
self().tell(registration, self());
self().tell(
new TaskExecutorStatusChange(
registration.getTaskExecutorID(),
registration.getClusterID(),
TaskExecutorReport.occupied(request.getWorkerId())),
self());
sender.tell(Ack.getInstance(), self());
} catch (Exception e) {
log.error("Failed to initialize taskExecutor {}; all retries exhausted", request.getTaskExecutorID(), e);
sender.tell(new Status.Failure(e), self());
}
}
private void onTaskExecutorRegistration(TaskExecutorRegistration registration) {
setupTaskExecutorStateIfNecessary(registration.getTaskExecutorID());
log.info("Request for registering on resource cluster {}: {}.", this, registration);
try {
final TaskExecutorID taskExecutorID = registration.getTaskExecutorID();
final TaskExecutorState state = this.executorStateManager.get(taskExecutorID);
boolean stateChange = state.onRegistration(registration);
mantisJobStore.storeNewTaskExecutor(registration);
if (stateChange) {
if (state.isAvailable()) {
this.executorStateManager.tryMarkAvailable(taskExecutorID);
}
// check if the task executor has been marked as 'Disabled'
if (isTaskExecutorDisabled(registration)) {
log.info("Newly registered task executor {} was already marked for disabling.", registration.getTaskExecutorID());
state.onNodeDisabled();
}
updateHeartbeatTimeout(registration.getTaskExecutorID());
}
log.info("Successfully registered {} with the resource cluster {}", registration.getTaskExecutorID(), this);
if (!jobArtifactsToCache.isEmpty() && isJobArtifactCachingEnabled) {
self().tell(new CacheJobArtifactsOnTaskExecutorRequest(taskExecutorID, clusterID), self());
}
sender().tell(Ack.getInstance(), self());
} catch (Exception e) {
sender().tell(new Status.Failure(e), self());
}
}
private boolean isTaskExecutorDisabled(TaskExecutorRegistration registration) {
for (DisableTaskExecutorsRequest request: activeDisableTaskExecutorsByAttributesRequests) {
if (request.covers(registration)) {
return true;
}
}
return disabledTaskExecutors.contains(registration.getTaskExecutorID());
}
private void onHeartbeat(TaskExecutorHeartbeat heartbeat) {
log.debug("Received heartbeat {} from task executor {}", heartbeat, heartbeat.getTaskExecutorID());
setupTaskExecutorStateIfNecessary(heartbeat.getTaskExecutorID());
try {
final TaskExecutorID taskExecutorID = heartbeat.getTaskExecutorID();
final TaskExecutorState state = this.executorStateManager.get(taskExecutorID);
if (state.getRegistration() == null || !state.isRegistered()) {
TaskExecutorRegistration registration = this.mantisJobStore.getTaskExecutor(heartbeat.getTaskExecutorID());
if (registration != null) {
log.debug("Found registration {} for task executor {}", registration, heartbeat.getTaskExecutorID());
Preconditions.checkState(state.onRegistration(registration));
} else {
// TODO(sundaram): add a metric
log.warn("Received heartbeat from unknown task executor {}", heartbeat.getTaskExecutorID());
sender().tell(new Status.Failure(new TaskExecutorNotFoundException(taskExecutorID)), self());
return;
}
} else {
log.debug("Found registration {} for registered task executor {}",
state.getRegistration(), heartbeat.getTaskExecutorID());
}
boolean stateChange = state.onHeartbeat(heartbeat);
if (stateChange && state.isAvailable()) {
this.executorStateManager.tryMarkAvailable(taskExecutorID);
}
updateHeartbeatTimeout(heartbeat.getTaskExecutorID());
log.debug("Successfully processed heartbeat {} from task executor {}", heartbeat, heartbeat.getTaskExecutorID());
sender().tell(Ack.getInstance(), self());
} catch (Exception e) {
sender().tell(new Status.Failure(e), self());
}
}
/**
 * Applies a task-executor status change (available/occupied) to the tracked state and moves
 * the executor into or out of the schedulable pool accordingly. Re-arms the heartbeat timer,
 * since a status change is an interaction initiated by the executor. Acks on success; an
 * illegal state transition is reported back as a Status.Failure.
 */
private void onTaskExecutorStatusChange(TaskExecutorStatusChange statusChange) {
    setupTaskExecutorStateIfNecessary(statusChange.getTaskExecutorID());
    try {
        final TaskExecutorID taskExecutorID = statusChange.getTaskExecutorID();
        final TaskExecutorState state = this.executorStateManager.get(taskExecutorID);
        boolean stateChange = state.onTaskExecutorStatusChange(statusChange);
        if (stateChange) {
            if (state.isAvailable()) {
                this.executorStateManager.tryMarkAvailable(taskExecutorID);
            } else {
                this.executorStateManager.tryMarkUnavailable(taskExecutorID);
            }
        }
        updateHeartbeatTimeout(statusChange.getTaskExecutorID());
        sender().tell(Ack.getInstance(), self());
    } catch (IllegalStateException e) {
        sender().tell(new Status.Failure(e), self());
    }
}
/**
 * Tries to match the allocation request to the best-fitting available task executor.
 *
 * <p>On a match the executor is marked assigned, an assignment timer is started so the
 * executor is returned to the pool if the scheduler never follows through, and the matched
 * executor ID is sent back to the sender. When nothing fits, a NO_RESOURCES_AVAILABLE
 * counter is bumped and the sender receives a NoResourceAvailableException failure.
 */
private void onTaskExecutorAssignmentRequest(TaskExecutorAssignmentRequest request) {
    Optional<Pair<TaskExecutorID, TaskExecutorState>> matchedExecutor =
        this.executorStateManager.findBestFit(request);
    if (matchedExecutor.isPresent()) {
        log.info("matched executor {} for request {}", matchedExecutor.get().getKey(), request);
        // trigger job artifact cache if needed
        if(shouldCacheJobArtifacts(request)) {
            self().tell(new AddNewJobArtifactsToCacheRequest(clusterID, Collections.singletonList(request.getAllocationRequest().getJobMetadata().getJobArtifact())), self());
        }
        matchedExecutor.get().getValue().onAssignment(request.getAllocationRequest().getWorkerId());
        // let's give some time for the assigned executor to be scheduled work. otherwise, the assigned executor
        // will be returned back to the pool.
        getTimers().startSingleTimer(
            "Assignment-" + matchedExecutor.get().getKey().toString(),
            new TaskExecutorAssignmentTimeout(matchedExecutor.get().getKey()),
            assignmentTimeout);
        sender().tell(matchedExecutor.get().getKey(), self());
    } else {
        // No fit: record which job/worker failed to find resources for observability.
        metrics.incrementCounter(
            ResourceClusterActorMetrics.NO_RESOURCES_AVAILABLE,
            TagList.create(ImmutableMap.of(
                "resourceCluster",
                clusterID.getResourceID(),
                "workerId",
                request.getAllocationRequest().getWorkerId().getId(),
                "jobCluster",
                request.getAllocationRequest().getWorkerId().getJobCluster(),
                "cpuCores",
                String.valueOf(request.getAllocationRequest().getMachineDefinition().getCpuCores()))));
        sender().tell(new Status.Failure(new NoResourceAvailableException(
            String.format("No resource available for request %s: resource overview: %s", request,
                getResourceOverview()))), self());
    }
}
/**
 * Handles expiry of the assignment timer: if the assigned executor never started running a
 * task, its assignment is released so it can be returned to the available pool. An executor
 * that already transitioned to running needs no action.
 */
private void onTaskExecutorAssignmentTimeout(TaskExecutorAssignmentTimeout request) {
    final TaskExecutorID executorID = request.getTaskExecutorID();
    final TaskExecutorState executorState = this.executorStateManager.get(executorID);
    if (executorState == null) {
        log.error("TaskExecutor lost during task assignment: {}", request);
        return;
    }
    if (executorState.isRunningTask()) {
        log.debug("TaskExecutor {} entered running state already; no need to act", executorID);
        return;
    }
    try {
        // Release the stale assignment; only re-pool the executor on an actual state change.
        if (executorState.onUnassignment()) {
            this.executorStateManager.tryMarkAvailable(executorID);
        }
    } catch (IllegalStateException e) {
        if (executorState.isRegistered()) {
            log.error("Failed to un-assign registered taskExecutor {}", executorID, e);
        } else {
            log.debug("Failed to un-assign unRegistered taskExecutor {}", executorID, e);
        }
    }
}
/** Replies to the sender with the current aggregate resource overview for this cluster. */
private void onResourceOverviewRequest(ResourceOverviewRequest request) {
    getSender().tell(getResourceOverview(), getSelf());
}
/**
 * Publishes per-SKU gauge metrics for every tracked executor bucket (registered, busy,
 * available, disabled, unregistered, assigned). Triggered periodically via
 * PublishResourceOverviewMetricsRequest.
 */
private void onPublishResourceOverviewMetricsRequest(PublishResourceOverviewMetricsRequest request) {
    publishResourceClusterMetricBySKU(getTaskExecutors(ExecutorStateManager.isRegistered), ResourceClusterActorMetrics.NUM_REGISTERED_TE);
    publishResourceClusterMetricBySKU(getTaskExecutors(ExecutorStateManager.isBusy), ResourceClusterActorMetrics.NUM_BUSY_TE);
    publishResourceClusterMetricBySKU(getTaskExecutors(ExecutorStateManager.isAvailable), ResourceClusterActorMetrics.NUM_AVAILABLE_TE);
    publishResourceClusterMetricBySKU(getTaskExecutors(ExecutorStateManager.isDisabled), ResourceClusterActorMetrics.NUM_DISABLED_TE);
    publishResourceClusterMetricBySKU(getTaskExecutors(ExecutorStateManager.unregistered), ResourceClusterActorMetrics.NUM_UNREGISTERED_TE);
    publishResourceClusterMetricBySKU(getTaskExecutors(ExecutorStateManager.isAssigned), ResourceClusterActorMetrics.NUM_ASSIGNED_TE);
}
/**
 * Emits a gauge named {@code metricName} counting the given executors, grouped by
 * (container SKU, auto-scale group). Executors without a SKU or auto-scale-group attribute
 * are skipped. Best-effort: any failure is logged and swallowed so metric publishing never
 * disturbs the actor.
 */
private void publishResourceClusterMetricBySKU(TaskExecutorsList taskExecutorsList, String metricName) {
    try {
        taskExecutorsList.getTaskExecutors()
            .stream()
            .map(this::getTaskExecutorState)
            .filter(Objects::nonNull)
            .map(TaskExecutorState::getRegistration)
            .filter(Objects::nonNull)
            // Only count executors that carry both grouping keys.
            .filter(registration -> registration.getTaskExecutorContainerDefinitionId().isPresent() && registration.getAttributeByKey(WorkerConstants.AUTO_SCALE_GROUP_KEY).isPresent())
            .collect(groupingBy(registration -> Tuple.of(registration.getTaskExecutorContainerDefinitionId().get(), registration.getAttributeByKey(WorkerConstants.AUTO_SCALE_GROUP_KEY).get()), Collectors.counting()))
            .forEach((keys, count) -> metrics.setGauge(
                metricName,
                count,
                TagList.create(ImmutableMap.of("resourceCluster", clusterID.getResourceID(), "sku", keys._1.getResourceID(), "autoScaleGroup", keys._2))));
    } catch (Exception e) {
        log.warn("Error while publishing resource cluster metrics by sku. RC: {}, Metric: {}.", clusterID.getResourceID(), metricName, e);
    }
}
/** Returns the current aggregate resource counts from the executor state manager. */
private ResourceOverview getResourceOverview() {
    return this.executorStateManager.getResourceOverview();
}
/**
 * Replies with a point-in-time TaskExecutorStatus snapshot for the requested executor, or
 * with a TaskExecutorNotFoundException failure when the executor is unknown.
 */
private void getTaskExecutorStatus(GetTaskExecutorStatusRequest req) {
    final TaskExecutorID executorID = req.getTaskExecutorID();
    final TaskExecutorState executorState = this.executorStateManager.get(executorID);
    if (executorState != null) {
        final TaskExecutorStatus status = new TaskExecutorStatus(
            executorState.getRegistration(),
            executorState.isRegistered(),
            executorState.isRunningTask(),
            executorState.isAssigned(),
            executorState.isDisabled(),
            executorState.getWorkerId(),
            executorState.getLastActivity().toEpochMilli());
        getSender().tell(status, self());
    } else {
        log.info("Unknown executorID: {}", executorID);
        getSender().tell(
            new Status.Failure(new TaskExecutorNotFoundException(executorID)),
            self());
    }
}
/** Returns the tracked state for the given executor, or null if it is unknown. */
@Nullable
private TaskExecutorState getTaskExecutorState(TaskExecutorID taskExecutorID) {
    return this.executorStateManager.get(taskExecutorID);
}
/**
 * Handles an explicit disconnection message from a task executor: archives its state and
 * cancels its heartbeat timer, acking on success and replying with the failure otherwise.
 */
private void onTaskExecutorDisconnection(TaskExecutorDisconnection disconnection) {
    final TaskExecutorID executorID = disconnection.getTaskExecutorID();
    setupTaskExecutorStateIfNecessary(executorID);
    try {
        disconnectTaskExecutor(executorID);
        sender().tell(Ack.getInstance(), self());
    } catch (IllegalStateException e) {
        sender().tell(new Status.Failure(e), self());
    }
}
/**
 * Marks the executor disconnected; if that is an actual state change, archives its state
 * and cancels its heartbeat-timeout timer.
 */
private void disconnectTaskExecutor(TaskExecutorID taskExecutorID) {
    final TaskExecutorState state = this.executorStateManager.get(taskExecutorID);
    boolean stateChange = state.onDisconnection();
    if (stateChange) {
        this.executorStateManager.archive(taskExecutorID);
        getTimers().cancel(getHeartbeatTimerFor(taskExecutorID));
    }
}
/** Builds the timer key used for the per-executor heartbeat-timeout timer. */
private String getHeartbeatTimerFor(TaskExecutorID taskExecutorID) {
    return String.join("-", "Heartbeat", taskExecutorID.toString());
}
/**
 * Handles a heartbeat-timeout timer firing: records the timeout metric and, if the executor
 * has shown no activity since the timer was armed, disconnects it. A newer interaction
 * (later lastActivity than the timer's watermark) means the timer is stale and is ignored.
 */
private void onTaskExecutorHeartbeatTimeout(HeartbeatTimeout timeout) {
    setupTaskExecutorStateIfNecessary(timeout.getTaskExecutorID());
    try {
        metrics.incrementCounter(
            ResourceClusterActorMetrics.HEARTBEAT_TIMEOUT,
            TagList.create(ImmutableMap.of("resourceCluster", clusterID.getResourceID(), "taskExecutorID", timeout.getTaskExecutorID().getResourceId())));
        log.info("heartbeat timeout received for {}", timeout.getTaskExecutorID());
        final TaskExecutorID taskExecutorID = timeout.getTaskExecutorID();
        final TaskExecutorState state = this.executorStateManager.get(taskExecutorID);
        // Only disconnect if nothing has been heard from the executor since the timer was set.
        if (state.getLastActivity().compareTo(timeout.getLastActivity()) <= 0) {
            log.info("Disconnecting task executor {}", timeout.getTaskExecutorID());
            disconnectTaskExecutor(timeout.getTaskExecutorID());
        }
    } catch (IllegalStateException e) {
        sender().tell(new Status.Failure(e), self());
    }
}
/** Ensures a (initially unregistered) state entry exists for the given executor. */
private void setupTaskExecutorStateIfNecessary(TaskExecutorID taskExecutorID) {
    this.executorStateManager
        .trackIfAbsent(taskExecutorID, TaskExecutorState.of(clock, rpcService, jobMessageRouter));
}
/**
 * (Re-)arms the single heartbeat-timeout timer for the executor, capturing its current
 * lastActivity so a stale timeout can be detected when the timer fires.
 */
private void updateHeartbeatTimeout(TaskExecutorID taskExecutorID) {
    final TaskExecutorState state = this.executorStateManager.get(taskExecutorID);
    getTimers().startSingleTimer(
        getHeartbeatTimerFor(taskExecutorID),
        new HeartbeatTimeout(taskExecutorID, state.getLastActivity()),
        heartbeatTimeout);
}
/**
 * Pushes the cluster's current set of job artifacts to cache down to one registered task
 * executor via its RPC gateway. Best-effort: connection or caching failures are only logged.
 */
private void onCacheJobArtifactsOnTaskExecutorRequest(CacheJobArtifactsOnTaskExecutorRequest request) {
    TaskExecutorState state = this.executorStateManager.get(request.getTaskExecutorID());
    if (state != null && state.isRegistered()) {
        try {
            // TODO(fdichiara): store URI directly to avoid remapping for each TE
            state.getGatewayAsync()
                .thenComposeAsync(taskExecutorGateway ->
                    taskExecutorGateway.cacheJobArtifacts(new CacheJobArtifactsRequest(
                        jobArtifactsToCache
                            .stream()
                            .map(artifactID -> URI.create(artifactID.getResourceID()))
                            .collect(Collectors.toList()))))
                .whenComplete((res, throwable) -> {
                    if (throwable != null) {
                        log.error("failed to cache artifact on {}", request.getTaskExecutorID(), throwable);
                    }
                    else {
                        log.debug("Acked from cacheJobArtifacts for {}", request.getTaskExecutorID());
                    }
                });
        } catch (Exception ex) {
            log.warn("Failed to cache job artifacts in task executor {}", request.getTaskExecutorID(), ex);
        }
    }
    else {
        log.debug("no valid TE state for CacheJobArtifactsOnTaskExecutorRequest: {}", request);
    }
}
/**
 * Artifact is added to the list of artifacts if it's the first worker of the first stage
 * (this is to reduce the work in master) and if the job cluster is enabled (via config
 * for now)
 */
private boolean shouldCacheJobArtifacts(TaskExecutorAssignmentRequest request) {
    final WorkerId workerId = request.getAllocationRequest().getWorkerId();
    // Only the first worker of stage 1 triggers caching, to keep master's work bounded.
    if (request.getAllocationRequest().getStageNum() != 1 || workerId.getWorkerIndex() != 0) {
        return false;
    }
    // And only for job clusters that opted in via configuration.
    return getJobClustersWithArtifactCachingEnabled().contains(workerId.getJobCluster());
}
/** Parses the comma-separated configuration value into the set of opted-in job cluster names. */
private Set<String> getJobClustersWithArtifactCachingEnabled() {
    return Arrays.stream(jobClustersWithArtifactCachingEnabled.split(","))
        .collect(Collectors.toCollection(HashSet::new));
}
/** Timer message: an executor's heartbeat deadline passed; lastActivity is the watermark the timer was armed with. */
@Value
static class HeartbeatTimeout {
    TaskExecutorID taskExecutorID;
    Instant lastActivity;
}

/** Ask message: find the best-fit executor for the given allocation request. */
@Value
static class TaskExecutorAssignmentRequest {
    TaskExecutorAllocationRequest allocationRequest;
    ClusterID clusterID;
}

/** Timer message: an assigned executor was never scheduled work in time. */
@Value
static class TaskExecutorAssignmentTimeout {
    TaskExecutorID taskExecutorID;
}

/** Timer message: a disable-task-executors request reached its expiry. */
@Value
static class ExpireDisableTaskExecutorsRequest {
    DisableTaskExecutorsRequest request;
}

/** Ask message: (re-)initialize an executor that is expected to run the given worker. */
@Value
static class InitializeTaskExecutorRequest {
    TaskExecutorID taskExecutorID;
    WorkerId workerId;
}

/** Ask message: request the aggregate resource counts of the cluster. */
@Value
static class ResourceOverviewRequest {
    ClusterID clusterID;
}

/** Ask message: look up an executor's registration either by ID or by host name. */
@Value
static class TaskExecutorInfoRequest {
    @Nullable
    TaskExecutorID taskExecutorID;
    @Nullable
    String hostName;
    ClusterID clusterID;
}

/** Ask message: find the executor currently assigned to the given worker. */
@Value
static class GetAssignedTaskExecutorRequest {
    WorkerId workerId;
    ClusterID clusterID;
}

/** Ask message: obtain an RPC gateway to the given executor. */
@Value
static class TaskExecutorGatewayRequest {
    TaskExecutorID taskExecutorID;
    ClusterID clusterID;
}

/** Ask message: list registered executors matching the given attributes (empty map matches all). */
@Value
static class GetRegisteredTaskExecutorsRequest implements HasAttributes {
    ClusterID clusterID;
    Map<String, String> attributes;
}

/** Ask message: page through the jobs currently active on the cluster. */
@Value
@Builder
@AllArgsConstructor // needed for build to work with custom ctor.
static class GetActiveJobsRequest {
    ClusterID clusterID;
    Optional<Integer> startingIndex;
    Optional<Integer> pageSize;

    // Convenience ctor: fetch active jobs without paging.
    public GetActiveJobsRequest(ClusterID clusterID) {
        this.clusterID = clusterID;
        this.pageSize = Optional.empty();
        this.startingIndex = Optional.empty();
    }
}
/** Marker for request messages that filter executors by registration attributes. */
interface HasAttributes {
    Map<String, String> getAttributes();
}

/** Ask message: list available (idle, schedulable) executors matching the attributes. */
@Value
static class GetAvailableTaskExecutorsRequest implements HasAttributes {
    ClusterID clusterID;
    Map<String, String> attributes;
}

/** Ask message: list disabled executors matching the attributes. */
@Value
static class GetDisabledTaskExecutorsRequest implements HasAttributes {
    ClusterID clusterID;
    Map<String, String> attributes;
}

/** Ask message: list busy (running a task) executors matching the attributes. */
@Value
static class GetBusyTaskExecutorsRequest implements HasAttributes {
    ClusterID clusterID;
    Map<String, String> attributes;
}

/** Ask message: list unregistered executors matching the attributes. */
@Value
static class GetUnregisteredTaskExecutorsRequest implements HasAttributes {
    ClusterID clusterID;
    Map<String, String> attributes;
}

/** Ask message: fetch a point-in-time status snapshot for one executor. */
@Value
static class GetTaskExecutorStatusRequest {
    TaskExecutorID taskExecutorID;
    ClusterID clusterID;
}

/** Reply payload: a list of task executor IDs. */
@Value
static class TaskExecutorsList {
    List<TaskExecutorID> taskExecutors;
}

/** Reply payload: a list of job artifact IDs. */
@Value
static class ArtifactList {
    List<ArtifactID> artifacts;
}

/** Ask message: compute cluster usage grouped by the provided registration key function. */
@Value
static class GetClusterUsageRequest {
    ClusterID clusterID;
    Function<TaskExecutorRegistration, Optional<String>> groupKeyFunc;
}

/** Internal timer message: re-evaluate which executors are covered by disable requests. */
@Value
private static class CheckDisabledTaskExecutors {
    String reason;
}

/** Ask message: map executors (matching attributes) to the workers they run. */
@Value
static class GetTaskExecutorWorkerMappingRequest {
    Map<String, String> attributes;
}

/** Internal timer message: publish the periodic resource-overview gauges. */
@Value
private static class PublishResourceOverviewMetricsRequest {
}

/** Self-message: push the artifact cache list down to one executor. */
@Value
static class CacheJobArtifactsOnTaskExecutorRequest {
    TaskExecutorID taskExecutorID;
    ClusterID clusterID;
}

/** Ask message: add artifacts to the set executors should pre-cache. */
@Value
@Builder
static class AddNewJobArtifactsToCacheRequest {
    ClusterID clusterID;
    List<ArtifactID> artifacts;
}

/** Ask message: remove artifacts from the pre-cache set. */
@Value
@Builder
static class RemoveJobArtifactsToCacheRequest {
    ClusterID clusterID;
    List<ArtifactID> artifacts;
}

/** Ask message: read back the current pre-cache artifact set. */
@Value
@Builder
static class GetJobArtifactsToCacheRequest {
    ClusterID clusterID;
}
/**
 * Represents the Availability of a given node in the resource cluster.
 * Can go from PENDING -> ASSIGNED(workerId) -> RUNNING(workerId) -> PENDING
 * in the happy path.
 */
interface AvailabilityState {
    /** Worker tied to this state, or null when idle (Pending). */
    @Nullable
    WorkerId getWorkerId();

    /** Transition taken when the scheduler assigns a worker to the executor. */
    AvailabilityState onAssignment(WorkerId workerId);

    /** Transition taken when an assignment is released (e.g. assignment timeout). */
    AvailabilityState onUnassignment();

    /** Transition taken when the executor reports itself Available or Occupied. */
    AvailabilityState onTaskExecutorStatusChange(TaskExecutorReport report);

    // Shared singleton: Pending carries no per-instance data.
    Pending PENDING = new Pending();

    static AvailabilityState pending() {
        return PENDING;
    }

    static AvailabilityState assigned(WorkerId workerId) {
        return new Assigned(workerId);
    }

    static AvailabilityState running(WorkerId workerId) {
        return new Running(workerId);
    }

    // The helpers below declare a generic return type so they can appear in return position
    // while always throwing.
    default <T> T throwInvalidTransition() throws IllegalStateException {
        throw new IllegalStateException(
            String.format("availability state was %s when worker was unassigned", this));
    }

    default <T> T throwInvalidTransition(WorkerId workerId) throws IllegalStateException {
        throw new IllegalStateException(
            String.format("availability state was %s when workerId %s was assigned",
                this, workerId));
    }

    default <T> T throwInvalidTransition(TaskExecutorReport report) throws IllegalStateException {
        throw new IllegalStateException(
            String.format("availability state was %s when report %s was received", this, report));
    }
}
/** Availability state for an idle executor that can accept new work. */
@Value
static class Pending implements AvailabilityState {
    @Override
    public WorkerId getWorkerId() {
        // An idle executor has no worker attached.
        return null;
    }

    @Override
    public AvailabilityState onAssignment(WorkerId workerId) {
        return AvailabilityState.assigned(workerId);
    }

    @Override
    public AvailabilityState onUnassignment() {
        // Unassigning an already-idle executor is a no-op.
        return this;
    }

    @Override
    public AvailabilityState onTaskExecutorStatusChange(TaskExecutorReport report) {
        // Available keeps us idle; Occupied means the executor picked up work directly.
        return report instanceof Available
            ? this
            : report instanceof Occupied
                ? AvailabilityState.running(((Occupied) report).getWorkerId())
                : throwInvalidTransition(report);
    }
}
/** Availability state for an executor that was matched to a worker but is not yet running it. */
@Value
static class Assigned implements AvailabilityState {
    WorkerId workerId;

    @Override
    public AvailabilityState onAssignment(WorkerId workerId) {
        // Re-assigning the same worker is idempotent; a different worker is illegal.
        return this.workerId.equals(workerId) ? this : throwInvalidTransition(workerId);
    }

    @Override
    public AvailabilityState onUnassignment() {
        return AvailabilityState.pending();
    }

    @Override
    public AvailabilityState onTaskExecutorStatusChange(TaskExecutorReport report) {
        // Available: still waiting for the worker to start; Occupied: it is now running.
        return report instanceof Available
            ? this
            : report instanceof Occupied
                ? AvailabilityState.running(workerId)
                : throwInvalidTransition(report);
    }
}
/** Availability state for an executor actively running a worker. */
@Value
static class Running implements AvailabilityState {
    WorkerId workerId;

    @Override
    public AvailabilityState onAssignment(WorkerId workerId) {
        // A running executor cannot take a new assignment.
        return throwInvalidTransition(workerId);
    }

    @Override
    public AvailabilityState onUnassignment() {
        return throwInvalidTransition();
    }

    @Override
    public AvailabilityState onTaskExecutorStatusChange(TaskExecutorReport report) {
        // Occupied keeps us running; Available means the worker finished and we go idle.
        return report instanceof Occupied
            ? this
            : report instanceof Available
                ? AvailabilityState.pending()
                : throwInvalidTransition(report);
    }
}
/**
 * Builds a predicate over executor-state entries: matches everything when no attributes are
 * requested, otherwise requires the executor's registration to contain them all.
 */
private Predicate<Entry<TaskExecutorID, TaskExecutorState>> filterByAttrs(HasAttributes hasAttributes) {
    final Map<String, String> attributes = hasAttributes.getAttributes();
    return attributes.isEmpty()
        ? e -> true
        : e -> e.getValue().containsAttributes(attributes);
}
}
| 8,086 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/TaskExecutorState.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.Assigned;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.AvailabilityState;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.Pending;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.Running;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.resourcecluster.TaskExecutorHeartbeat;
import io.mantisrx.server.master.resourcecluster.TaskExecutorRegistration;
import io.mantisrx.server.master.resourcecluster.TaskExecutorReport;
import io.mantisrx.server.master.resourcecluster.TaskExecutorReport.Available;
import io.mantisrx.server.master.resourcecluster.TaskExecutorReport.Occupied;
import io.mantisrx.server.master.resourcecluster.TaskExecutorStatusChange;
import io.mantisrx.server.master.scheduler.JobMessageRouter;
import io.mantisrx.server.master.scheduler.WorkerOnDisabledVM;
import io.mantisrx.server.worker.TaskExecutorGateway;
import java.time.Clock;
import java.time.Instant;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import javax.annotation.Nullable;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.runtime.rpc.RpcService;
@SuppressWarnings("UnusedReturnValue")
@AllArgsConstructor
@Slf4j
class TaskExecutorState {

    /** Connection status of the task executor as seen by the control plane. */
    enum RegistrationState {
        Registered,
        Unregistered,
    }

    private RegistrationState state;

    // Registration details supplied by the executor; null while unregistered.
    @Nullable
    private TaskExecutorRegistration registration;

    // availabilityState being null here represents that we don't know about the actual state of the task executor
    // and are waiting for more information
    @Nullable
    private AvailabilityState availabilityState;

    // True once the executor's VM was disabled; it must not receive new work.
    private boolean disabled;

    // last interaction initiated by the task executor
    private Instant lastActivity;

    private final Clock clock;
    private final RpcService rpcService;
    private final JobMessageRouter jobMessageRouter;

    /** Creates the initial state: unregistered, availability unknown, lastActivity set to "now". */
    static TaskExecutorState of(Clock clock, RpcService rpcService, JobMessageRouter jobMessageRouter) {
        return new TaskExecutorState(
            RegistrationState.Unregistered,
            null,
            null,
            false,
            clock.instant(),
            clock,
            rpcService,
            jobMessageRouter);
    }

    boolean isRegistered() {
        return state == RegistrationState.Registered;
    }

    boolean isDisconnected() {
        return !isRegistered();
    }

    boolean isDisabled() {
        return disabled;
    }

    /**
     * Records a registration. Returns true if this changed the state, false if the executor
     * was already registered.
     */
    boolean onRegistration(TaskExecutorRegistration registration) {
        if (state == RegistrationState.Registered) {
            return false;
        } else {
            this.state = RegistrationState.Registered;
            this.registration = registration;
            updateTicker();
            return true;
        }
    }

    /**
     * Records a disconnection, clearing the registration and the availability state.
     * Returns true if this changed the state.
     */
    boolean onDisconnection() {
        if (state == RegistrationState.Unregistered) {
            return false;
        } else {
            state = RegistrationState.Unregistered;
            registration = null;
            setAvailabilityState(null);
            updateTicker();
            return true;
        }
    }

    /** Maps an executor-supplied report onto an initial availability state. */
    private static AvailabilityState from(TaskExecutorReport report) {
        if (report instanceof Available) {
            return AvailabilityState.pending();
        } else if (report instanceof Occupied) {
            return AvailabilityState.running(((Occupied) report).getWorkerId());
        } else {
            throw new RuntimeException(String.format("TaskExecutorReport=%s was unexpected", report));
        }
    }

    /**
     * Assigns the given worker to this executor. Returns true if the availability state changed.
     *
     * @throws IllegalStateException if the executor is unregistered or its availability is unknown
     */
    boolean onAssignment(WorkerId workerId) throws IllegalStateException {
        if (!isRegistered()) {
            throwNotRegistered(String.format("assignment to %s", workerId));
        }
        if (this.availabilityState == null) {
            // Fixed message: this is the assignment path (previously said "unassignmentas").
            throw new IllegalStateException("availability state was null when assignment was issued");
        }
        return setAvailabilityState(this.availabilityState.onAssignment(workerId));
    }

    /**
     * Releases the worker assignment. Returns true if the availability state changed.
     *
     * @throws IllegalStateException if the availability state is unknown
     */
    boolean onUnassignment() throws IllegalStateException {
        if (this.availabilityState == null) {
            throw new IllegalStateException("availability state was null when unassignment was issued");
        }
        return setAvailabilityState(this.availabilityState.onUnassignment());
    }

    /**
     * Marks the executor's VM as disabled. If a worker is currently running on it, the job is
     * notified via a WorkerOnDisabledVM event. Returns true on the first call only.
     */
    boolean onNodeDisabled() {
        if (!this.disabled) {
            this.disabled = true;
            if (this.availabilityState instanceof Running) {
                jobMessageRouter.routeWorkerEvent(new WorkerOnDisabledVM(this.availabilityState.getWorkerId()));
            }
            return true;
        } else {
            return false;
        }
    }

    /**
     * Applies a heartbeat's embedded report and refreshes the activity timestamp.
     * Returns true if the availability state changed.
     *
     * @throws IllegalStateException if the executor is not registered
     */
    boolean onHeartbeat(TaskExecutorHeartbeat heartbeat) throws IllegalStateException {
        if (!isRegistered()) {
            throwNotRegistered(String.format("heartbeat %s", heartbeat));
        }
        boolean result = handleStatusChange(heartbeat.getTaskExecutorReport());
        updateTicker();
        return result;
    }

    /**
     * Applies an explicit status-change report and refreshes the activity timestamp.
     * Returns true if the availability state changed.
     *
     * @throws IllegalStateException if the executor is not registered
     */
    boolean onTaskExecutorStatusChange(TaskExecutorStatusChange statusChange) {
        if (!isRegistered()) {
            throwNotRegistered(String.format("status change %s", statusChange));
        }
        boolean result = handleStatusChange(statusChange.getTaskExecutorReport());
        updateTicker();
        return result;
    }

    private boolean handleStatusChange(TaskExecutorReport report) throws IllegalStateException {
        if (availabilityState == null) {
            // First report after (re)registration: derive the initial availability directly.
            return setAvailabilityState(from(report));
        } else {
            return setAvailabilityState(availabilityState.onTaskExecutorStatusChange(report));
        }
    }

    /**
     * Transitions to the new availability state. If the executor starts running a worker while
     * its VM is disabled, the job is informed. Returns true if the state actually changed.
     */
    private boolean setAvailabilityState(AvailabilityState newState) {
        if (this.availabilityState != newState) {
            this.availabilityState = newState;
            if (this.availabilityState instanceof Running) {
                if (isDisabled()) {
                    jobMessageRouter.routeWorkerEvent(new WorkerOnDisabledVM(newState.getWorkerId()));
                }
            }
            return true;
        } else {
            return false;
        }
    }

    /** Worker currently assigned to or running on this executor, or null if none/unknown. */
    @Nullable
    protected WorkerId getWorkerId() {
        if (this.availabilityState != null) {
            return this.availabilityState.getWorkerId();
        } else {
            return null;
        }
    }

    private void throwNotRegistered(String message) throws IllegalStateException {
        throw new IllegalStateException(
            String.format("Task Executor un-registered when it received %s", message));
    }

    private void updateTicker() {
        this.lastActivity = clock.instant();
    }

    /** An executor is schedulable when it is idle (Pending) and its VM is not disabled. */
    boolean isAvailable() {
        return this.availabilityState instanceof Pending && !isDisabled();
    }

    boolean isRunningTask() {
        return this.availabilityState instanceof Running;
    }

    boolean isAssigned() {
        return this.availabilityState instanceof Assigned;
    }

    boolean isRunningOrAssigned(WorkerId workerId) {
        return this.getWorkerId() != null && this.getWorkerId().equals(workerId);
    }

    // Captures the last interaction from the task executor. Any interactions
    // that are caused from within the server do not cause an uptick.
    Instant getLastActivity() {
        return this.lastActivity;
    }

    TaskExecutorRegistration getRegistration() {
        return this.registration;
    }

    /**
     * Opens a fresh RPC connection to the task executor and returns its gateway.
     * Connection failures are logged and surface through the returned future.
     *
     * @throws IllegalStateException if the executor is not currently registered
     */
    protected CompletableFuture<TaskExecutorGateway> getGatewayAsync() {
        if (this.registration == null || this.state == RegistrationState.Unregistered) {
            throw new IllegalStateException("TE is unregistered");
        }
        // [Note] here the gateway connection is re-created every time it's requested to avoid corrupted state that
        // can block the connection to TE.
        // To be able to store and re-use the gateway, we probably need to make a chain of callbacks so that the TE
        // is only marked as available after this gateway connection is successfully established with proper retry
        // loops (since the TE only register once and take the ack as success on API response).
        return rpcService.connect(registration.getTaskExecutorAddress(), TaskExecutorGateway.class)
            .whenComplete((gateway, throwable) -> {
                if (throwable != null) {
                    log.error("Failed to connect to the gateway", throwable);
                }
            });
    }

    boolean containsAttributes(Map<String, String> attributes) {
        return registration != null && registration.containsAttributes(attributes);
    }
}
| 8,087 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/ResourceClusterAkkaImpl.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster;
import akka.actor.ActorRef;
import akka.pattern.Patterns;
import io.mantisrx.common.Ack;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.AddNewJobArtifactsToCacheRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.ArtifactList;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetActiveJobsRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetAssignedTaskExecutorRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetAvailableTaskExecutorsRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetBusyTaskExecutorsRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetJobArtifactsToCacheRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetRegisteredTaskExecutorsRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetTaskExecutorStatusRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetTaskExecutorWorkerMappingRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetUnregisteredTaskExecutorsRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.InitializeTaskExecutorRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.RemoveJobArtifactsToCacheRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.ResourceOverviewRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.TaskExecutorAssignmentRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.TaskExecutorGatewayRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.TaskExecutorInfoRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.TaskExecutorsList;
import io.mantisrx.master.resourcecluster.ResourceClusterScalerActor.TriggerClusterRuleRefreshRequest;
import io.mantisrx.master.resourcecluster.proto.SetResourceClusterScalerStatusRequest;
import io.mantisrx.server.core.domain.ArtifactID;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.ContainerSkuID;
import io.mantisrx.server.master.resourcecluster.PagedActiveJobOverview;
import io.mantisrx.server.master.resourcecluster.ResourceCluster;
import io.mantisrx.server.master.resourcecluster.TaskExecutorAllocationRequest;
import io.mantisrx.server.master.resourcecluster.TaskExecutorID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorRegistration;
import io.mantisrx.server.worker.TaskExecutorGateway;
import io.mantisrx.shaded.com.google.common.collect.ImmutableMap;
import java.time.Duration;
import java.time.Instant;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;
class ResourceClusterAkkaImpl extends ResourceClusterGatewayAkkaImpl implements ResourceCluster {
private final ClusterID clusterID;
/**
 * Creates an actor-backed {@code ResourceCluster} facade scoped to a single cluster.
 *
 * @param resourceClusterManagerActor actor owning the per-cluster state; every call is an ask to it
 * @param askTimeout timeout applied to each actor ask
 * @param clusterID cluster this facade operates on
 * @param rateLimitPerSecond rate-limit supplier forwarded to the gateway base class
 */
public ResourceClusterAkkaImpl(
    ActorRef resourceClusterManagerActor,
    Duration askTimeout,
    ClusterID clusterID,
    Supplier<Integer> rateLimitPerSecond) {
    super(resourceClusterManagerActor, askTimeout, rateLimitPerSecond);
    this.clusterID = clusterID;
}
/** Returns the cluster's name, which is its resource ID. */
@Override
public String getName() {
    return clusterID.getResourceID();
}
/** Asks the resource cluster actor to (re-)initialize the given task executor for its worker. */
@Override
public CompletableFuture<Ack> initializeTaskExecutor(TaskExecutorID taskExecutorID, WorkerId workerId) {
    final InitializeTaskExecutorRequest message =
        new InitializeTaskExecutorRequest(taskExecutorID, workerId);
    return Patterns.ask(resourceClusterManagerActor, message, askTimeout)
        .toCompletableFuture()
        .thenApply(Ack.class::cast);
}
/** Lists executors registered with this cluster that match the given attributes. */
@Override
public CompletableFuture<List<TaskExecutorID>> getRegisteredTaskExecutors(Map<String, String> attributes) {
    return askForTaskExecutors(new GetRegisteredTaskExecutorsRequest(clusterID, attributes));
}

/** Lists idle, schedulable executors of this cluster that match the given attributes. */
@Override
public CompletableFuture<List<TaskExecutorID>> getAvailableTaskExecutors(Map<String, String> attributes) {
    return askForTaskExecutors(new GetAvailableTaskExecutorsRequest(clusterID, attributes));
}

/** Lists executors of this cluster currently running a task that match the given attributes. */
@Override
public CompletableFuture<List<TaskExecutorID>> getBusyTaskExecutors(Map<String, String> attributes) {
    return askForTaskExecutors(new GetBusyTaskExecutorsRequest(clusterID, attributes));
}

/** Lists unregistered executors of this cluster that match the given attributes. */
@Override
public CompletableFuture<List<TaskExecutorID>> getUnregisteredTaskExecutors(Map<String, String> attributes) {
    return askForTaskExecutors(new GetUnregisteredTaskExecutorsRequest(clusterID, attributes));
}

/** Shared ask pattern: sends the request and unwraps the TaskExecutorsList reply. */
private CompletableFuture<List<TaskExecutorID>> askForTaskExecutors(Object request) {
    return Patterns.ask(resourceClusterManagerActor, request, askTimeout)
        .thenApply(TaskExecutorsList.class::cast)
        .toCompletableFuture()
        .thenApply(TaskExecutorsList::getTaskExecutors);
}
/** Fetches the aggregate resource overview (executor counts by state) for the cluster. */
@Override
public CompletableFuture<ResourceOverview> resourceOverview() {
    final ResourceOverviewRequest message = new ResourceOverviewRequest(clusterID);
    return Patterns.ask(resourceClusterManagerActor, message, askTimeout)
        .toCompletableFuture()
        .thenApply(ResourceOverview.class::cast);
}
/** Registers new job artifacts that task executors of the given cluster should pre-cache. */
@Override
public CompletableFuture<Ack> addNewJobArtifactsToCache(ClusterID clusterID, List<ArtifactID> artifacts) {
    return Patterns
        .ask(
            resourceClusterManagerActor,
            new AddNewJobArtifactsToCacheRequest(clusterID, artifacts),
            askTimeout)
        .thenApply(Ack.class::cast)
        .toCompletableFuture();
}

/** Removes job artifacts from this cluster's pre-cache list. */
@Override
public CompletableFuture<Ack> removeJobArtifactsToCache(List<ArtifactID> artifacts) {
    return Patterns
        .ask(
            resourceClusterManagerActor,
            new RemoveJobArtifactsToCacheRequest(clusterID, artifacts),
            askTimeout)
        .thenApply(Ack.class::cast)
        .toCompletableFuture();
}

/** Returns the job artifacts that task executors of this cluster should keep cached. */
@Override
public CompletableFuture<List<ArtifactID>> getJobArtifactsToCache() {
    return Patterns
        .ask(
            resourceClusterManagerActor,
            new GetJobArtifactsToCacheRequest(clusterID),
            askTimeout)
        .thenApply(ArtifactList.class::cast)
        .toCompletableFuture()
        .thenApply(ArtifactList::getArtifacts);
}
/** Asks the actor to find and assign a task executor satisfying the allocation request. */
@Override
public CompletableFuture<TaskExecutorID> getTaskExecutorFor(TaskExecutorAllocationRequest allocationRequest) {
    return
        Patterns
            .ask(resourceClusterManagerActor, new TaskExecutorAssignmentRequest(allocationRequest, clusterID), askTimeout)
            .thenApply(TaskExecutorID.class::cast)
            .toCompletableFuture();
}

/** Looks up which task executor is currently assigned to the given worker. */
@Override
public CompletableFuture<TaskExecutorID> getTaskExecutorAssignedFor(WorkerId workerId) {
    return
        Patterns
            .ask(resourceClusterManagerActor, new GetAssignedTaskExecutorRequest(workerId, clusterID), askTimeout)
            .thenApply(TaskExecutorID.class::cast)
            .toCompletableFuture();
}
@Override
public CompletableFuture<TaskExecutorGateway> getTaskExecutorGateway(
    TaskExecutorID taskExecutorID) {
    // Resolve a gateway to the given task executor. The actor replies with a
    // CompletableFuture<TaskExecutorGateway>; any other reply type is surfaced as a failure.
    // Previously the whole CompletionStage returned by ask() was force-cast to
    // CompletableFuture; use toCompletableFuture() instead and confine the unavoidable
    // unchecked cast to the reply payload itself.
    return Patterns
        .ask(resourceClusterManagerActor, new TaskExecutorGatewayRequest(taskExecutorID, clusterID),
            askTimeout)
        .thenComposeAsync(result -> {
            if (result instanceof CompletableFuture) {
                @SuppressWarnings("unchecked") // actor protocol: reply is a future of the gateway
                CompletableFuture<TaskExecutorGateway> gatewayFuture =
                    (CompletableFuture<TaskExecutorGateway>) result;
                return gatewayFuture;
            } else {
                CompletableFuture<TaskExecutorGateway> exceptionFuture = new CompletableFuture<>();
                exceptionFuture.completeExceptionally(new RuntimeException(
                    "Unexpected object type on getTaskExecutorGateway: " + result.getClass().getName()));
                return exceptionFuture;
            }
        })
        .toCompletableFuture();
}
@Override
public CompletableFuture<TaskExecutorRegistration> getTaskExecutorInfo(String hostName) {
    // Look up a task executor's registration by host name (no executor id supplied).
    final TaskExecutorInfoRequest request = new TaskExecutorInfoRequest(null, hostName, clusterID);
    return Patterns.ask(resourceClusterManagerActor, request, askTimeout)
        .toCompletableFuture()
        .thenApply(TaskExecutorRegistration.class::cast);
}
@Override
public CompletableFuture<TaskExecutorRegistration> getTaskExecutorInfo(
    TaskExecutorID taskExecutorID) {
    // Look up a task executor's registration by executor id (no host name supplied).
    final TaskExecutorInfoRequest request = new TaskExecutorInfoRequest(taskExecutorID, null, clusterID);
    return Patterns.ask(resourceClusterManagerActor, request, askTimeout)
        .toCompletableFuture()
        .thenApply(TaskExecutorRegistration.class::cast);
}
@Override
public CompletableFuture<TaskExecutorStatus> getTaskExecutorState(TaskExecutorID taskExecutorID) {
    // Fetch the current status of a single task executor.
    final GetTaskExecutorStatusRequest request =
        new GetTaskExecutorStatusRequest(taskExecutorID, clusterID);
    return Patterns.ask(resourceClusterManagerActor, request, askTimeout)
        .toCompletableFuture()
        .thenApply(TaskExecutorStatus.class::cast);
}
@Override
public CompletableFuture<Ack> refreshClusterScalerRuleSet() {
    // Trigger an on-demand reload of this cluster's scaler rules.
    final TriggerClusterRuleRefreshRequest request =
        TriggerClusterRuleRefreshRequest.builder().clusterID(this.clusterID).build();
    return Patterns.ask(resourceClusterManagerActor, request, askTimeout)
        .toCompletableFuture()
        .thenApply(Ack.class::cast);
}
@Override
public CompletableFuture<Ack> disableTaskExecutorsFor(
    Map<String, String> attributes,
    Instant expiry,
    Optional<TaskExecutorID> taskExecutorID) {
    // Disable matching task executors (or a single one, if an id is given) until the expiry.
    return Patterns
        .ask(
            resourceClusterManagerActor,
            new DisableTaskExecutorsRequest(attributes, clusterID, expiry, taskExecutorID),
            askTimeout)
        .toCompletableFuture()
        .thenApply(Ack.class::cast);
}
@Override
public CompletableFuture<Ack> setScalerStatus(ClusterID clusterID, ContainerSkuID skuID, Boolean enabled, Long expirationDurationInSeconds) {
    // Enable/disable the autoscaler for one SKU; a disabled state expires after the given duration.
    final SetResourceClusterScalerStatusRequest request = SetResourceClusterScalerStatusRequest
        .builder()
        .skuId(skuID)
        .clusterID(clusterID)
        .enabled(enabled)
        .expirationDurationInSeconds(expirationDurationInSeconds)
        .build();
    return Patterns.ask(resourceClusterManagerActor, request, askTimeout)
        .toCompletableFuture()
        .thenApply(Ack.class::cast);
}
@Override
public CompletableFuture<PagedActiveJobOverview> getActiveJobOverview(
    Optional<Integer> startingIndex,
    Optional<Integer> maxSize) {
    // Page through the active jobs running on this cluster.
    final GetActiveJobsRequest request = GetActiveJobsRequest.builder()
        .clusterID(clusterID)
        .startingIndex(startingIndex)
        .pageSize(maxSize)
        .build();
    return Patterns.ask(resourceClusterManagerActor, request, askTimeout)
        .toCompletableFuture()
        .thenApply(PagedActiveJobOverview.class::cast);
}
@Override
public CompletableFuture<Map<TaskExecutorID, WorkerId>> getTaskExecutorWorkerMapping() {
    // Delegate to the attribute-filtered overload with an empty filter (match all executors),
    // instead of duplicating the ask/cast plumbing.
    return getTaskExecutorWorkerMapping(ImmutableMap.of());
}
@Override
public CompletableFuture<Map<TaskExecutorID, WorkerId>> getTaskExecutorWorkerMapping(Map<String, String> attributes) {
    // Map task executors (filtered by attributes) to the workers they are running.
    final GetTaskExecutorWorkerMappingRequest request =
        new GetTaskExecutorWorkerMappingRequest(attributes);
    return Patterns.ask(resourceClusterManagerActor, request, askTimeout)
        .toCompletableFuture()
        // Unchecked: the actor protocol replies with this map type.
        .thenApply(obj -> (Map<TaskExecutorID, WorkerId>) obj);
}
}
| 8,088 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/ResourceClusterActorMetrics.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster;
import akka.japi.pf.FI;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Tag;
import com.netflix.spectator.api.Timer;
import io.mantisrx.common.metrics.spectator.MetricId;
import io.mantisrx.common.metrics.spectator.SpectatorRegistryFactory;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.CacheJobArtifactsOnTaskExecutorRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetClusterUsageRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.HeartbeatTimeout;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.InitializeTaskExecutorRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.TaskExecutorAssignmentRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.TaskExecutorGatewayRequest;
import io.mantisrx.master.resourcecluster.proto.GetClusterIdleInstancesRequest;
import io.mantisrx.server.master.resourcecluster.TaskExecutorDisconnection;
import io.mantisrx.server.master.resourcecluster.TaskExecutorHeartbeat;
import io.mantisrx.server.master.resourcecluster.TaskExecutorRegistration;
import io.mantisrx.shaded.com.google.common.collect.ImmutableMap;
import io.vavr.Tuple2;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
/**
 * Spectator-based metrics helper for the resource cluster actor: publishes gauges/counters
 * under the "ResourceClusterActor" group and tracks per-message-type receive counts and
 * processing latency via {@link #withTracking}.
 */
@Slf4j
class ResourceClusterActorMetrics {

    private static final String METRIC_GROUP_ID = "ResourceClusterActor";

    // Metric names published by the resource cluster actor (used with setGauge/incrementCounter).
    public static final String NUM_REGISTERED_TE = "numRegisteredTaskExecutors";
    public static final String NUM_BUSY_TE = "numBusyTaskExecutors";
    public static final String NUM_AVAILABLE_TE = "numAvailableTaskExecutors";
    public static final String NUM_DISABLED_TE = "numDisabledTaskExecutors";
    public static final String NUM_UNREGISTERED_TE = "numUnregisteredTaskExecutors";
    public static final String NUM_ASSIGNED_TE = "numAssignedTaskExecutors";
    public static final String NO_RESOURCES_AVAILABLE = "noResourcesAvailable";
    public static final String HEARTBEAT_TIMEOUT = "taskExecutorHeartbeatTimeout";
    public static final String TE_CONNECTION_FAILURE = "taskExecutorConnectionFailure";
    public static final String MAX_JOB_ARTIFACTS_TO_CACHE_REACHED = "maxJobArtifactsToCacheReached";

    private final Registry registry;

    // (receive counter, processing-latency timer) per tracked message class;
    // messages whose class is not in this map fall back to unknownMessageMetrics.
    private final Map<Class<?>, Tuple2<Counter, Timer>> messageMetrics;
    private final Tuple2<Counter, Timer> unknownMessageMetrics;

    // Spectator id for the "messagesReceived" counter, tagged with the message type name.
    private Id getMessageReceivedId(String messageName) {
        return new MetricId(METRIC_GROUP_ID, "messagesReceived",
            Tag.of("messageType", messageName)).getSpectatorId(registry);
    }

    // Spectator id for the "messageProcessingLatency" timer, tagged with the message type name.
    private Id getMessageProcessingLatencyId(String messageName) {
        return new MetricId(METRIC_GROUP_ID, "messageProcessingLatency",
            Tag.of("messageType", messageName)).getSpectatorId(registry);
    }

    // Build the (counter, timer) pair for one message type.
    private Tuple2<Counter, Timer> getBoth(String messageName) {
        return new Tuple2<>(
            registry.counter(getMessageReceivedId(messageName)),
            registry.timer(getMessageProcessingLatencyId(messageName)));
    }

    public ResourceClusterActorMetrics() {
        this.registry = SpectatorRegistryFactory.getRegistry();
        // The key set below defines which actor messages get dedicated metrics; everything
        // else is aggregated under "UnknownMessage".
        this.messageMetrics = ImmutableMap.of(
            TaskExecutorRegistration.class, getBoth("TaskExecutorRegistration"),
            InitializeTaskExecutorRequest.class, getBoth("InitializeTaskExecutorRequest"),
            TaskExecutorHeartbeat.class, getBoth("TaskExecutorHeartbeat"),
            TaskExecutorDisconnection.class, getBoth("TaskExecutorDisconnection"),
            HeartbeatTimeout.class, getBoth("HeartbeatTimeout"),
            TaskExecutorAssignmentRequest.class, getBoth("TaskExecutorAssignmentRequest"),
            TaskExecutorGatewayRequest.class, getBoth("TaskExecutorGatewayRequest"),
            CacheJobArtifactsOnTaskExecutorRequest.class,
            getBoth("CacheJobArtifactsOnTaskExecutorRequest"),
            GetClusterUsageRequest.class, getBoth("GetClusterUsageRequest"),
            GetClusterIdleInstancesRequest.class, getBoth("GetClusterIdleInstancesRequest")
        );
        this.unknownMessageMetrics = getBoth("UnknownMessage");
    }

    /** Set a gauge in this metric group to the given value. */
    public void setGauge(final String metric, final long value, final Iterable<Tag> tags) {
        registry.gauge(new MetricId(METRIC_GROUP_ID, metric, tags).getSpectatorId(registry))
            .set(value);
    }

    /** Increment a counter in this metric group. */
    public void incrementCounter(final String metric, final Iterable<Tag> tags) {
        registry.counter(new MetricId(METRIC_GROUP_ID, metric, tags).getSpectatorId(registry))
            .increment();
    }

    /**
     * Wrap an actor message handler so that each invocation increments the message's receive
     * counter and records its processing latency (measured even when the handler throws).
     */
    public <P> FI.UnitApply<P> withTracking(final FI.UnitApply<P> apply) {
        return p -> {
            final long start = System.nanoTime();
            try {
                apply.apply(p);
            } finally {
                final Class<?> pClass = p.getClass();
                messageMetrics.getOrDefault(pClass, unknownMessageMetrics)
                    .apply((counter, timer) -> {
                        counter.increment();
                        timer.record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
                        return null;
                    });
            }
        };
    }
}
| 8,089 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/ResourceClusterScalerActor.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster;
import akka.actor.AbstractActorWithTimers;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.japi.pf.ReceiveBuilder;
import com.netflix.spectator.api.BasicTag;
import io.mantisrx.common.Ack;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.metrics.spectator.MetricGroupId;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetClusterUsageRequest;
import io.mantisrx.master.resourcecluster.proto.GetClusterIdleInstancesRequest;
import io.mantisrx.master.resourcecluster.proto.GetClusterIdleInstancesResponse;
import io.mantisrx.master.resourcecluster.proto.GetClusterUsageResponse;
import io.mantisrx.master.resourcecluster.proto.GetClusterUsageResponse.UsageByGroupKey;
import io.mantisrx.master.resourcecluster.proto.ResourceClusterScaleSpec;
import io.mantisrx.master.resourcecluster.proto.ScaleResourceRequest;
import io.mantisrx.master.resourcecluster.proto.SetResourceClusterScalerStatusRequest;
import io.mantisrx.master.resourcecluster.writable.ResourceClusterScaleRulesWritable;
import io.mantisrx.server.master.persistence.IMantisPersistenceProvider;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.ContainerSkuID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorRegistration;
import io.mantisrx.shaded.com.google.common.collect.ImmutableMap;
import java.io.IOException;
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.util.Collections;
import java.util.HashSet;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;
import java.util.stream.Collectors;
import lombok.Builder;
import lombok.Value;
import lombok.extern.slf4j.Slf4j;
/**
 * This actor handles messages regarding cluster usage and makes scaling decisions.
 * [Notes] There are two possible communication models between the scaler actor and the resource cluster actor. If
 * state is pushed from the resource cluster actor to the scaler actor, the downside is that we must ensure every
 * change is properly handled and triggers the push; pulling state from the scaler actor instead requires an
 * explicit timer firing.
 */
@Slf4j
public class ResourceClusterScalerActor extends AbstractActorWithTimers {

    private final ClusterID clusterId;

    // Timer threshold of pulling cluster usage.
    private final Duration scalerPullThreshold;

    // Timer threshold of refreshing cluster scale rules from storage provider.
    private final Duration ruleSetRefreshThreshold;

    private final ActorRef resourceClusterActor;
    private final ActorRef resourceClusterHostActor;
    private final IMantisPersistenceProvider storageProvider;

    // One scale rule per container SKU; repopulated by fetchRuleSet() and toggled by
    // SetResourceClusterScalerStatusRequest messages.
    private final ConcurrentMap<ContainerSkuID, ClusterAvailabilityRule> skuToRuleMap = new ConcurrentHashMap<>();

    private final Clock clock;

    private final Counter numScaleUp;
    private final Counter numScaleDown;
    private final Counter numReachScaleMaxLimit;
    private final Counter numReachScaleMinLimit;
    private final Counter numScaleRuleTrigger;

    /** Akka {@link Props} factory for this actor. */
    public static Props props(
        ClusterID clusterId,
        Clock clock,
        Duration scalerPullThreshold,
        Duration ruleRefreshThreshold,
        IMantisPersistenceProvider storageProvider,
        ActorRef resourceClusterHostActor,
        ActorRef resourceClusterActor) {
        return Props.create(
            ResourceClusterScalerActor.class,
            clusterId,
            clock,
            scalerPullThreshold,
            ruleRefreshThreshold,
            storageProvider,
            resourceClusterHostActor,
            resourceClusterActor);
    }

    public ResourceClusterScalerActor(
        ClusterID clusterId,
        Clock clock,
        Duration scalerPullThreshold,
        Duration ruleRefreshThreshold,
        IMantisPersistenceProvider storageProvider,
        ActorRef resourceClusterHostActor,
        ActorRef resourceClusterActor) {
        this.clusterId = clusterId;
        this.resourceClusterActor = resourceClusterActor;
        this.resourceClusterHostActor = resourceClusterHostActor;
        this.storageProvider = storageProvider;
        this.clock = clock;
        this.scalerPullThreshold = scalerPullThreshold;
        this.ruleSetRefreshThreshold = ruleRefreshThreshold;

        // Register scaler counters, tagged with this resource cluster's id.
        MetricGroupId metricGroupId = new MetricGroupId(
            "ResourceClusterScalerActor",
            new BasicTag("resourceCluster", this.clusterId.getResourceID()));
        Metrics m = new Metrics.Builder()
            .id(metricGroupId)
            .addCounter("numScaleDown")
            .addCounter("numReachScaleMaxLimit")
            .addCounter("numScaleUp")
            .addCounter("numReachScaleMinLimit")
            .addCounter("numScaleRuleTrigger")
            .build();
        m = MetricsRegistry.getInstance().registerAndGet(m);
        this.numScaleDown = m.getCounter("numScaleDown");
        this.numReachScaleMaxLimit = m.getCounter("numReachScaleMaxLimit");
        this.numScaleUp = m.getCounter("numScaleUp");
        this.numReachScaleMinLimit = m.getCounter("numReachScaleMinLimit");
        this.numScaleRuleTrigger = m.getCounter("numScaleRuleTrigger");
    }

    @Override
    public Receive createReceive() {
        return
            ReceiveBuilder
                .create()
                .match(TriggerClusterUsageRequest.class, this::onTriggerClusterUsageRequest)
                .match(TriggerClusterRuleRefreshRequest.class, this::onTriggerClusterRuleRefreshRequest)
                .match(GetRuleSetRequest.class,
                    req -> getSender().tell(
                        GetRuleSetResponse.builder().rules(ImmutableMap.copyOf(this.skuToRuleMap)).build(), self()))
                .match(GetClusterUsageResponse.class, this::onGetClusterUsageResponse)
                .match(GetClusterIdleInstancesResponse.class, this::onGetClusterIdleInstancesResponse)
                .match(GetRuleSetResponse.class,
                    s -> log.info("[{}] Refreshed rule size: {}", s.getClusterID(), s.getRules().size()))
                .match(SetResourceClusterScalerStatusRequest.class, req -> {
                    onSetScalerStatus(req);
                    getSender().tell(Ack.getInstance(), self());
                })
                .match(ExpireSetScalerStatusRequest.class, this::onExpireSetScalerStatus)
                .match(Ack.class, ack -> log.info("Received ack from {}", sender()))
                .build();
    }

    @Override
    public void preStart() throws Exception {
        super.preStart();
        log.info("ResourceClusterScaler Actor {} starting", this.clusterId);
        // Load rules once at startup, then drive usage polling and rule refresh with timers.
        this.fetchRuleSet();

        getTimers().startTimerWithFixedDelay(
            "ClusterScaler-" + this.clusterId,
            new TriggerClusterUsageRequest(this.clusterId),
            scalerPullThreshold);

        getTimers().startTimerWithFixedDelay(
            "ClusterScalerRuleFetcher-" + this.clusterId,
            new TriggerClusterRuleRefreshRequest(this.clusterId),
            this.ruleSetRefreshThreshold);
    }

    /**
     * Applies each SKU's availability rule to the reported usage and acts on the resulting
     * decision: scale-up goes straight to the host actor, scale-down first asks the resource
     * cluster actor for idle instances to remove.
     */
    private void onGetClusterUsageResponse(GetClusterUsageResponse usageResponse) {
        log.info("Getting cluster usage: {}", usageResponse);
        this.numScaleRuleTrigger.increment();

        // get usage by mDef
        // for each mdef: locate rule for the mdef, apply rule if under coolDown.
        // update coolDown timer.

        // 1 matcher for usage and rule.
        // 2 rule apply to usage.
        // 3 translate between decision to scale request. (inline for now)

        usageResponse.getUsages().forEach(usage -> {
            ContainerSkuID skuId = ContainerSkuID.of(usage.getUsageGroupKey());
            if (this.skuToRuleMap.containsKey(skuId) && skuToRuleMap.get(skuId).isEnabled()) {
                Optional<ScaleDecision> decisionO = this.skuToRuleMap.get(skuId).apply(usage);
                if (decisionO.isPresent()) {
                    log.info("Informing scale decision: {}", decisionO.get());
                    switch (decisionO.get().getType()) {
                        case ScaleDown:
                            log.info("Scaling down, fetching idle instances.");
                            this.numScaleDown.increment();
                            this.resourceClusterActor.tell(
                                GetClusterIdleInstancesRequest.builder()
                                    .clusterID(this.clusterId)
                                    .skuId(skuId)
                                    .desireSize(decisionO.get().getDesireSize())
                                    .maxInstanceCount(
                                        Math.max(0, usage.getTotalCount() - decisionO.get().getDesireSize()))
                                    .build(),
                                self());
                            break;
                        case ScaleUp:
                            log.info("Scaling up, informing host actor: {}", decisionO.get());
                            this.numScaleUp.increment();
                            this.resourceClusterHostActor.tell(translateScaleDecision(decisionO.get()), self());
                            break;
                        case NoOpReachMax:
                            this.numReachScaleMaxLimit.increment();
                            break;
                        case NoOpReachMin:
                            this.numReachScaleMinLimit.increment();
                            break;
                        default:
                            throw new RuntimeException("Invalid scale type: " + decisionO);
                    }
                }
            } else {
                log.info("Either scaling is disabled for sku or no sku rule is available for {}: {}", this.clusterId, usage.getUsageGroupKey());
            }
        });

        getSender().tell(Ack.getInstance(), self());
    }

    /**
     * Second half of a scale-down: forwards the chosen idle instances to the host actor and
     * disables them (24h) so they are not reused while being removed.
     */
    private void onGetClusterIdleInstancesResponse(GetClusterIdleInstancesResponse response) {
        log.info("On GetClusterIdleInstancesResponse, informing host actor: {}", response);
        this.resourceClusterHostActor.tell(
            ScaleResourceRequest.builder()
                .clusterId(this.clusterId)
                .skuId(response.getSkuId())
                .desireSize(response.getDesireSize())
                .idleInstances(response.getInstanceIds())
                .build(),
            self());

        // also disable the scale down targets to avoid them being used during the scale down process.
        response.getInstanceIds().forEach(id ->
            this.resourceClusterActor.tell(new DisableTaskExecutorsRequest(
                    Collections.emptyMap(),
                    this.clusterId,
                    Instant.now().plus(Duration.ofHours(24)),
                    Optional.of(id)),
                self()
            ));
    }

    // Periodic timer handler: request usage from the resource cluster actor (no-op without rules).
    private void onTriggerClusterUsageRequest(TriggerClusterUsageRequest req) {
        log.trace("Requesting cluster usage: {}", this.clusterId);
        if (this.skuToRuleMap.isEmpty()) {
            log.info("{} scaler is disabled due to no rules", this.clusterId);
            return;
        }
        this.resourceClusterActor.tell(
            new GetClusterUsageRequest(
                this.clusterId, ResourceClusterScalerActor.groupKeyFromTaskExecutorDefinitionIdFunc),
            self());
    }

    // Periodic timer handler: reload the rule set from the storage provider.
    private void onTriggerClusterRuleRefreshRequest(TriggerClusterRuleRefreshRequest req) {
        log.info("{}: Requesting cluster rule refresh", this.clusterId);
        this.fetchRuleSet();
    }

    /**
     * Reloads scale rules from storage, dropping rules for SKUs that no longer exist and
     * preserving each surviving rule's enabled flag and last-action timestamp.
     */
    private void fetchRuleSet() {
        try {
            ResourceClusterScaleRulesWritable rules =
                this.storageProvider.getResourceClusterScaleRules(this.clusterId);
            Set<ContainerSkuID> removedKeys = new HashSet<>(this.skuToRuleMap.keySet());
            final Set<ContainerSkuID> preservedKeys = rules.getScaleRules().keySet().stream()
                .map(ContainerSkuID::of).collect(Collectors.toSet());
            removedKeys.removeAll(preservedKeys);
            removedKeys.forEach(this.skuToRuleMap::remove);

            rules
                .getScaleRules().values()
                .forEach(scaleRule -> {
                    log.info("Cluster [{}]: Adding scaleRule: {}", this.clusterId, scaleRule);
                    final ClusterAvailabilityRule clusterAvailabilityRule = createClusterAvailabilityRule(
                        scaleRule, this.skuToRuleMap.get(scaleRule.getSkuId()));
                    this.skuToRuleMap.put(scaleRule.getSkuId(), clusterAvailabilityRule);
                });
            GetRuleSetResponse fetchFut =
                GetRuleSetResponse.builder()
                    .rules(ImmutableMap.copyOf(this.skuToRuleMap))
                    .clusterID(this.clusterId)
                    .build();
            // Self-message is only logged by createReceive(); it records the refreshed rule count.
            self().tell(fetchFut, self());
        } catch (IOException e) {
            log.error("Failed to fetch rule set for cluster: {}", this.clusterId, e);
        }
    }

    private ClusterAvailabilityRule createClusterAvailabilityRule(ResourceClusterScaleSpec scaleSpec, ClusterAvailabilityRule existingRule) {
        if (existingRule == null) {
            return new ClusterAvailabilityRule(scaleSpec, this.clock, Instant.MIN, true);
        }
        // If rule exists already, port over lastActionInstant and enabled from existing rule
        return new ClusterAvailabilityRule(scaleSpec, this.clock, existingRule.lastActionInstant, existingRule.enabled);
    }

    // Toggle a SKU's scaler; a disable is automatically reverted after the request's expiration.
    private void onSetScalerStatus(SetResourceClusterScalerStatusRequest req) {
        if (skuToRuleMap.containsKey(req.getSkuId())) {
            skuToRuleMap.get(req.getSkuId()).setEnabled(req.getEnabled());
            if (!req.getEnabled()) {
                // setup a timer to re-enable autoscaling after a given period
                setExpireSetScalerStatusRequestTimer(new ExpireSetScalerStatusRequest(req));
            }
        }
    }

    private void onExpireSetScalerStatus(ExpireSetScalerStatusRequest req) {
        log.info("Expiration set scaler status request: {}", req);
        // re-enable autoscaling if it's been disabled for longer than threshold
        final ContainerSkuID skuID = req.request.getSkuId();
        final ClusterAvailabilityRule rule = skuToRuleMap.get(skuID);
        if (rule != null && !rule.isEnabled()) {
            // isLastActionOlderThan is true while still within the window (see note on the method),
            // so the negation means "the disable period has elapsed".
            if (!skuToRuleMap.get(skuID).isLastActionOlderThan(req.getRequest().getExpirationDurationInSeconds())) {
                skuToRuleMap.get(skuID).setEnabled(true);
            } else {
                // try again later
                setExpireSetScalerStatusRequestTimer(req);
            }
        }
    }

    private void setExpireSetScalerStatusRequestTimer(ExpireSetScalerStatusRequest req) {
        getTimers().startSingleTimer(
            "ExpireSetScalerStatusRequest-" + clusterId,
            req,
            Duration.ofSeconds(req.getRequest().getExpirationDurationInSeconds()));
    }

    private ScaleResourceRequest translateScaleDecision(ScaleDecision decision) {
        return ScaleResourceRequest.builder()
            .clusterId(this.clusterId)
            .skuId(decision.getSkuId())
            .desireSize(decision.getDesireSize())
            .build();
    }

    /** Self-scheduled message: poll cluster usage. */
    @Value
    @Builder
    static class TriggerClusterUsageRequest {
        ClusterID clusterID;
    }

    /** Self-scheduled message: a scaler-disable request has (potentially) expired. */
    @Value
    static class ExpireSetScalerStatusRequest {
        SetResourceClusterScalerStatusRequest request;
    }

    /** Self-scheduled message: reload the scale rule set. */
    @Value
    @Builder
    static class TriggerClusterRuleRefreshRequest {
        ClusterID clusterID;
    }

    @Value
    @Builder
    static class GetRuleSetRequest {
        ClusterID clusterID;
    }

    @Value
    @Builder
    static class GetRuleSetResponse {
        ClusterID clusterID;
        ImmutableMap<ContainerSkuID, ClusterAvailabilityRule> rules;
    }

    /**
     * Per-SKU scaling rule: compares idle-executor counts against the spec's min/max idle
     * thresholds and produces scale decisions, subject to a cool-down window.
     */
    static class ClusterAvailabilityRule {
        private final ResourceClusterScaleSpec scaleSpec;
        private final Clock clock;
        private Instant lastActionInstant;
        private boolean enabled;

        public ClusterAvailabilityRule(ResourceClusterScaleSpec scaleSpec, Clock clock, Instant lastActionInstant, Boolean enabled) {
            this.scaleSpec = scaleSpec;
            this.clock = clock;

            // TODO: probably we should use current time
            this.lastActionInstant = lastActionInstant;
            this.enabled = enabled;
        }
        private void resetLastActionInstant() {
            log.debug("resetLastActionInstant: {}, {}", this.scaleSpec.getClusterId(), this.scaleSpec.getSkuId());
            lastActionInstant = clock.instant();
        }

        public void setEnabled(boolean enabled) {
            log.debug("setEnabled: {}, {}, {}", enabled, this.scaleSpec.getClusterId(), this.scaleSpec.getSkuId());
            this.enabled = enabled;
            resetLastActionInstant();
        }

        public boolean isEnabled() { return enabled; }

        // NOTE(review): despite the name, this returns true when the last action happened
        // WITHIN the given number of seconds (lastAction + secs is still in the future),
        // i.e. it is effectively an "in cool-down" check. All call sites rely on this
        // inverted meaning — confirm before renaming.
        public boolean isLastActionOlderThan(long secondsSinceLastAction) {
            log.debug("[isLastActionOlderThan] secondsSinceLastAction: {}, {}, {}", secondsSinceLastAction, this.scaleSpec.getClusterId(), this.scaleSpec.getSkuId());
            log.debug("[isLastActionOlderThan] lastActionInstant: {}, {}, {}", lastActionInstant, this.scaleSpec.getClusterId(), this.scaleSpec.getSkuId());
            log.debug("[isLastActionOlderThan] lastActionInstant + secondsSinceLastAction: {}, {}, {}", lastActionInstant.plusSeconds(secondsSinceLastAction), this.scaleSpec.getClusterId(), this.scaleSpec.getSkuId());
            log.debug("[isLastActionOlderThan] comp: {}, {}, {}", lastActionInstant.plusSeconds(secondsSinceLastAction).compareTo(clock.instant()) > 0, this.scaleSpec.getClusterId(), this.scaleSpec.getSkuId());
            return lastActionInstant.plusSeconds(secondsSinceLastAction).compareTo(clock.instant()) > 0;
        }

        /**
         * Evaluate one usage report. Returns empty when within cool-down or when no threshold
         * is crossed; otherwise a ScaleUp/ScaleDown decision, or NoOpReachMax/NoOpReachMin
         * when the desired size is clamped to the spec's bounds.
         */
        public Optional<ScaleDecision> apply(UsageByGroupKey usage) {
            Optional<ScaleDecision> decision = Optional.empty();
            if (usage.getIdleCount() > scaleSpec.getMaxIdleToKeep()) {
                // Cool down check: for scaling down we want to wait 5x the nominal cool down period
                if (isLastActionOlderThan(scaleSpec.getCoolDownSecs() * 5)) {
                    log.debug("Scale Down CoolDown skip: {}, {}", this.scaleSpec.getClusterId(), this.scaleSpec.getSkuId());
                    return Optional.empty();
                }

                // too many idle agents, scale down.
                int step = usage.getIdleCount() - scaleSpec.getMaxIdleToKeep();
                int newSize = Math.max(
                    usage.getTotalCount() - step, this.scaleSpec.getMinSize());
                decision = Optional.of(
                    ScaleDecision.builder()
                        .clusterId(this.scaleSpec.getClusterId())
                        .skuId(this.scaleSpec.getSkuId())
                        .desireSize(newSize)
                        .maxSize(newSize)
                        .minSize(newSize)
                        .type(newSize == usage.getTotalCount() ? ScaleType.NoOpReachMin : ScaleType.ScaleDown)
                        .build());
            }
            else if (usage.getIdleCount() < scaleSpec.getMinIdleToKeep()) {
                // Cool down check
                if (isLastActionOlderThan(scaleSpec.getCoolDownSecs())) {
                    log.debug("Scale Up CoolDown skip: {}, {}", this.scaleSpec.getClusterId(), this.scaleSpec.getSkuId());
                    return Optional.empty();
                }

                // scale up
                int step = scaleSpec.getMinIdleToKeep() - usage.getIdleCount();
                int newSize = Math.min(
                    usage.getTotalCount() + step, this.scaleSpec.getMaxSize());
                decision = Optional.of(
                    ScaleDecision.builder()
                        .clusterId(this.scaleSpec.getClusterId())
                        .skuId(this.scaleSpec.getSkuId())
                        .desireSize(newSize)
                        .maxSize(newSize)
                        .minSize(newSize)
                        .type(newSize == usage.getTotalCount() ? ScaleType.NoOpReachMax : ScaleType.ScaleUp)
                        .build());
            }

            log.info("Scale Decision for {}-{}: {}",
                this.scaleSpec.getClusterId(), this.scaleSpec.getSkuId(), decision);

            // reset last action only if we decided to scale up or down
            if (decision.isPresent() && (decision.get().type.equals(ScaleType.ScaleDown) || decision.get().type.equals(ScaleType.ScaleUp))) {
                log.debug("Ongoing scale operation. Resetting last action timer: {}, {}", this.scaleSpec.getClusterId(), this.scaleSpec.getSkuId());
                resetLastActionInstant();
            }
            return decision;
        }
    }

    enum ScaleType {
        NoOpReachMax,
        NoOpReachMin,
        ScaleUp,
        ScaleDown,
    }

    @Value
    @Builder
    static class ScaleDecision {
        ContainerSkuID skuId;
        ClusterID clusterId;
        int maxSize;
        int minSize;
        int desireSize;
        ScaleType type;
    }

    /**
     * {@link TaskExecutorRegistration} holds a task attribute map in which the container SKU id's
     * resource id is stored as a string. This key function retrieves that string to use as the
     * usage grouping key.
     */
    static Function<TaskExecutorRegistration, Optional<String>> groupKeyFromTaskExecutorDefinitionIdFunc =
        reg -> reg.getTaskExecutorContainerDefinitionId().map(id -> id.getResourceID());
}
| 8,090 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/ExecutorStateManagerImpl.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster;
import io.mantisrx.common.WorkerConstants;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetActiveJobsRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetClusterUsageRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.TaskExecutorAssignmentRequest;
import io.mantisrx.master.resourcecluster.proto.GetClusterIdleInstancesRequest;
import io.mantisrx.master.resourcecluster.proto.GetClusterUsageResponse;
import io.mantisrx.master.resourcecluster.proto.GetClusterUsageResponse.GetClusterUsageResponseBuilder;
import io.mantisrx.master.resourcecluster.proto.GetClusterUsageResponse.UsageByGroupKey;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.resourcecluster.ContainerSkuID;
import io.mantisrx.server.master.resourcecluster.ResourceCluster.ResourceOverview;
import io.mantisrx.server.master.resourcecluster.TaskExecutorID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorRegistration;
import io.mantisrx.shaded.com.google.common.cache.Cache;
import io.mantisrx.shaded.com.google.common.cache.CacheBuilder;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NavigableSet;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import lombok.Builder;
import lombok.Value;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.tuple.Pair;
@Slf4j
class ExecutorStateManagerImpl implements ExecutorStateManager {
private final Map<TaskExecutorID, TaskExecutorState> taskExecutorStateMap = new HashMap<>();
/**
* Cache the available executors ready to accept assignments. Note these executors' state are not strongly
* synchronized and requires state level check when matching.
*/
private final SortedMap<Double, NavigableSet<TaskExecutorHolder>> executorByCores = new ConcurrentSkipListMap<>();
private final Cache<TaskExecutorID, TaskExecutorState> archivedState = CacheBuilder.newBuilder()
.maximumSize(10000)
.expireAfterWrite(24, TimeUnit.HOURS)
.removalListener(notification ->
log.info("Archived TaskExecutor: {} removed due to: {}", notification.getKey(), notification.getCause()))
.build();
@Override
public void trackIfAbsent(TaskExecutorID taskExecutorID, TaskExecutorState state) {
    // Begin tracking this executor unless it is already known.
    this.taskExecutorStateMap.putIfAbsent(taskExecutorID, state);
    // A re-appearing executor is no longer "archived"; drop it from the archive cache.
    TaskExecutorState archived = this.archivedState.getIfPresent(taskExecutorID);
    if (archived != null) {
        log.info("Reviving archived executor: {}", taskExecutorID);
        this.archivedState.invalidate(taskExecutorID);
    }
    tryMarkAvailable(taskExecutorID, state);
}
/**
 * Add the executor to the per-core-count availability bucket. Note that new executors won't
 * have a valid registration at this moment and require marking again later once the
 * registration is ready.
 * @param taskExecutorID taskExecutorID
 * @param state state
 * @return whether the target executor is marked as available.
 */
private boolean tryMarkAvailable(TaskExecutorID taskExecutorID, TaskExecutorState state) {
    if (state.isAvailable() && state.getRegistration() != null) {
        TaskExecutorHolder teHolder = TaskExecutorHolder.of(taskExecutorID, state.getRegistration());
        log.debug("Marking executor {} as available for matching.", teHolder);
        double cpuCores = state.getRegistration().getMachineDefinition().getCpuCores();
        log.info("Assign {} to available.", teHolder.getId());
        // computeIfAbsent is atomic on ConcurrentSkipListMap and replaces the previous
        // containsKey + putIfAbsent + get triple lookup.
        return this.executorByCores
            .computeIfAbsent(cpuCores, k -> new TreeSet<>(TaskExecutorHolder.generationFirstComparator))
            .add(teHolder);
    }
    else {
        log.debug("Ignore unavailable TE: {}", taskExecutorID);
        return false;
    }
}
@Override
public boolean tryMarkAvailable(TaskExecutorID taskExecutorID) {
if (!this.taskExecutorStateMap.containsKey(taskExecutorID)) {
log.warn("marking invalid executor as available: {}", taskExecutorID);
return false;
}
TaskExecutorState taskExecutorState = this.taskExecutorStateMap.get(taskExecutorID);
return tryMarkAvailable(taskExecutorID, taskExecutorState);
}
@Override
public boolean tryMarkUnavailable(TaskExecutorID taskExecutorID) {
if (this.taskExecutorStateMap.containsKey(taskExecutorID)) {
TaskExecutorState taskExecutorState = this.taskExecutorStateMap.get(taskExecutorID);
if (taskExecutorState.getRegistration() != null) {
double cpuCores = taskExecutorState.getRegistration().getMachineDefinition().getCpuCores();
if (this.executorByCores.containsKey(cpuCores)) {
this.executorByCores.get(cpuCores)
.remove(TaskExecutorHolder.of(taskExecutorID, taskExecutorState.getRegistration()));
}
return true;
}
}
// todo: check archive map as well?
log.warn("invalid task executor to mark as unavailable: {}", taskExecutorID);
return false;
}
@Override
public ResourceOverview getResourceOverview() {
long numRegistered = taskExecutorStateMap.values().stream().filter(TaskExecutorState::isRegistered).count();
long numAvailable = taskExecutorStateMap.values().stream().filter(TaskExecutorState::isAvailable).count();
long numOccupied = taskExecutorStateMap.values().stream().filter(TaskExecutorState::isRunningTask).count();
long numAssigned = taskExecutorStateMap.values().stream().filter(TaskExecutorState::isAssigned).count();
long numDisabled = taskExecutorStateMap.values().stream().filter(TaskExecutorState::isDisabled).count();
return new ResourceOverview(numRegistered, numAvailable, numOccupied, numAssigned, numDisabled);
}
@Override
public List<TaskExecutorID> getIdleInstanceList(GetClusterIdleInstancesRequest req) {
return this.taskExecutorStateMap.entrySet().stream()
.filter(kv -> {
if (kv.getValue().getRegistration() == null) {
return false;
}
Optional<ContainerSkuID> skuIdO =
kv.getValue().getRegistration().getTaskExecutorContainerDefinitionId();
return skuIdO.isPresent() && skuIdO.get().equals(req.getSkuId());
})
.filter(isAvailable)
.map(Entry::getKey)
.limit(req.getMaxInstanceCount())
.collect(Collectors.toList());
}
@Override
public TaskExecutorState get(TaskExecutorID taskExecutorID) {
return this.taskExecutorStateMap.get(taskExecutorID);
}
@Override
public TaskExecutorState getIncludeArchived(TaskExecutorID taskExecutorID) {
if (this.taskExecutorStateMap.containsKey(taskExecutorID)) {
return this.taskExecutorStateMap.get(taskExecutorID);
}
return this.archivedState.getIfPresent(taskExecutorID);
}
@Override
public TaskExecutorState archive(TaskExecutorID taskExecutorID) {
if (this.taskExecutorStateMap.containsKey(taskExecutorID)) {
this.archivedState.put(taskExecutorID, this.taskExecutorStateMap.get(taskExecutorID));
this.taskExecutorStateMap.remove(taskExecutorID);
return this.archivedState.getIfPresent(taskExecutorID);
}
else {
log.warn("archiving invalid TaskExecutor: {}", taskExecutorID);
return null;
}
}
@Override
public List<TaskExecutorID> getTaskExecutors(Predicate<Entry<TaskExecutorID, TaskExecutorState>> predicate) {
return this.taskExecutorStateMap
.entrySet()
.stream()
.filter(predicate)
.map(Entry::getKey)
.collect(Collectors.toList());
}
@Override
public List<String> getActiveJobs(GetActiveJobsRequest req) {
return this.taskExecutorStateMap
.values()
.stream()
.map(TaskExecutorState::getWorkerId)
.filter(Objects::nonNull)
.map(WorkerId::getJobId)
.distinct()
.sorted((String::compareToIgnoreCase))
.skip(req.getStartingIndex().orElse(0))
.limit(req.getPageSize().orElse(3000))
.collect(Collectors.toList());
}
@Override
public Optional<Entry<TaskExecutorID, TaskExecutorState>> findFirst(
Predicate<Entry<TaskExecutorID, TaskExecutorState>> predicate) {
return taskExecutorStateMap
.entrySet()
.stream()
.filter(predicate)
.findFirst();
}
@Override
public Optional<Pair<TaskExecutorID, TaskExecutorState>> findBestFit(TaskExecutorAssignmentRequest request) {
// only allow allocation in the lowest CPU cores matching group.
SortedMap<Double, NavigableSet<TaskExecutorHolder>> targetMap =
this.executorByCores.tailMap(request.getAllocationRequest().getMachineDefinition().getCpuCores());
if (targetMap.isEmpty()) {
log.warn("Cannot find any executor for request: {}", request);
return Optional.empty();
}
Double targetCoreCount = targetMap.firstKey();
log.debug("Applying assignmentReq: {} to {} cores.", request, targetCoreCount);
Double requestedCoreCount = request.getAllocationRequest().getMachineDefinition().getCpuCores();
if (Math.abs(targetCoreCount - requestedCoreCount) > 1E-10) {
// this mismatch should not happen in production and indicates TE registration/spec problem.
log.warn("Requested core count mismatched. requested: {}, found: {} for {}", requestedCoreCount,
targetCoreCount,
request);
}
if (this.executorByCores.get(targetCoreCount).isEmpty()) {
log.warn("No available TE found for core count: {}, request: {}", targetCoreCount, request);
return Optional.empty();
}
return this.executorByCores.get(targetCoreCount)
.descendingSet()
.stream()
.filter(teHolder -> {
if (!this.taskExecutorStateMap.containsKey(teHolder.getId())) {
return false;
}
TaskExecutorState st = this.taskExecutorStateMap.get(teHolder.getId());
return st.isAvailable() &&
st.getRegistration() != null &&
st.getRegistration().getMachineDefinition().canFit(
request.getAllocationRequest().getMachineDefinition());
})
.findFirst()
.map(TaskExecutorHolder::getId)
.map(taskExecutorID -> Pair.of(taskExecutorID, this.taskExecutorStateMap.get(taskExecutorID)));
}
@Override
public Set<Entry<TaskExecutorID, TaskExecutorState>> getActiveExecutorEntry() {
return this.taskExecutorStateMap.entrySet();
}
@Override
public GetClusterUsageResponse getClusterUsage(GetClusterUsageRequest req) {
log.info("Computing cluster usage: {}", req.getClusterID());
// default grouping is containerSkuID to usage
Map<String, Pair<Integer, Integer>> usageByGroupKey = new HashMap<>();
taskExecutorStateMap.forEach((key, value) -> {
if (value == null ||
value.getRegistration() == null) {
log.info("Empty registration: {}, {}. Skip usage request.", req.getClusterID(), key);
return;
}
// do not count the disabled TEs.
if (value.isDisabled()) {
return;
}
Optional<String> groupKeyO =
req.getGroupKeyFunc().apply(value.getRegistration());
if (!groupKeyO.isPresent()) {
log.info("Empty groupKey from: {}, {}. Skip usage request.", req.getClusterID(), key);
return;
}
String groupKey = groupKeyO.get();
Pair<Integer, Integer> kvState = Pair.of(
value.isAvailable() ? 1 : 0,
value.isRegistered() ? 1 : 0);
if (usageByGroupKey.containsKey(groupKey)) {
Pair<Integer, Integer> prevState = usageByGroupKey.get(groupKey);
usageByGroupKey.put(
groupKey,
Pair.of(
kvState.getLeft() + prevState.getLeft(), kvState.getRight() + prevState.getRight()));
} else {
usageByGroupKey.put(groupKey, kvState);
}
});
GetClusterUsageResponseBuilder resBuilder = GetClusterUsageResponse.builder().clusterID(req.getClusterID());
usageByGroupKey.forEach((key, value) -> resBuilder.usage(UsageByGroupKey.builder()
.usageGroupKey(key)
.idleCount(value.getLeft())
.totalCount(value.getRight())
.build()));
GetClusterUsageResponse res = resBuilder.build();
log.info("Usage result: {}", res);
return res;
}
/**
* Holder class in {@link ExecutorStateManagerImpl} to wrap task executor ID with other metatdata needed during
* scheduling e.g. generation.
*/
@Builder
@Value
protected static class TaskExecutorHolder {
TaskExecutorID Id;
String generation;
static TaskExecutorHolder of(TaskExecutorID id, TaskExecutorRegistration reg) {
String generation = reg.getAttributeByKey(WorkerConstants.MANTIS_WORKER_CONTAINER_GENERATION)
.orElse(reg.getAttributeByKey(WorkerConstants.AUTO_SCALE_GROUP_KEY).orElse("empty-generation"));
return TaskExecutorHolder.builder()
.Id(id)
.generation(generation)
.build();
}
static Comparator<TaskExecutorHolder> generationFirstComparator =
Comparator.comparing(TaskExecutorHolder::getGeneration)
.thenComparing(teh -> teh.getId().getResourceId());
}
}
| 8,091 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/ExecutorStateManager.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetActiveJobsRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.GetClusterUsageRequest;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.TaskExecutorAssignmentRequest;
import io.mantisrx.master.resourcecluster.proto.GetClusterIdleInstancesRequest;
import io.mantisrx.master.resourcecluster.proto.GetClusterUsageResponse;
import io.mantisrx.server.master.resourcecluster.ResourceCluster.ResourceOverview;
import io.mantisrx.server.master.resourcecluster.TaskExecutorID;
import java.util.List;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import java.util.function.Predicate;
import javax.annotation.Nullable;
import org.apache.commons.lang3.tuple.Pair;
/**
 * A component to manage the states of {@link TaskExecutorState} for a given {@link ResourceClusterActor}.
 */
interface ExecutorStateManager {
    /**
     * Store and track the given task executor's state inside this {@link ExecutorStateManager} if there is no existing
     * state already. Ignore the given state instance if there is already a state associated with the given ID.
     * @param taskExecutorID TaskExecutorID
     * @param state new task executor state
     */
    void trackIfAbsent(TaskExecutorID taskExecutorID, TaskExecutorState state);

    /**
     * Try to mark the given task executor as available if its tracked state is available.
     * @param taskExecutorID TaskExecutorID
     * @return whether the given task executor became available.
     */
    boolean tryMarkAvailable(TaskExecutorID taskExecutorID);

    /**
     * Try to mark the given task executor as unavailable.
     * @param taskExecutorID TaskExecutorID
     * @return whether the executor was tracked with a valid registration (and thus could be removed
     *         from the availability pool).
     */
    boolean tryMarkUnavailable(TaskExecutorID taskExecutorID);

    /** Returns the tracked state for the given executor, or {@code null} if it is not tracked. */
    @Nullable
    TaskExecutorState get(TaskExecutorID taskExecutorID);

    /** Like {@link #get} but also searches archived (no longer actively tracked) executors. */
    @Nullable
    TaskExecutorState getIncludeArchived(TaskExecutorID taskExecutorID);

    /**
     * Moves the given executor from active tracking into the archive.
     * @return the archived state, or {@code null} if the executor was not actively tracked.
     */
    @Nullable
    TaskExecutorState archive(TaskExecutorID taskExecutorID);

    /** Aggregated counts (registered/available/occupied/assigned/disabled) over all tracked executors. */
    ResourceOverview getResourceOverview();

    /** Computes per-group (e.g. per container SKU) usage counts for the cluster. */
    GetClusterUsageResponse getClusterUsage(GetClusterUsageRequest req);

    /** Lists idle executor IDs matching the request, capped at the requested max instance count. */
    List<TaskExecutorID> getIdleInstanceList(GetClusterIdleInstancesRequest req);

    /** Returns the IDs of all tracked executors whose (ID, state) entry satisfies the predicate. */
    List<TaskExecutorID> getTaskExecutors(Predicate<Entry<TaskExecutorID, TaskExecutorState>> predicate);

    /** Returns a sorted, paginated list of distinct job IDs running on tracked executors. */
    List<String> getActiveJobs(GetActiveJobsRequest req);

    /** Finds the first tracked (ID, state) entry satisfying the predicate, if any. */
    Optional<Entry<TaskExecutorID, TaskExecutorState>> findFirst(
        Predicate<Entry<TaskExecutorID, TaskExecutorState>> predicate);

    /**
     * Find a matched task executor best fitting the given assignment request.
     * @param request Assignment request.
     * @return Optional of matched task executor.
     */
    Optional<Pair<TaskExecutorID, TaskExecutorState>> findBestFit(TaskExecutorAssignmentRequest request);

    /** Live view of all actively tracked (ID, state) entries. */
    Set<Entry<TaskExecutorID, TaskExecutorState>> getActiveExecutorEntry();

    // Reusable entry predicates for use with getTaskExecutors/findFirst.
    Predicate<Entry<TaskExecutorID, TaskExecutorState>> isRegistered =
        e -> e.getValue().isRegistered();

    Predicate<Entry<TaskExecutorID, TaskExecutorState>> isBusy =
        e -> e.getValue().isRunningTask();

    Predicate<Entry<TaskExecutorID, TaskExecutorState>> unregistered =
        e -> e.getValue().isDisconnected();

    Predicate<Entry<TaskExecutorID, TaskExecutorState>> isAvailable =
        e -> e.getValue().isAvailable();

    Predicate<Entry<TaskExecutorID, TaskExecutorState>> isDisabled =
        e -> e.getValue().isDisabled();

    Predicate<Entry<TaskExecutorID, TaskExecutorState>> isAssigned =
        e -> e.getValue().isAssigned();
}
| 8,092 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/proto/SetResourceClusterScalerStatusRequest.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.proto;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.ContainerSkuID;
import lombok.Builder;
import lombok.NonNull;
import lombok.Value;
/**
 * Request to enable or disable the autoscaler for one SKU of a resource cluster,
 * with an expiration after which the status change lapses.
 */
@Builder
@Value
public class SetResourceClusterScalerStatusRequest {
    // SKU (container definition) whose scaler status is being changed.
    @NonNull
    ContainerSkuID skuId;

    // Desired scaler status: true to enable scaling for the SKU, false to disable.
    @NonNull
    Boolean enabled;

    // Cluster owning the SKU.
    @NonNull
    ClusterID clusterID;

    // How long (seconds) the status change stays in effect before expiring.
    @NonNull
    Long expirationDurationInSeconds;
}
| 8,093 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/proto/ListResourceClusterRequest.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.proto;
import lombok.Builder;
import lombok.Value;
/** Request to list all registered resource clusters. */
@Builder
@Value
public class ListResourceClusterRequest {
    // Intentionally empty: listing registered clusters takes no parameters.
}
| 8,094 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/proto/DisableTaskExecutorsRequest.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.proto;
import io.mantisrx.server.master.resourcecluster.TaskExecutorID;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import lombok.NonNull;
import lombok.Value;
/**
 * Request to disable task executors, selected either by a specific executor ID or by
 * matching attribute key/value pairs, for a bounded duration.
 */
@Value
public class DisableTaskExecutorsRequest {
    // Attribute key/value pairs an executor must match to be disabled.
    // NOTE(review): always initialized empty here — presumably populated via
    // deserialization or mutation by the caller; confirm.
    Map<String, String> attributes = new HashMap<>();

    // How long (hours) the disable request stays in effect before expiring.
    @NonNull
    Long expirationDurationInHours;

    // Optional specific executor to disable; when absent, selection is attribute-based.
    Optional<TaskExecutorID> taskExecutorID;
}
| 8,095 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/proto/ResourceClusterProvisionSubmissionResponse.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.proto;
import lombok.Builder;
import lombok.Value;
/**
 * Response contract for a {@code IResourceClusterProvider} provision request submission.
 * Exactly one of {@code response}/{@code error} is expected to be meaningful.
 */
@Builder
@Value
public class ResourceClusterProvisionSubmissionResponse {
    // Provider-specific response payload when the submission succeeded.
    String response;

    // Failure cause when the submission failed; null on success.
    Throwable error;
}
| 8,096 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/proto/ScaleResourceRequest.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.proto;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.ContainerSkuID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorID;
import io.mantisrx.shaded.com.google.common.base.Joiner;
import java.util.List;
import java.util.Optional;
import lombok.Builder;
import lombok.Singular;
import lombok.Value;
@Builder
@Value
public class ScaleResourceRequest {
    ClusterID clusterId;
    ContainerSkuID skuId;
    String region;
    Optional<MantisResourceClusterEnvType> envType;
    int desireSize;

    @Singular
    List<TaskExecutorID> idleInstances;

    /**
     * Builds a stable identifier for this scale request of the form
     * {@code <clusterId>-<region>-<envType>-<skuId>-<desireSize>}.
     * Missing region or envType segments render as empty strings.
     */
    public String getScaleRequestId() {
        String regionSegment = this.region != null ? this.region : "";

        // envType may itself be null (builder default) in addition to Optional.empty().
        String envSegment = "";
        if (this.envType != null && this.envType.isPresent()) {
            envSegment = this.envType.get().name();
        }

        return Joiner.on('-').join(
            this.clusterId.getResourceID(),
            regionSegment,
            envSegment,
            this.skuId.getResourceID(),
            this.desireSize);
    }
}
| 8,097 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/proto/ProvisionResourceClusterRequest.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.proto;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import lombok.Builder;
import lombok.Value;
/**
 * Contract between API route and {@link io.mantisrx.master.resourcecluster.ResourceClustersHostManagerActor}:
 * asks the host manager to provision a resource cluster with the given spec.
 */
@Builder
@Value
public class ProvisionResourceClusterRequest {
    // ID of the cluster to provision.
    ClusterID clusterId;

    // Full desired specification for the cluster.
    MantisResourceClusterSpec clusterSpec;
}
| 8,098 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/main/java/io/mantisrx/master/resourcecluster/proto/ResourceClusterAPIProto.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster.proto;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
import lombok.Builder;
import lombok.Singular;
import lombok.Value;
/** Container for resource-cluster CRUD API request/response contracts. */
public class ResourceClusterAPIProto {
    // TODO: @Value generated equals doesn't include base class fields.

    /** Response listing every registered resource cluster with its spec version. */
    @Value
    public static class ListResourceClustersResponse extends BaseResponse {
        @Singular
        List<RegisteredResourceCluster> registeredResourceClusters;

        /** [Note] The @JsonCreator + @JsonProperty is needed when using this class with mixed shaded/non-shaded Jackson.
         * The new @Jacksonized annotation is currently not usable with shaded Jackson here.
         */
        @Builder
        @JsonCreator
        public ListResourceClustersResponse(
            @JsonProperty("requestId") final long requestId,
            @JsonProperty("responseCode") final ResponseCode responseCode,
            @JsonProperty("message") final String message,
            @JsonProperty("registeredResourceClusters") final List<RegisteredResourceCluster> registeredResourceClusters) {
            super(requestId, responseCode, message);
            this.registeredResourceClusters = registeredResourceClusters;
        }

        /** One registered cluster entry: its ID and the version of its stored spec. */
        @Value
        @Builder
        public static class RegisteredResourceCluster {
            ClusterID id;
            String version;

            @JsonCreator
            public RegisteredResourceCluster(
                @JsonProperty("id") final ClusterID id,
                @JsonProperty("version") final String version) {
                this.id = id;
                this.version = version;
            }
        }
    }

    /** Response carrying the full stored spec of a single resource cluster. */
    @Value
    public static class GetResourceClusterResponse extends BaseResponse {
        MantisResourceClusterSpec clusterSpec;

        @Builder
        @JsonCreator
        public GetResourceClusterResponse(
            @JsonProperty("requestId") final long requestId,
            @JsonProperty("responseCode") final ResponseCode responseCode,
            @JsonProperty("message") final String message,
            @JsonProperty("clusterSpec") final MantisResourceClusterSpec clusterSpec) {
            super(requestId, responseCode, message);
            this.clusterSpec = clusterSpec;
        }
    }

    /** Acknowledgement for a cluster deletion request; carries only the base response fields. */
    @Value
    public static class DeleteResourceClusterResponse extends BaseResponse {
        @Builder
        @JsonCreator
        public DeleteResourceClusterResponse(
            @JsonProperty("requestId") final long requestId,
            @JsonProperty("responseCode") final ResponseCode responseCode,
            @JsonProperty("message") final String message) {
            super(requestId, responseCode, message);
        }
    }

    /** Request to delete the resource cluster with the given ID. */
    @Builder
    @Value
    public static class DeleteResourceClusterRequest {
        ClusterID clusterId;
    }
}
| 8,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.