index
int64
0
0
repo_id
stringlengths
26
205
file_path
stringlengths
51
246
content
stringlengths
8
433k
__index_level_0__
int64
0
10k
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/utils/MantisUserClock.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.utils;

import java.util.concurrent.atomic.AtomicLong;

/**
 * A manually controlled {@link MantisClock}: time only moves when a caller
 * invokes {@link #setNow(long)} or {@link #advanceTime(long)}.
 *
 * <p>Thread-safety: backed by an {@link AtomicLong}. The original used a
 * {@code volatile long} with {@code currentTime += delta}, which is a
 * non-atomic read-modify-write and could lose concurrent advances.
 */
public class MantisUserClock implements MantisClock {

    // Atomic so advanceTime() is a single atomic add rather than a racy
    // read-modify-write on a volatile field.
    private final AtomicLong currentTime = new AtomicLong(0L);

    /** Returns the last value set/advanced to; 0 until first mutation. */
    @Override
    public long now() {
        return currentTime.get();
    }

    /** Sets the clock to an absolute timestamp. */
    public void setNow(final long timestamp) {
        currentTime.set(timestamp);
    }

    /** Moves the clock forward (or backward, if delta is negative) by delta. */
    public void advanceTime(final long delta) {
        currentTime.addAndGet(delta);
    }
}
4,300
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/mesos/VirtualMachineMasterServiceMesosImpl.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.mesos; import java.net.URL; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.ThreadFactory; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Supplier; import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException; import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature; import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper; import com.google.protobuf.ByteString; import com.netflix.fenzo.VirtualMachineLease; import io.mantisrx.runtime.MachineDefinition; import io.mantisrx.runtime.parameter.Parameter; import io.mantisrx.server.core.BaseService; import io.mantisrx.server.core.ExecuteStageRequest; import io.mantisrx.server.core.WorkerTopologyInfo; import io.mantisrx.server.core.domain.JobMetadata; import io.mantisrx.server.core.domain.WorkerId; import io.mantisrx.server.master.LaunchTaskException; import io.mantisrx.server.master.VirtualMachineMasterService; import io.mantisrx.server.master.config.ConfigurationProvider; import io.mantisrx.server.master.config.MasterConfiguration; import io.mantisrx.server.master.scheduler.LaunchTaskRequest; import 
io.mantisrx.server.master.scheduler.ScheduleRequest; import org.apache.mesos.MesosSchedulerDriver; import org.apache.mesos.Protos; import org.apache.mesos.Protos.CommandInfo; import org.apache.mesos.Protos.ExecutorID; import org.apache.mesos.Protos.ExecutorInfo; import org.apache.mesos.Protos.Offer; import org.apache.mesos.Protos.Resource; import org.apache.mesos.Protos.TaskID; import org.apache.mesos.Protos.TaskInfo; import org.apache.mesos.Protos.Value; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.functions.Action0; public class VirtualMachineMasterServiceMesosImpl extends BaseService implements VirtualMachineMasterService { private static final Logger logger = LoggerFactory.getLogger(VirtualMachineMasterServiceMesosImpl.class); private final String masterDescriptionJson; private final Supplier<MesosSchedulerDriver> mesosDriver; private final AtomicBoolean initializationDone = new AtomicBoolean(false); private volatile int workerJvmMemoryScaleBackPct; private MasterConfiguration masterConfig; private ExecutorService executor; private ObjectMapper mapper = new ObjectMapper(); public VirtualMachineMasterServiceMesosImpl( final MasterConfiguration masterConfig, final String masterDescriptionJson, final Supplier<MesosSchedulerDriver> mesosSchedulerDriverSupplier) { super(true); this.masterConfig = masterConfig; this.masterDescriptionJson = masterDescriptionJson; this.mesosDriver = mesosSchedulerDriverSupplier; executor = Executors.newSingleThreadExecutor(new ThreadFactory() { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r, "vm_master_mesos_scheduler_thread"); t.setDaemon(true); return t; } }); workerJvmMemoryScaleBackPct = Math.min(99, ConfigurationProvider.getConfig().getWorkerJvmMemoryScaleBackPercentage()); mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); } // NOTE: All leases are for the same agent. 
@Override public Map<ScheduleRequest, LaunchTaskException> launchTasks(List<LaunchTaskRequest> requests, List<VirtualMachineLease> leases) { if (!super.getIsInited()) { logger.error("Not in leader mode, not launching tasks"); return new HashMap<>(); } Protos.SlaveID slaveID = leases.get(0).getOffer().getSlaveId(); List<Protos.OfferID> offerIDs = new ArrayList<>(); for (VirtualMachineLease vml : leases) offerIDs.add(vml.getOffer().getId()); Map<ScheduleRequest, LaunchTaskException> errorResults = new HashMap<>(); List<TaskInfo> taskInfos = new ArrayList<>(); for (LaunchTaskRequest request : requests) { try { taskInfos.addAll(createTaskInfo(slaveID, request)); } catch (LaunchTaskException e) { errorResults.put(request.getScheduleRequest(), e); } } if (!taskInfos.isEmpty()) mesosDriver.get().launchTasks(offerIDs, taskInfos); else { // reject offers to prevent offer leak, but shouldn't happen for (VirtualMachineLease l : leases) { mesosDriver.get().declineOffer(l.getOffer().getId()); } } return errorResults; } @Override public void rejectLease(VirtualMachineLease lease) { if (!super.getIsInited()) { logger.error("Not in leader mode, not rejecting lease"); return; } VirtualMachineLeaseMesosImpl mesosLease = (VirtualMachineLeaseMesosImpl) lease; Offer offer = mesosLease.getOffer(); mesosDriver.get().declineOffer(offer.getId()); } private Collection<TaskInfo> createTaskInfo(Protos.SlaveID slaveID, final LaunchTaskRequest launchTaskRequest) throws LaunchTaskException { final ScheduleRequest scheduleRequest = launchTaskRequest.getScheduleRequest(); String name = scheduleRequest.getWorkerId().getJobCluster() + " (stage: " + scheduleRequest.getStageNum() + " of " + scheduleRequest.getJobMetadata().getTotalStages() + ")"; TaskID taskId = TaskID.newBuilder() .setValue(scheduleRequest.getWorkerId().getId()) .build(); MachineDefinition machineDefinition = scheduleRequest.getMachineDefinition(); // grab ports within range List<Integer> ports = 
launchTaskRequest.getPorts().getAllPorts(); TaskInfo taskInfo = null; try { TaskInfo.Builder taskInfoBuilder = TaskInfo.newBuilder(); ExecuteStageRequest executeStageRequest = new ExecuteStageRequest( scheduleRequest.getWorkerId().getJobCluster(), scheduleRequest.getWorkerId().getJobId(), scheduleRequest.getWorkerId().getWorkerIndex(), scheduleRequest.getWorkerId().getWorkerNum(), scheduleRequest.getJobMetadata().getJobJarUrl(), scheduleRequest.getStageNum(), scheduleRequest.getJobMetadata().getTotalStages(), ports, getTimeoutSecsToReportStart(), launchTaskRequest.getPorts().getMetricsPort(), scheduleRequest.getJobMetadata().getParameters(), scheduleRequest.getJobMetadata().getSchedulingInfo(), scheduleRequest.getDurationType(), scheduleRequest.getJobMetadata().getSubscriptionTimeoutSecs(), scheduleRequest.getJobMetadata().getMinRuntimeSecs() - (System.currentTimeMillis() - scheduleRequest.getJobMetadata().getMinRuntimeSecs()), launchTaskRequest.getPorts() ); taskInfoBuilder .setName(name) .setTaskId(taskId) .setSlaveId(slaveID) .addResources( Resource.newBuilder() .setName("cpus") .setType(Value.Type.SCALAR) .setScalar( Value.Scalar.newBuilder() .setValue(machineDefinition.getCpuCores()))) .addResources( Resource.newBuilder() .setName("mem") .setType(Value.Type.SCALAR) .setScalar( Value.Scalar.newBuilder() .setValue(machineDefinition.getMemoryMB()))) .addResources( Resource.newBuilder() .setName("disk") .setType(Value.Type.SCALAR) .setScalar( Value.Scalar.newBuilder() .setValue(machineDefinition.getDiskMB()))) .addResources( Resource.newBuilder() .setName("network") .setType(Value.Type.SCALAR) .setScalar( Value.Scalar.newBuilder() .setValue(machineDefinition.getNetworkMbps()) ) ) .setExecutor( createMantisWorkerExecutor(executeStageRequest, launchTaskRequest, machineDefinition.getMemoryMB(), machineDefinition.getCpuCores())) .setData( ByteString.copyFrom( mapper.writeValueAsBytes( executeStageRequest))); if (!ports.isEmpty()) { for (Integer port : ports) { // add 
ports taskInfoBuilder.addResources( Resource .newBuilder() .setName("ports") .setType(Value.Type.RANGES) .setRanges( Value.Ranges .newBuilder() .addRange(Value.Range.newBuilder() .setBegin(port) .setEnd(port)))); } } taskInfo = taskInfoBuilder.build(); } catch (JsonProcessingException e) { throw new LaunchTaskException("Failed to build a TaskInfo instance: " + e.getMessage(), e); } List<TaskInfo> tasks = new ArrayList<>(1); tasks.add(taskInfo); return tasks; } private int getMemSize(int original) { // If job asked for >999MB but <4000MB, subtract out 500 MB for JVM, meta_space, code_cache, etc. // leaving rest for the heap, Xmx. if (original < 4000) return original > 999 ? original - 500 : original; // If job asked for >4000, subtract out based on scale back percentage, but at least 500 MB return original - Math.max((int) (original * workerJvmMemoryScaleBackPct / 100.0), 500); } private ExecutorInfo createMantisWorkerExecutor(final ExecuteStageRequest executeStageRequest, final LaunchTaskRequest launchTaskRequest, final double memoryMB, final double cpuCores) { final int memSize = getMemSize((int) memoryMB); final int numCpu = (int) Math.ceil(cpuCores); final WorkerId workerId = launchTaskRequest.getScheduleRequest().getWorkerId(); String executorName = workerId.getId(); JobMetadata jobMetadata = launchTaskRequest.getScheduleRequest().getJobMetadata(); URL jobJarUrl = jobMetadata.getJobJarUrl(); Protos.Environment.Builder envBuilder = Protos.Environment.newBuilder() .addVariables( Protos.Environment.Variable.newBuilder() .setName("JOB_URL") .setValue(jobJarUrl.toString())) .addVariables( Protos.Environment.Variable.newBuilder() .setName("JOB_NAME") .setValue(executorName)) .addVariables( Protos.Environment.Variable.newBuilder() .setName("WORKER_LIB_DIR") .setValue(getWorkerLibDir())) .addVariables( Protos.Environment.Variable.newBuilder() .setName("JVM_MEMORY_MB") .setValue("" + (memSize)) ) .addVariables( Protos.Environment.Variable.newBuilder() 
.setName("JVM_META_SPACE_MB") .setValue("100") ) .addVariables( Protos.Environment.Variable.newBuilder() .setName("JVM_CODE_CACHE_SIZE_MB") .setValue("200") ) .addVariables( Protos.Environment.Variable.newBuilder() .setName("JVM_COMP_CLASS_SIZE_MB") .setValue("100") ) .addVariables( Protos.Environment.Variable.newBuilder() .setName("WORKER_INDEX") .setValue("" + (workerId.getWorkerIndex())) ) .addVariables( Protos.Environment.Variable.newBuilder() .setName("WORKER_NUMBER") .setValue("" + (workerId.getWorkerNum())) ) .addVariables( Protos.Environment.Variable.newBuilder() .setName("JOB_ID") .setValue(workerId.getJobId()) ) .addVariables( Protos.Environment.Variable.newBuilder() .setName("MANTIS_WORKER_DEBUG_PORT") .setValue("" + launchTaskRequest.getPorts().getDebugPort()) ) .addVariables( Protos.Environment.Variable.newBuilder() .setName("MANTIS_WORKER_CONSOLE_PORT") .setValue("" + launchTaskRequest.getPorts().getConsolePort()) ) .addVariables( Protos.Environment.Variable.newBuilder() .setName("MANTIS_USER") .setValue("" + jobMetadata.getUser()) ) .addVariables( Protos.Environment.Variable.newBuilder() .setName("STAGE_NUMBER") .setValue("" + launchTaskRequest.getScheduleRequest().getStageNum()) ) .addVariables( Protos.Environment.Variable.newBuilder() .setName("NUM_CPU") .setValue("" + numCpu) ); // add worker info Map<String, String> envVars = new WorkerTopologyInfo.Writer(executeStageRequest).getEnvVars(); for (Map.Entry<String, String> entry : envVars.entrySet()) { envBuilder = envBuilder .addVariables( Protos.Environment.Variable.newBuilder() .setName(entry.getKey()) .setValue(entry.getValue()) ); } // add job parameters for (Parameter parameter : executeStageRequest.getParameters()) { if (parameter.getName() != null && parameter.getValue() != null) { envBuilder = envBuilder.addVariables( Protos.Environment.Variable.newBuilder() .setName(String.format("JOB_PARAM_" + parameter.getName())) .setValue(parameter.getValue()) ); } } // add ZooKeeper properties 
Protos.Environment env = envBuilder .addVariables( Protos.Environment.Variable.newBuilder() .setName("mantis.zookeeper.connectString") .setValue(masterConfig.getZkConnectionString()) ) .addVariables( Protos.Environment.Variable.newBuilder() .setName("mantis.zookeeper.root") .setValue(masterConfig.getZkRoot()) ) .addVariables( Protos.Environment.Variable.newBuilder() .setName("mantis.zookeeper.leader.announcement.path") .setValue(masterConfig.getLeaderAnnouncementPath()) ) .addVariables( Protos.Environment.Variable.newBuilder() .setName("MASTER_DESCRIPTION") .setValue(masterDescriptionJson)) .build(); return ExecutorInfo.newBuilder() .setExecutorId(ExecutorID.newBuilder().setValue(executorName)) .setCommand( CommandInfo.newBuilder() .setValue(getWorkerExecutorStartupScriptFullPath()) .setEnvironment(env)) .setName(getWorkerExecutorName()) .setSource(workerId.getJobId()) .build(); } @Override public void killTask(final WorkerId workerId) { if (!super.getIsInited()) { logger.error("Not in leader mode, not killing task"); return; } final String taskIdString = workerId.getId(); logger.info("Calling mesos to kill " + taskIdString); try { Protos.Status status = mesosDriver.get().killTask( TaskID.newBuilder() .setValue(taskIdString) .build()); logger.info("Kill status = " + status); switch (status) { case DRIVER_ABORTED: case DRIVER_STOPPED: logger.error("Unexpected to see Mesos driver status of " + status + " from kill task request. Committing suicide!"); System.exit(2); } } catch (RuntimeException e) { // IllegalStateException from no mesosDriver's addVMLeaseAction or NPE from mesosDriver.get() being null. 
logger.error("Unexpected to see Mesos driver not initialized", e); System.exit(2); } } @Override public void start() { super.awaitActiveModeAndStart(new Action0() { @Override public void call() { logger.info("Registering Mantis Framework with Mesos"); if (!initializationDone.compareAndSet(false, true)) throw new IllegalStateException("Duplicate start() call"); executor.execute(() -> { try { logger.info("invoking the Mesos driver run"); mesosDriver.get().run(); } catch (Exception e) { logger.error("Failed to register Mantis Framework with Mesos", e); System.exit(2); } }); } }); } @Override public void shutdown() { logger.info("Unregistering Mantis Framework with Mesos"); mesosDriver.get().stop(true); executor.shutdown(); } public String getMesosMasterHostAndPort() { return masterConfig.getMasterLocation(); } public String getWorkerInstallDir() { return masterConfig.getWorkerInstallDir(); } public String getWorkerLibDir() { return Paths.get(getWorkerInstallDir(), "libs").toString(); } private String getWorkerExecutorScript() { return masterConfig.getWorkerExecutorScript(); } private boolean getUseSlaveFiltering() { return masterConfig.getUseSlaveFiltering(); } private String getSlaveFilterAttributeName() { return masterConfig.getSlaveFilterAttributeName(); } public String getWorkerBinDir() { return Paths.get(getWorkerInstallDir(), "bin").toString(); } private String getWorkerExecutorStartupScriptFullPath() { return Paths.get(getWorkerBinDir(), getWorkerExecutorScript()).toString(); } public String getMantisFrameworkName() { return masterConfig.getMantisFrameworkName(); } public String getWorkerExecutorName() { return masterConfig.getWorkerExecutorName(); } public long getTimeoutSecsToReportStart() { return masterConfig.getTimeoutSecondsToReportStart(); } private double getMesosFailoverTimeoutSecs() { return masterConfig.getMesosFailoverTimeOutSecs(); } }
4,301
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/mesos/MesosDriverSupplier.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.mesos; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; import io.mantisrx.shaded.com.google.common.base.Preconditions; import com.netflix.fenzo.VirtualMachineLease; import io.mantisrx.server.master.config.MasterConfiguration; import io.mantisrx.server.master.scheduler.JobMessageRouter; import io.mantisrx.server.master.scheduler.WorkerRegistry; import org.apache.mesos.MesosSchedulerDriver; import org.apache.mesos.Protos; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observer; import rx.functions.Action1; public class MesosDriverSupplier implements Supplier<MesosSchedulerDriver> { private static final Logger logger = LoggerFactory.getLogger(MesosDriverSupplier.class); private final MasterConfiguration masterConfig; private final Observer<String> vmLeaseRescindedObserver; private final JobMessageRouter jobMessageRouter; private final WorkerRegistry workerRegistry; private final AtomicReference<MesosSchedulerDriver> mesosDriverRef = new AtomicReference<>(null); private final AtomicBoolean isInitialized = new AtomicBoolean(false); private volatile Action1<List<VirtualMachineLease>> addVMLeaseAction = null; public MesosDriverSupplier(final MasterConfiguration masterConfig, final Observer<String> vmLeaseRescindedObserver, final 
JobMessageRouter jobMessageRouter, final WorkerRegistry workerRegistry) { this.masterConfig = masterConfig; this.vmLeaseRescindedObserver = vmLeaseRescindedObserver; this.jobMessageRouter = jobMessageRouter; this.workerRegistry = workerRegistry; } @Override public MesosSchedulerDriver get() { if (addVMLeaseAction == null) { throw new IllegalStateException("addVMLeaseAction must be set before creating MesosSchedulerDriver"); } if (isInitialized.compareAndSet(false, true)) { logger.info("initializing mesos scheduler callback handler"); final MesosSchedulerCallbackHandler mesosSchedulerCallbackHandler = new MesosSchedulerCallbackHandler(addVMLeaseAction, vmLeaseRescindedObserver, jobMessageRouter, workerRegistry); final Protos.FrameworkInfo framework = Protos.FrameworkInfo.newBuilder() .setUser("") .setName(masterConfig.getMantisFrameworkName()) .setFailoverTimeout(masterConfig.getMesosFailoverTimeOutSecs()) .setId(Protos.FrameworkID.newBuilder().setValue(masterConfig.getMantisFrameworkName())) .setCheckpoint(true) .build(); logger.info("initializing mesos scheduler driver"); final MesosSchedulerDriver mesosDriver = new MesosSchedulerDriver(mesosSchedulerCallbackHandler, framework, masterConfig.getMasterLocation()); mesosDriverRef.compareAndSet(null, mesosDriver); } return mesosDriverRef.get(); } public void setAddVMLeaseAction(final Action1<List<VirtualMachineLease>> addVMLeaseAction) { Preconditions.checkNotNull(addVMLeaseAction); this.addVMLeaseAction = addVMLeaseAction; } }
4,302
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/mesos/VirtualMachineLeaseMesosImpl.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.mesos; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import com.netflix.fenzo.VirtualMachineLease; import org.apache.mesos.Protos; import org.apache.mesos.Protos.Offer; import org.apache.mesos.Protos.Resource; import org.apache.mesos.Protos.Value; public class VirtualMachineLeaseMesosImpl implements VirtualMachineLease { private Offer offer; private double cpuCores; private double memoryMB; private double networkMbps = 0.0; private double diskMB; private String hostname; private String vmID; private List<Range> portRanges; private Map<String, Protos.Attribute> attributeMap; private long offeredTime; public VirtualMachineLeaseMesosImpl(Offer offer) { this.offer = offer; hostname = offer.getHostname(); this.vmID = offer.getSlaveId().getValue(); offeredTime = System.currentTimeMillis(); // parse out resources from offer // We expect network bandwidth to be coming in as a consumable scalar resource with the name "network" for (Resource resource : offer.getResourcesList()) { if ("cpus".equals(resource.getName())) { cpuCores = resource.getScalar().getValue(); } else if ("mem".equals(resource.getName())) { memoryMB = resource.getScalar().getValue(); } else if ("disk".equals(resource.getName())) { diskMB = resource.getScalar().getValue(); } else if ("network".equals(resource.getName())) { 
networkMbps = resource.getScalar().getValue(); } else if ("ports".equals(resource.getName())) { portRanges = new ArrayList<>(); for (Value.Range range : resource.getRanges().getRangeList()) { portRanges.add(new Range((int) range.getBegin(), (int) range.getEnd())); } } } attributeMap = new HashMap<>(); if (offer.getAttributesCount() > 0) { for (Protos.Attribute attribute : offer.getAttributesList()) { attributeMap.put(attribute.getName(), attribute); } } } @Override public String hostname() { return hostname; } @Override public String getVMID() { return vmID; } @Override public double cpuCores() { return cpuCores; } @Override public double memoryMB() { return memoryMB; } @Override public double networkMbps() { return networkMbps; } @Override public double diskMB() { return diskMB; } public Offer getOffer() { return offer; } @Override public String getId() { return offer.getId().getValue(); } @Override public long getOfferedTime() { return offeredTime; } @Override public List<Range> portRanges() { return portRanges; } @Override public Map<String, Protos.Attribute> getAttributeMap() { return attributeMap; } @Override public Double getScalarValue(String name) { return null; } @Override public Map<String, Double> getScalarValues() { return Collections.emptyMap(); } }
4,303
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/mesos/MesosSchedulerCallbackHandler.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.mesos; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import io.mantisrx.shaded.com.google.common.base.Preconditions; import com.netflix.fenzo.VirtualMachineLease; import io.mantisrx.common.metrics.Counter; import io.mantisrx.common.metrics.Gauge; import io.mantisrx.common.metrics.Metrics; import io.mantisrx.common.metrics.MetricsRegistry; import io.mantisrx.server.core.domain.WorkerId; import io.mantisrx.server.master.config.ConfigurationProvider; import io.mantisrx.server.master.scheduler.JobMessageRouter; import io.mantisrx.server.master.scheduler.WorkerRegistry; import io.mantisrx.server.master.scheduler.WorkerResourceStatus; import io.mantisrx.server.master.scheduler.WorkerResourceStatus.VMResourceState; import org.apache.mesos.Protos; import org.apache.mesos.Protos.ExecutorID; import org.apache.mesos.Protos.FrameworkID; import org.apache.mesos.Protos.MasterInfo; import org.apache.mesos.Protos.Offer; import org.apache.mesos.Protos.OfferID; import org.apache.mesos.Protos.SlaveID; import org.apache.mesos.Protos.TaskStatus; import org.apache.mesos.Scheduler; import 
org.apache.mesos.SchedulerDriver; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observable; import rx.Observer; import rx.functions.Action1; public class MesosSchedulerCallbackHandler implements Scheduler { private static final Logger logger = LoggerFactory.getLogger(MesosSchedulerCallbackHandler.class); private final Action1<List<VirtualMachineLease>> addVMLeaseAction; private final WorkerRegistry workerRegistry; private final Gauge lastOfferReceivedMillis; private final Gauge lastValidOfferReceiveMillis; private final Counter numMesosRegistered; private final Counter numMesosDisconnects; private final Counter numOfferRescinded; private final Counter numReconcileTasks; private final Counter numInvalidOffers; private final Counter numOfferTooSmall; private Observer<String> vmLeaseRescindedObserver; private JobMessageRouter jobMessageRouter; private volatile ScheduledFuture reconcilerFuture = null; private AtomicLong lastOfferReceivedAt = new AtomicLong(System.currentTimeMillis()); private AtomicLong lastValidOfferReceivedAt = new AtomicLong(System.currentTimeMillis()); private long reconciliationTrial = 0; public MesosSchedulerCallbackHandler( final Action1<List<VirtualMachineLease>> addVMLeaseAction, final Observer<String> vmLeaseRescindedObserver, final JobMessageRouter jobMessageRouter, final WorkerRegistry workerRegistry) { this.addVMLeaseAction = Preconditions.checkNotNull(addVMLeaseAction); this.vmLeaseRescindedObserver = vmLeaseRescindedObserver; this.jobMessageRouter = jobMessageRouter; this.workerRegistry = workerRegistry; Metrics m = new Metrics.Builder() .name(MesosSchedulerCallbackHandler.class.getCanonicalName()) .addCounter("numMesosRegistered") .addCounter("numMesosDisconnects") .addCounter("numOfferRescinded") .addCounter("numReconcileTasks") .addGauge("lastOfferReceivedMillis") .addGauge("lastValidOfferReceiveMillis") .addCounter("numInvalidOffers") .addCounter("numOfferTooSmall") .build(); m = 
MetricsRegistry.getInstance().registerAndGet(m); numMesosRegistered = m.getCounter("numMesosRegistered"); numMesosDisconnects = m.getCounter("numMesosDisconnects"); numOfferRescinded = m.getCounter("numOfferRescinded"); numReconcileTasks = m.getCounter("numReconcileTasks"); lastOfferReceivedMillis = m.getGauge("lastOfferReceivedMillis"); lastValidOfferReceiveMillis = m.getGauge("lastValidOfferReceiveMillis"); numInvalidOffers = m.getCounter("numInvalidOffers"); numOfferTooSmall = m.getCounter("numOfferTooSmall"); Observable .interval(10, 10, TimeUnit.SECONDS) .doOnNext(aLong -> { lastOfferReceivedMillis.set(System.currentTimeMillis() - lastOfferReceivedAt.get()); lastValidOfferReceiveMillis.set(System.currentTimeMillis() - lastValidOfferReceivedAt.get()); }) .subscribe(); } // simple offer resource validator private boolean validateOfferResources(Offer offer) { for (Protos.Resource resource : offer.getResourcesList()) { if ("cpus".equals(resource.getName())) { final double cpus = resource.getScalar().getValue(); if (cpus < 0.1) { logger.warn("Declining offer due to too few CPUs in offer from " + offer.getHostname() + ": " + cpus); return false; } } else if ("mem".equals(resource.getName())) { double memoryMB = resource.getScalar().getValue(); if (memoryMB < 1) { logger.warn("Declining offer due to too few memory in offer from " + offer.getHostname() + ": " + memoryMB); return false; } } } return true; } @Override public void resourceOffers(SchedulerDriver driver, List<Offer> offers) { lastOfferReceivedAt.set(System.currentTimeMillis()); double refuseSecs = 10000; final List<VirtualMachineLease> leases = new ArrayList<>(); for (Offer offer : offers) { // if(!filterActiveVMs(offer)) { // // decline offer from inactive VMs // logger.info("Declining offer from host that is not active: " + offer.getHostname()); // driver.declineOffer(offer.getId(), (Protos.Filters.getDefaultInstance().toBuilder()).setRefuseSeconds(60).build()); // numInvalidOffers.increment(); // 
continue; // } if (ConfigurationProvider.getConfig().getUseSlaveFiltering()) { String attrName = ConfigurationProvider.getConfig().getSlaveFilterAttributeName(); String attrValue = null; if (offer.getAttributesCount() > 0) { for (Protos.Attribute attribute : offer.getAttributesList()) { if (attrName.equals(attribute.getName())) { attrValue = attribute.getText().getValue(); break; } } } if (attrValue == null || !attrValue.equals(System.getenv(attrName))) { driver.declineOffer(offer.getId(), (Protos.Filters.getDefaultInstance().toBuilder()).setRefuseSeconds(refuseSecs).build()); logger.warn("Declining offer from host " + offer.getHostname() + " due to missing attribute value for " + attrName + " - expecting [" + System.getenv(attrName) + "] got [" + attrValue + "]"); numInvalidOffers.increment(); continue; } } if (!validateOfferResources(offer)) { // decline for a minute driver.declineOffer(offer.getId(), (Protos.Filters.getDefaultInstance().toBuilder()).setRefuseSeconds(60).build()); numOfferTooSmall.increment(); continue; } leases.add(new VirtualMachineLeaseMesosImpl(offer)); lastValidOfferReceivedAt.set(System.currentTimeMillis()); } addVMLeaseAction.call(leases); } // private boolean filterActiveVMs(Offer offer) { // if(activeSlaveAttributeName==null || activeSlaveAttributeName.isEmpty()) // return true; // not filtering // final List<String> list = activeSlaveAttributeValuesGetter.call(); // if(list==null || list.isEmpty()) // return true; // all are active // if(offer.getAttributesCount()>0) { // for(Protos.Attribute attribute: offer.getAttributesList()) { // if(activeSlaveAttributeName.equals(attribute.getName())) { // if(isIn(attribute.getText().getValue(), list)) // return true; // } // } // } // else // logger.info("Filtering slave with no attributes: " + offer.getHostname()); // return false; // } private boolean isIn(String value, List<String> list) { if (value == null || value.isEmpty() || list == null || list.isEmpty()) return false; for (String s : 
list) if (value.equals(s)) return true; return false; } @Override public void disconnected(SchedulerDriver arg0) { logger.warn("Mesos driver disconnected: " + arg0); numMesosDisconnects.increment(); } @Override public void error(SchedulerDriver arg0, String msg) { logger.error("Error from Mesos: " + msg); } @Override public void executorLost(SchedulerDriver arg0, ExecutorID arg1, SlaveID arg2, int arg3) { logger.warn("Lost executor " + arg1.getValue() + " on slave " + arg2.getValue() + " with status=" + arg3); } @Override public void frameworkMessage(SchedulerDriver arg0, ExecutorID arg1, SlaveID arg2, byte[] arg3) { logger.warn("Unexpected framework message: executorId=" + arg1.getValue() + ", slaveID=" + arg2.getValue() + ", message=" + arg3); } @Override public void offerRescinded(SchedulerDriver arg0, OfferID arg1) { logger.warn("Offer rescinded: offerID=" + arg1.getValue()); vmLeaseRescindedObserver.onNext(arg1.getValue()); numOfferRescinded.increment(); } @Override public void registered(SchedulerDriver driver, FrameworkID frameworkID, MasterInfo masterInfo) { logger.info("Mesos registered: " + driver + ", ID=" + frameworkID.getValue() + ", masterInfo=" + masterInfo.getId()); initializeNewDriver(driver); numMesosRegistered.increment(); } @Override public void reregistered(SchedulerDriver driver, MasterInfo arg1) { logger.info("Mesos re-registered: " + driver + ", masterInfo=" + arg1.getId()); initializeNewDriver(driver); numMesosRegistered.increment(); } private synchronized void initializeNewDriver(final SchedulerDriver driver) { vmLeaseRescindedObserver.onNext("ALL"); if (reconcilerFuture != null) reconcilerFuture.cancel(true); reconcilerFuture = new ScheduledThreadPoolExecutor(1).scheduleWithFixedDelay(new Runnable() { @Override public void run() { reconcileTasks(driver); } }, 30, ConfigurationProvider.getConfig().getMesosTaskReconciliationIntervalSecs(), TimeUnit.SECONDS); } private void reconcileTasks(final SchedulerDriver driver) { try { if 
(reconciliationTrial++ % 2 == 0) reconcileTasksKnownToUs(driver); else reconcileAllMesosTasks(driver); } catch (Exception e) { // we don't want to throw errors lest periodically scheduled reconciliation be cancelled logger.error("Unexpected error (continuing): " + e.getMessage(), e); } } private void reconcileTasksKnownToUs(SchedulerDriver driver) { final List<TaskStatus> tasksToInitialize = new ArrayList<>(); for (Map.Entry<WorkerId, String> workerIdSlaveId : workerRegistry.getAllRunningWorkerSlaveIdMappings().entrySet()) { final WorkerId workerId = workerIdSlaveId.getKey(); final String slaveId = workerIdSlaveId.getValue(); if (logger.isDebugEnabled()) { logger.debug("reconcile running worker mapping {} -> {}", workerId.getId(), slaveId); } tasksToInitialize.add(TaskStatus.newBuilder() .setTaskId( Protos.TaskID.newBuilder() .setValue(workerId.getId()) .build()) .setState(Protos.TaskState.TASK_RUNNING) .setSlaveId(SlaveID.newBuilder().setValue(slaveId).build()) .build() ); } if (!tasksToInitialize.isEmpty()) { Protos.Status status = driver.reconcileTasks(tasksToInitialize); numReconcileTasks.increment(); logger.info("Sent request to reconcile " + tasksToInitialize.size() + " tasks, status=" + status); logger.info("Last offer received " + (System.currentTimeMillis() - lastOfferReceivedAt.get()) / 1000 + " secs ago"); logger.info("Last valid offer received " + (System.currentTimeMillis() - lastValidOfferReceivedAt.get()) / 1000 + " secs ago"); switch (status) { case DRIVER_ABORTED: case DRIVER_STOPPED: logger.error("Unexpected to see Mesos driver status of " + status + " from reconcile request. 
Committing suicide!"); System.exit(2); } } } private void reconcileAllMesosTasks(SchedulerDriver driver) { Protos.Status status = driver.reconcileTasks(Collections.emptyList()); numReconcileTasks.increment(); logger.info("Sent request to reconcile all tasks known to Mesos"); logger.info("Last offer received " + (System.currentTimeMillis() - lastOfferReceivedAt.get()) / 1000 + " secs ago"); logger.info("Last valid offer received " + (System.currentTimeMillis() - lastValidOfferReceivedAt.get()) / 1000 + " secs ago"); switch (status) { case DRIVER_ABORTED: case DRIVER_STOPPED: logger.error("Unexpected to see Mesos driver status of " + status + " from reconcile request (all tasks). Committing suicide!"); System.exit(2); } } @Override public void slaveLost(SchedulerDriver arg0, SlaveID arg1) { logger.warn("Lost slave " + arg1.getValue()); } @Override public void statusUpdate(final SchedulerDriver arg0, TaskStatus arg1) { Optional<WorkerId> workerIdO = WorkerId.fromId(arg1.getTaskId().getValue()); logger.debug("Task status update: ({}) state: {}({}) - {}", arg1.getTaskId().getValue(), arg1.getState(), arg1.getState().getNumber(), arg1.getMessage()); if (workerIdO.isPresent()) { WorkerId workerId = workerIdO.get(); VMResourceState state; String mesg = "Mesos task " + arg1.getState() + "-" + arg1.getMessage(); switch (arg1.getState()) { case TASK_FAILED: case TASK_LOST: state = VMResourceState.FAILED; break; case TASK_FINISHED: state = VMResourceState.COMPLETED; break; case TASK_RUNNING: state = VMResourceState.STARTED; break; case TASK_STAGING: case TASK_STARTING: state = VMResourceState.START_INITIATED; break; default: logger.warn("Unexpected Mesos task state " + arg1.getState()); return; } jobMessageRouter.routeWorkerEvent(new WorkerResourceStatus(workerId, mesg, state)); } else { logger.error("Failed to parse workerId from Mesos task update {}", arg1.getTaskId().getValue()); } } }
4,304
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/http
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/http/api/JobClusterInfo.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.http.api;

import java.util.List;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.server.master.store.NamedJob;

/**
 * API view of a job cluster: its SLA, owner, flags, uploaded jars, parameters and labels.
 * The {@code latestVersion} field is derived at construction time from the jar with the
 * most recent upload timestamp.
 */
public class JobClusterInfo {

    private final String name;
    private final String latestVersion;
    private final NamedJob.SLA sla;
    private final JobOwner owner;
    private final boolean disabled;
    private final boolean cronActive;
    private final List<JarInfo> jars;
    private final List<Parameter> parameters;
    private final List<Label> labels;

    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public JobClusterInfo(
            @JsonProperty("name") String name,
            @JsonProperty("sla") NamedJob.SLA sla,
            @JsonProperty("owner") JobOwner owner,
            @JsonProperty("disabled") boolean disabled,
            @JsonProperty("cronActive") boolean cronActive,
            @JsonProperty("jars") List<JarInfo> jars,
            @JsonProperty("parameters") List<Parameter> parameters,
            @JsonProperty("labels") List<Label> labels
    ) {
        this.name = name;
        this.sla = sla;
        this.owner = owner;
        this.disabled = disabled;
        this.cronActive = cronActive;
        this.jars = jars;
        this.parameters = parameters;
        this.labels = labels;
        // Derive latestVersion: the version of the jar with the greatest upload timestamp
        // (first one wins on ties), or "" when no jars are known.
        String newestVersion = "";
        if (jars != null && !jars.isEmpty()) {
            JarInfo newest = null;
            for (JarInfo jar : jars) {
                if (newest == null || jar.uploadedAt > newest.uploadedAt) {
                    newest = jar;
                }
            }
            if (newest != null) {
                newestVersion = newest.version;
            }
        }
        this.latestVersion = newestVersion;
    }

    public String getName() {
        return name;
    }

    public String getLatestVersion() {
        return latestVersion;
    }

    public NamedJob.SLA getSla() {
        return sla;
    }

    public JobOwner getOwner() {
        return owner;
    }

    public boolean isDisabled() {
        return disabled;
    }

    public boolean isCronActive() {
        return cronActive;
    }

    public List<JarInfo> getJars() {
        return jars;
    }

    public List<Parameter> getParameters() {
        return parameters;
    }

    public List<Label> getLabels() {
        return this.labels;
    }

    /** Immutable descriptor of one uploaded job jar: version, upload time and location. */
    public static class JarInfo {

        private final String version;
        private final long uploadedAt;
        private final String url;

        @JsonCreator
        @JsonIgnoreProperties(ignoreUnknown = true)
        public JarInfo(@JsonProperty("version") String version,
                       @JsonProperty("uploadedAt") long uploadedAt,
                       @JsonProperty("url") String url) {
            this.version = version;
            this.uploadedAt = uploadedAt;
            this.url = url;
        }

        public String getVersion() {
            return version;
        }

        public long getUploadedAt() {
            return uploadedAt;
        }

        public String getUrl() {
            return url;
        }
    }
}
4,305
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/http
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/http/api/CompactJobInfo.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.http.api;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.server.master.store.MantisJobMetadata;
import io.mantisrx.server.master.store.MantisStageMetadata;
import io.mantisrx.server.master.store.MantisWorkerMetadata;

/**
 * Compact, API-facing summary of a job: identity, submission info, aggregate worker
 * counts and resource totals, plus a per-worker-state tally. Built either directly via
 * JSON deserialization or from stored metadata via {@link #fromJob(MantisJobMetadata)}.
 */
public class CompactJobInfo {

    private final String jobId;
    private final long submittedAt;
    private final String user;
    private final String jarUrl;
    private final MantisJobState state;
    private final MantisJobDurationType type;
    private final int numStages;
    private final int numWorkers;
    private final double totCPUs;
    private final double totMemory;
    private final Map<String, Integer> statesSummary;
    private final List<Label> labels;

    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public CompactJobInfo(
            @JsonProperty("jobID") String jobId,
            @JsonProperty("jarUrl") String jarUrl,
            @JsonProperty("submittedAt") long submittedAt,
            @JsonProperty("user") String user,
            @JsonProperty("state") MantisJobState state,
            @JsonProperty("type") MantisJobDurationType type,
            @JsonProperty("numStages") int numStages,
            @JsonProperty("numWorkers") int numWorkers,
            @JsonProperty("totCPUs") double totCPUs,
            @JsonProperty("totMemory") double totMemory,
            @JsonProperty("statesSummary") Map<String, Integer> statesSummary,
            @JsonProperty("labels") List<Label> labels
    ) {
        this.jobId = jobId;
        this.jarUrl = jarUrl;
        this.submittedAt = submittedAt;
        this.user = user;
        this.state = state;
        this.type = type;
        this.numStages = numStages;
        this.numWorkers = numWorkers;
        this.totCPUs = totCPUs;
        this.totMemory = totMemory;
        this.statesSummary = statesSummary;
        this.labels = labels;
    }

    /**
     * Builds a compact summary from full job metadata by aggregating worker counts,
     * total CPUs/memory across stages, and a count of workers per state.
     *
     * @param job full job metadata, may be null
     * @return the compact summary, or null when {@code job} is null
     */
    static CompactJobInfo fromJob(MantisJobMetadata job) {
        if (job == null)
            return null;
        int workers = 0;
        double totCPUs = 0.0;
        double totMem = 0.0;
        Map<String, Integer> stateSummary = new HashMap<>();
        for (MantisStageMetadata s : job.getStageMetadata()) {
            workers += s.getNumWorkers();
            totCPUs += s.getNumWorkers() * s.getMachineDefinition().getCpuCores();
            totMem += s.getNumWorkers() * s.getMachineDefinition().getMemoryMB();
            for (MantisWorkerMetadata w : s.getWorkerByIndexMetadataSet()) {
                // Tally workers per state; merge() replaces the get()-null-check-put() dance.
                stateSummary.merge(w.getState() + "", 1, Integer::sum);
            }
        }
        // NOTE(review): throws NPE if the job has no jar URL — confirm getJarUrl() is
        // always non-null for stored jobs.
        String artifact = job.getJarUrl().toString();
        return new CompactJobInfo(
                job.getJobId(), artifact, job.getSubmittedAt(), job.getUser(), job.getState(),
                job.getSla().getDurationType(), job.getNumStages(), workers, totCPUs, totMem,
                stateSummary, job.getLabels()
        );
    }

    public String getJobId() {
        return jobId;
    }

    public long getSubmittedAt() {
        return submittedAt;
    }

    public String getUser() {
        return user;
    }

    public MantisJobState getState() {
        return state;
    }

    public MantisJobDurationType getType() {
        return type;
    }

    public int getNumStages() {
        return numStages;
    }

    public int getNumWorkers() {
        return numWorkers;
    }

    public double getTotCPUs() {
        return totCPUs;
    }

    public double getTotMemory() {
        return totMemory;
    }

    public String getJarUrl() {
        return jarUrl;
    }

    public Map<String, Integer> getStatesSummary() {
        return statesSummary;
    }

    public List<Label> getLabels() {
        return this.labels;
    }
}
4,306
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/jobmgmt/MantisJobStateAdapter.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.jobmgmt; import io.mantisrx.runtime.MantisJobState; import io.mantisrx.server.master.scheduler.WorkerResourceStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class MantisJobStateAdapter { private static final Logger logger = LoggerFactory.getLogger(MantisJobStateAdapter.class); // Mark constructor private as this class is not intended to be instantiated private MantisJobStateAdapter() {} public static final MantisJobState valueOf(final WorkerResourceStatus.VMResourceState resourceState) { final MantisJobState state; switch (resourceState) { case START_INITIATED: state = MantisJobState.StartInitiated; break; case STARTED: state = MantisJobState.Started; break; case FAILED: state = MantisJobState.Failed; break; case COMPLETED: state = MantisJobState.Completed; break; default: logger.error("Missing MantisJobState mapping for VMResourceState {}", resourceState); throw new IllegalArgumentException("unknown enum value for VMResourceState " + resourceState); } return state; } }
4,307
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/jobmgmt/JobRegistry.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.jobmgmt;

import io.mantisrx.server.master.MantisJobMgr;
import io.mantisrx.server.master.store.NamedJob;

import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

/**
 * Registry mapping job ids to {@link MantisJobMgr} instances and job cluster names to
 * {@link NamedJob} definitions.
 *
 * NOTE(review): the entire implementation below is commented out, so this class is
 * currently an empty shell with no behavior. Presumably it was superseded by another
 * registry — confirm whether it can be deleted outright.
 */
public class JobRegistry {
    //    final ConcurrentHashMap<String, MantisJobMgr> jobManagers = new ConcurrentHashMap<>();
    //    final ConcurrentMap<String, NamedJob> jobClusters = new ConcurrentHashMap<>();
    //
    //
    //    public Optional<MantisJobMgr> getJobManager(final String jobId) {
    //        return Optional.ofNullable(jobManagers.get(jobId));
    //    }
    //
    //    public Optional<NamedJob> getJobCluster(final String jobCluster) {
    //        return Optional.ofNullable(jobClusters.get(jobCluster));
    //    }
    //
    //    public MantisJobMgr addJobId(final String jobId, final MantisJobMgr mantisJobMgr) {
    //        return jobManagers.put(jobId, mantisJobMgr);
    //    }
    //
    //    public MantisJobMgr removeJobId(final String jobId) {
    //        return jobManagers.remove(jobId);
    //    }
    //
    //    public Set<String> getAllJobIds() {
    //        return new HashSet<>(jobManagers.keySet());
    //    }
    //
    //    public Set<String> getAllActiveJobIds() {
    //        Set<String> retSet = new HashSet<>();
    //        for(Map.Entry<String, MantisJobMgr> entry: jobManagers.entrySet()) {
    //            if(!entry.getValue().getAllRunningWorkers().isEmpty())
    //                retSet.add(entry.getKey());
    //        }
    //        return retSet;
    //    }
    //
    //    public Collection<MantisJobMgr> getAllJobManagers() {
    //        return jobManagers.values();
    //    }
    //
    //    public Collection<NamedJob> getAllJobClusters() {
    //        return jobClusters.values();
    //    }
    //
    //    public NamedJob addJobClusterIfAbsent(final NamedJob jobCluster) {
    //        return jobClusters.putIfAbsent(jobCluster.getName(), jobCluster);
    //    }
    //
    //    public NamedJob removeJobCluster(final String jobCluster) {
    //        return jobClusters.remove(jobCluster);
    //    }
}
4,308
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/persistence/SimpleCachedFileStorageProvider.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.persistence; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.PrintWriter; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Optional; import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference; import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature; import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper; import io.mantisrx.shaded.com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider; import io.mantisrx.shaded.com.google.common.collect.Lists; import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl; import io.mantisrx.master.events.LifecycleEventPublisher; import io.mantisrx.master.events.LifecycleEventPublisherImpl; import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl; import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl; import io.mantisrx.master.jobcluster.IJobClusterMetadata; import io.mantisrx.master.jobcluster.JobClusterMetadataImpl; import io.mantisrx.master.jobcluster.job.IMantisJobMetadata; import io.mantisrx.master.jobcluster.job.IMantisStageMetadata; import io.mantisrx.master.jobcluster.job.MantisJobMetadataImpl; import io.mantisrx.master.jobcluster.job.MantisStageMetadataImpl; import 
io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata; import io.mantisrx.master.jobcluster.job.worker.JobWorker; import io.mantisrx.master.jobcluster.job.worker.MantisWorkerMetadataImpl; import io.mantisrx.server.master.domain.JobClusterDefinitionImpl.CompletedJob; import io.mantisrx.server.master.domain.JobId; import io.mantisrx.server.master.persistence.exceptions.InvalidJobException; import io.mantisrx.server.master.persistence.exceptions.JobClusterAlreadyExistsException; import io.mantisrx.server.master.store.InvalidNamedJobException; import io.mantisrx.server.master.store.JobAlreadyExistsException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observable; import rx.functions.Action1; /** * Simple File based storage provider. Intended mainly as a sample implementation for * {@link IMantisStorageProvider} interface. This implementation is complete in its functionality, but, isn't * expected to be scalable or performant for production loads. * <P>This implementation uses <code>/tmp/MantisSpool/</code> as the spool directory. The directory is created * if not present already. 
It will fail only if either a file with that name exists or if a directory with that
 * name exists but isn't writable.</P>
 */
public class SimpleCachedFileStorageProvider implements IMantisStorageProvider {

    private static final String SPOOL_DIR = "/tmp/MantisSpool";
    private static final String ARCHIVE_DIR = "/tmp/MantisArchive";
    private static final Logger logger = LoggerFactory.getLogger(SimpleCachedFileStorageProvider.class);
    private static final String JOB_CLUSTERS_DIR = SPOOL_DIR + "/jobClusters";
    private static final String JOB_CLUSTERS_COMPLETED_JOBS_FILE_NAME_SUFFIX = "-completedJobs";
    private static final String ACTIVE_VMS_FILENAME = "activeVMs";
    private static final SimpleFilterProvider DEFAULT_FILTER_PROVIDER;

    static {
        DEFAULT_FILTER_PROVIDER = new SimpleFilterProvider();
        // Don't fail serialization when a bean references a filter id we never registered.
        DEFAULT_FILTER_PROVIDER.setFailOnUnknownId(false);
    }

    // Lenient mapper: persisted JSON may carry fields written by newer code versions.
    private final ObjectMapper mapper =
            new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    private final LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(
            new AuditEventSubscriberLoggingImpl(),
            new StatusEventSubscriberLoggingImpl(),
            new WorkerEventSubscriberLoggingImpl());

    public SimpleCachedFileStorageProvider() {
        this(false);
    }

    /**
     * @param cleanupExistingData when true, deletes all files left over from previous runs
     *                            before (re)creating the spool/archive directories.
     */
    public SimpleCachedFileStorageProvider(boolean cleanupExistingData) {
        if (cleanupExistingData) {
            deleteAllFiles();
        }
        // mkdirs() returns false when the directory already exists, so its result is not an error signal.
        new File(SPOOL_DIR).mkdirs();
        new File(ARCHIVE_DIR).mkdirs();
        new File(JOB_CLUSTERS_DIR).mkdirs();
        logger.debug(" created");
        mapper.setFilterProvider(DEFAULT_FILTER_PROVIDER);
    }

    /** File name for a worker record under the given directory prefix. */
    private static String getWorkerFilename(String prefix, String jobId, int workerIndex, int workerNumber) {
        return prefix + File.separator + "Worker-" + jobId + "-" + workerIndex + "-" + workerNumber;
    }

    /** Safe wrapper over {@link File#listFiles(FilenameFilter)}: returns an empty array when the dir is missing. */
    private static File[] listFilesSafely(File dir, FilenameFilter filter) {
        File[] files = dir.listFiles(filter);
        return files == null ? new File[0] : files;
    }

    /**
     * Moves a job's file, along with its stage and worker files, from the spool dir to the archive dir.
     */
    @Override
    public void archiveJob(String jobId) throws IOException {
        File jobFile = new File(getJobFileName(SPOOL_DIR, jobId));
        if (!jobFile.renameTo(new File(getJobFileName(ARCHIVE_DIR, jobId)))) {
            logger.warn("Could not move job file for {} to archive", jobId);
        }
        archiveStages(jobId);
        archiveWorkers(jobId);
    }

    @Override
    public Optional<IMantisJobMetadata> loadArchivedJob(String jobId) throws IOException {
        return loadJob(ARCHIVE_DIR, jobId);
    }

    public Optional<IMantisJobMetadata> loadActiveJob(String jobId) throws IOException {
        return loadJob(SPOOL_DIR, jobId);
    }

    /** Alias of {@link #loadArchivedJob(String)} kept for backward compatibility. */
    public Optional<IMantisJobMetadata> loadArchiveJob(String jobId) throws IOException {
        return loadJob(ARCHIVE_DIR, jobId);
    }

    /**
     * Loads a job plus its stages and workers from the given directory.
     *
     * @return empty when no job file exists for the given id.
     */
    private Optional<IMantisJobMetadata> loadJob(String dir, String jobId) throws IOException {
        File jobFile = new File(getJobFileName(dir, jobId));
        IMantisJobMetadata job = null;
        if (jobFile.exists()) {
            try (FileInputStream fis = new FileInputStream(jobFile)) {
                job = mapper.readValue(fis, MantisJobMetadataImpl.class);
            }
            for (IMantisStageMetadata stage : readStagesFor(new File(dir), jobId)) {
                ((MantisJobMetadataImpl) job).addJobStageIfAbsent(stage);
            }
            for (IMantisWorkerMetadata worker : readWorkersFor(new File(dir), jobId)) {
                try {
                    JobWorker jobWorker = new JobWorker.Builder()
                            .from(worker)
                            .withLifecycleEventsPublisher(eventPublisher)
                            .build();
                    ((MantisJobMetadataImpl) job).addWorkerMetadata(worker.getStageNum(), jobWorker);
                } catch (InvalidJobException e) {
                    // A single bad worker record should not prevent loading the rest of the job.
                    logger.warn("Unexpected error adding worker index=" + worker.getWorkerIndex()
                            + ", number=" + worker.getWorkerNumber() + " for job " + jobId
                            + ": " + e.getMessage(), e);
                }
            }
        }
        return Optional.ofNullable(job);
    }

    @Override
    public void storeMantisStage(IMantisStageMetadata msmd) throws IOException {
        storeStage(msmd, false);
    }

    /**
     * Writes a stage record to the spool dir.
     *
     * @param rewrite when true, replaces any existing file for this stage.
     */
    private void storeStage(IMantisStageMetadata msmd, boolean rewrite) throws IOException {
        logger.debug("Storing stage {}", msmd);
        File stageFile = new File(getStageFileName(SPOOL_DIR, msmd.getJobId(), msmd.getStageNum()));
        if (rewrite) {
            stageFile.delete();
        }
        try {
            stageFile.createNewFile();
        } catch (SecurityException se) {
            throw new IOException("Can't create new file " + stageFile.getAbsolutePath(), se);
        }
        try (PrintWriter pwrtr = new PrintWriter(stageFile)) {
            mapper.writeValue(pwrtr, msmd);
        }
        logger.debug("Stored stage {}", msmd);
    }

    @Override
    public void updateMantisStage(IMantisStageMetadata msmd) throws IOException {
        storeStage(msmd, true);
    }

    /** Moves all stage files of a job from the spool dir to the archive dir. */
    private void archiveStages(String jobId) {
        File spoolDir = new File(SPOOL_DIR);
        for (File sFile : listFilesSafely(spoolDir, (dir, name) -> name.startsWith("Stage-" + jobId + "-"))) {
            sFile.renameTo(new File(ARCHIVE_DIR + File.separator + sFile.getName()));
        }
    }

    private String getStageFileName(String dirName, JobId jobId, int stageNum) {
        return dirName + "/Stage-" + jobId.getId() + "-" + stageNum;
    }

    @Override
    public void storeWorker(IMantisWorkerMetadata workerMetadata) throws IOException {
        storeWorker(workerMetadata.getJobIdObject(), workerMetadata, false);
    }

    @Override
    public void storeWorkers(String jobId, List<IMantisWorkerMetadata> workers) throws IOException {
        for (IMantisWorkerMetadata w : workers) {
            storeWorker(w);
        }
    }

    /**
     * Stores the new worker and updates/archives the existing one. NOTE: despite the interface
     * contract, this simple file-based implementation is NOT atomic; the operations run in sequence.
     */
    @Override
    public void storeAndUpdateWorkers(IMantisWorkerMetadata existingWorker, IMantisWorkerMetadata newWorker)
            throws InvalidJobException, IOException {
        if (!existingWorker.getJobId().equals(newWorker.getJobId())) {
            throw new InvalidJobException(existingWorker.getJobId());
        }
        // Not atomic: update the existing worker, then store the new one.
        updateWorker(existingWorker);
        storeWorker(newWorker);
        // Now move the terminated worker to archived state.
        archiveWorker(existingWorker);
    }

    @Override
    public void updateWorker(IMantisWorkerMetadata mwmd) throws IOException {
        storeWorker(mwmd.getJobIdObject(), mwmd, true);
    }

    /** Ensures the directory exists and is writable; throws UnsupportedOperationException otherwise. */
    private void createDir(String dirName) {
        File spoolDirLocation = new File(dirName);
        if (spoolDirLocation.exists() && !(spoolDirLocation.isDirectory() && spoolDirLocation.canWrite())) {
            throw new UnsupportedOperationException("Directory [" + dirName + "] not writeable");
        }
        if (!spoolDirLocation.exists()) {
            try {
                spoolDirLocation.mkdirs();
            } catch (SecurityException se) {
                throw new UnsupportedOperationException("Can't create dir for writing state - " + se.getMessage(), se);
            }
        }
    }

    /**
     * Loads all active jobs from the spool dir. Files that fail to parse are skipped with an
     * error log instead of aborting the whole load.
     */
    @Override
    public List<IMantisJobMetadata> loadAllJobs() {
        List<IMantisJobMetadata> jobList = Lists.newArrayList();
        createDir(SPOOL_DIR);
        createDir(ARCHIVE_DIR);
        File spoolDirFile = new File(SPOOL_DIR);
        for (File jobFile : listFilesSafely(spoolDirFile, (dir, name) -> name.startsWith("Job-"))) {
            try {
                String jobId = jobFile.getName().substring("Job-".length());
                Optional<IMantisJobMetadata> jobMetaOp = loadJob(SPOOL_DIR, jobId);
                jobMetaOp.ifPresent(jobList::add);
            } catch (IOException e) {
                logger.error("Error reading job metadata - " + e.getMessage());
            }
        }
        return jobList;
    }

    @Override
    public Observable<IMantisJobMetadata> loadAllArchivedJobs() {
        List<IMantisJobMetadata> jobList = Lists.newArrayList();
        createDir(ARCHIVE_DIR);
        File archiveDirFile = new File(ARCHIVE_DIR);
        for (File jobFile : listFilesSafely(archiveDirFile, (dir, name) -> name.startsWith("Job-"))) {
            try {
                String jobId = jobFile.getName().substring("Job-".length());
                Optional<IMantisJobMetadata> jobMetaOp = loadJob(ARCHIVE_DIR, jobId);
                jobMetaOp.ifPresent(jobList::add);
            } catch (IOException e) {
                logger.error("Error reading job metadata - " + e.getMessage());
            }
        }
        return Observable.from(jobList);
    }

    @Override
    public List<IJobClusterMetadata> loadAllJobClusters() {
        createDir(JOB_CLUSTERS_DIR);
        File jobClustersDir = new File(JOB_CLUSTERS_DIR);
        final List<IJobClusterMetadata> jobClusterMetadataList = new ArrayList<>();
        for (File jobClusterFile : listFilesSafely(jobClustersDir, (dir, name) -> true)) {
            try (FileInputStream fis = new FileInputStream(jobClusterFile)) {
                jobClusterMetadataList.add(mapper.readValue(fis, JobClusterMetadataImpl.class));
            } catch (Exception e) {
                logger.error("skipped file {} due to exception when loading job cluster",
                        jobClusterFile.getName(), e);
            }
        }
        return jobClusterMetadataList;
    }

    /** Loads a single job cluster by name; empty when absent or unreadable. */
    public Optional<IJobClusterMetadata> loadJobCluster(String clusterName) {
        File jobClusterFile = new File(JOB_CLUSTERS_DIR + "/" + clusterName);
        if (jobClusterFile.exists()) {
            try (FileInputStream fis = new FileInputStream(jobClusterFile)) {
                IJobClusterMetadata jobClustermeta = mapper.readValue(fis, JobClusterMetadataImpl.class);
                return Optional.ofNullable(jobClustermeta);
            } catch (Exception e) {
                logger.error("skipped file {} due to exception when loading job cluster",
                        jobClusterFile.getName(), e);
            }
        }
        logger.warn("No such job cluster {} ", clusterName);
        return Optional.empty();
    }

    @Override
    public List<CompletedJob> loadAllCompletedJobs() throws IOException {
        createDir(JOB_CLUSTERS_DIR);
        List<CompletedJob> completedJobs = Lists.newArrayList();
        File clustersDir = new File(JOB_CLUSTERS_DIR);
        for (File jobClusterFile : listFilesSafely(clustersDir,
                (dir, name) -> name.endsWith(JOB_CLUSTERS_COMPLETED_JOBS_FILE_NAME_SUFFIX))) {
            try (FileInputStream fis = new FileInputStream(jobClusterFile)) {
                final List<CompletedJob> list =
                        mapper.readValue(fis, new TypeReference<List<CompletedJob>>() {});
                if (list != null && !list.isEmpty()) {
                    completedJobs.addAll(list);
                }
            } catch (Exception e) {
                logger.error("Exception loading completedJob ", e);
            }
        }
        return completedJobs;
    }

    /** Writes a worker record; when {@code rewrite} is true, replaces any existing record. */
    private void storeWorker(JobId jobId, IMantisWorkerMetadata workerMetadata, boolean rewrite)
            throws IOException {
        logger.info("Storing worker {}", workerMetadata);
        File workerFile = new File(getWorkerFilename(SPOOL_DIR, jobId.getId(),
                workerMetadata.getWorkerIndex(), workerMetadata.getWorkerNumber()));
        if (rewrite) {
            workerFile.delete();
        }
        workerFile.createNewFile();
        try (PrintWriter pwrtr = new PrintWriter(workerFile)) {
            mapper.writeValue(pwrtr, workerMetadata);
        }
        logger.info("Stored worker {}", workerMetadata);
    }

    private List<IMantisStageMetadata> readStagesFor(File spoolDir, final String id) throws IOException {
        List<IMantisStageMetadata> stageList = new ArrayList<>();
        for (File stageFile : listFilesSafely(spoolDir, (dir, name) -> name.startsWith("Stage-" + id + "-"))) {
            logger.info("Reading stage file " + stageFile.getName());
            try (FileInputStream fis = new FileInputStream(stageFile)) {
                stageList.add(mapper.readValue(fis, MantisStageMetadataImpl.class));
            }
        }
        return stageList;
    }

    private List<IMantisWorkerMetadata> readWorkersFor(File spoolDir, final String id) {
        List<IMantisWorkerMetadata> workerList = new ArrayList<>();
        for (File workerFile : listFilesSafely(spoolDir, (dir, name) -> name.startsWith("Worker-" + id + "-"))) {
            logger.info("Reading worker file " + workerFile.getName());
            try (FileInputStream fis = new FileInputStream(workerFile)) {
                workerList.add(mapper.readValue(fis, MantisWorkerMetadataImpl.class));
            } catch (IOException e) {
                // Skip unreadable worker files but keep loading the rest.
                logger.error("Error reading worker file " + workerFile.getName(), e);
            }
        }
        return workerList;
    }

    /** Moves all worker files of a job from the spool dir to the archive dir. */
    private void archiveWorkers(String jobId) throws IOException {
        File spoolDir = new File(SPOOL_DIR);
        for (File wFile : listFilesSafely(spoolDir, (dir, name) -> name.startsWith("Worker-" + jobId + "-"))) {
            wFile.renameTo(new File(ARCHIVE_DIR + File.separator + wFile.getName()));
        }
    }

    @Override
    public void archiveWorker(IMantisWorkerMetadata mwmd) throws IOException {
        File wFile = new File(getWorkerFilename(SPOOL_DIR, mwmd.getJobId(),
                mwmd.getWorkerIndex(), mwmd.getWorkerNumber()));
        if (wFile.exists()) {
            wFile.renameTo(new File(getWorkerFilename(ARCHIVE_DIR, mwmd.getJobId(),
                    mwmd.getWorkerIndex(), mwmd.getWorkerNumber())));
        }
    }

    @Override
    public void createJobCluster(IJobClusterMetadata jobCluster)
            throws JobClusterAlreadyExistsException, IOException {
        String name = jobCluster.getJobClusterDefinition().getName();
        File tmpFile = new File(JOB_CLUSTERS_DIR + "/" + name);
        logger.info("Storing job cluster " + name + " to file " + tmpFile.getAbsolutePath());
        if (!tmpFile.createNewFile()) {
            throw new JobClusterAlreadyExistsException(name);
        }
        // try-with-resources so the writer is flushed and closed even on error.
        try (PrintWriter pwrtr = new PrintWriter(tmpFile)) {
            mapper.writeValue(pwrtr, jobCluster);
        }
        logger.info("Stored job cluster " + name + " to file " + tmpFile.getAbsolutePath());
    }

    /**
     * Deletes a job cluster's file and its completed-jobs file.
     *
     * @throws RuntimeException when the cluster doesn't exist or its file can't be deleted.
     */
    @Override
    public void deleteJobCluster(String name) {
        File jobFile = new File(JOB_CLUSTERS_DIR + File.separator + name);
        try {
            if (!jobFile.exists()) {
                throw new InvalidNamedJobException(name + " doesn't exist");
            }
            boolean jobClusterDeleted = jobFile.delete();
            File completedJobsFile = new File(JOB_CLUSTERS_DIR + File.separator + name
                    + JOB_CLUSTERS_COMPLETED_JOBS_FILE_NAME_SUFFIX);
            // Best-effort: the completed-jobs file may legitimately not exist.
            boolean completedJobClusterDeleted = completedJobsFile.delete();
            logger.debug("completed-jobs file for {} deleted? {}", name, completedJobClusterDeleted);
            if (!jobClusterDeleted) {
                throw new Exception("JobCluster " + name + " could not be deleted");
            } else {
                logger.info(" job cluster " + name + " deleted ");
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public void updateJobCluster(IJobClusterMetadata jobCluster) {
        String name = jobCluster.getJobClusterDefinition().getName();
        File tmpFile = new File(JOB_CLUSTERS_DIR + "/" + name);
        logger.info("Updating job cluster " + name + " to file " + tmpFile.getAbsolutePath());
        try {
            if (!tmpFile.exists()) {
                throw new InvalidNamedJobException(name + " does not exist");
            }
            tmpFile.delete();
            tmpFile.createNewFile();
            try (PrintWriter pwrtr = new PrintWriter(tmpFile)) {
                mapper.writeValue(pwrtr, jobCluster);
            }
        } catch (IOException | InvalidNamedJobException e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public void storeNewJob(IMantisJobMetadata jobMetadata) {
        File tmpFile = new File(SPOOL_DIR + "/Job-" + jobMetadata.getJobId());
        try {
            if (!tmpFile.createNewFile()) {
                throw new JobAlreadyExistsException(jobMetadata.getJobId().getId());
            }
            try (PrintWriter pwrtr = new PrintWriter(tmpFile)) {
                mapper.writeValue(pwrtr, jobMetadata);
            }
        } catch (IOException | JobAlreadyExistsException e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public void updateJob(IMantisJobMetadata jobMetadata) throws InvalidJobException, IOException {
        File jobFile = new File(getJobFileName(SPOOL_DIR, jobMetadata.getJobId().getId()));
        if (!jobFile.exists()) {
            throw new InvalidJobException(jobMetadata.getJobId().getId());
        }
        jobFile.delete();
        jobFile.createNewFile();
        try (PrintWriter pwrtr = new PrintWriter(jobFile)) {
            mapper.writeValue(pwrtr, jobMetadata);
        }
    }

    /** Best-effort removal of all spool and archive files; errors are logged, not thrown. */
    public void deleteAllFiles() {
        try {
            deleteDir(new File(SPOOL_DIR));
            deleteDir(new File(ARCHIVE_DIR));
        } catch (Exception e) {
            logger.error("caught unexpected exception ", e);
        }
    }

    /** Recursively deletes the files inside a directory (the directory itself is kept). */
    private void deleteDir(File dir) {
        if (dir == null) {
            return;
        }
        File[] files = dir.listFiles();
        if (files == null) {
            return;
        }
        for (File file : files) {
            if (file.isDirectory()) {
                deleteDir(file);
            } else {
                boolean delete = file.delete();
                logger.info("deleted file {}? {}", file.getName(), delete);
            }
        }
    }

    /** Deletes every file in {@code dirName} whose name starts with {@code filePrefix + jobId + "-"}. */
    private void deleteFiles(String dirName, final String jobId, final String filePrefix) {
        File spoolDir = new File(dirName);
        for (File stageFile : listFilesSafely(spoolDir, (dir, name) -> name.startsWith(filePrefix + jobId + "-"))) {
            stageFile.delete();
        }
    }

    @Override
    public void deleteJob(String jobId) throws InvalidJobException, IOException {
        File tmpFile = new File(SPOOL_DIR + "/Job-" + jobId);
        tmpFile.delete();
        deleteFiles(SPOOL_DIR, jobId, "Stage-");
        deleteFiles(SPOOL_DIR, jobId, "Worker-");
        tmpFile = new File(ARCHIVE_DIR + "/Job-" + jobId);
        tmpFile.delete();
        deleteFiles(ARCHIVE_DIR, jobId, "Stage-");
        deleteFiles(ARCHIVE_DIR, jobId, "Worker-");
    }

    private String getJobFileName(String dirName, String jobId) {
        return dirName + "/Job-" + jobId;
    }

    @Override
    public void storeCompletedJobForCluster(String name, CompletedJob job) throws IOException {
        modifyCompletedJobsForCluster(name, list -> list.add(job));
    }

    /**
     * Reads the cluster's completed-jobs list, applies {@code modifier} to it, and rewrites the file.
     */
    private void modifyCompletedJobsForCluster(String name, Action1<List<CompletedJob>> modifier)
            throws IOException {
        File completedJobsFile = new File(JOB_CLUSTERS_DIR + File.separator + name
                + JOB_CLUSTERS_COMPLETED_JOBS_FILE_NAME_SUFFIX);
        List<CompletedJob> completedJobs = new LinkedList<>();
        if (completedJobsFile.exists()) {
            try (FileInputStream fis = new FileInputStream(completedJobsFile)) {
                completedJobs.addAll(mapper.readValue(fis, new TypeReference<List<CompletedJob>>() {}));
            }
        }
        modifier.call(completedJobs);
        completedJobsFile.delete();
        completedJobsFile.createNewFile();
        try (PrintWriter w = new PrintWriter(completedJobsFile)) {
            mapper.writeValue(w, completedJobs);
        }
    }

    @Override
    public void removeCompletedJobForCluster(String name, String jobId) throws IOException {
        modifyCompletedJobsForCluster(name, list -> {
            if (list != null) {
                final Iterator<CompletedJob> iterator = list.iterator();
                while (iterator.hasNext()) {
                    final CompletedJob next = iterator.next();
                    if (next.getJobId().equals(jobId)) {
                        iterator.remove();
                        break;
                    }
                }
            }
        });
    }

    @Override
    public void setActiveVmAttributeValuesList(List<String> vmAttributesList) throws IOException {
        File activeSlavesFile = new File(SPOOL_DIR + File.separator + ACTIVE_VMS_FILENAME);
        logger.info("Storing file " + activeSlavesFile.getAbsolutePath());
        if (activeSlavesFile.exists()) {
            activeSlavesFile.delete();
        }
        activeSlavesFile.createNewFile();
        try (PrintWriter wrtr = new PrintWriter(activeSlavesFile)) {
            mapper.writeValue(wrtr, vmAttributesList);
        }
    }

    @Override
    public List<String> initActiveVmAttributeValuesList() throws IOException {
        File activeSlavesFile = new File(SPOOL_DIR + File.separator + ACTIVE_VMS_FILENAME);
        if (!activeSlavesFile.exists()) {
            // Typed empty list instead of the raw Collections.EMPTY_LIST.
            return Collections.emptyList();
        }
        try (FileInputStream fis = new FileInputStream(activeSlavesFile)) {
            return mapper.readValue(fis, new TypeReference<List<String>>() {});
        }
    }

    /** Not implemented by this simple provider; always returns {@code null}. */
    @Override
    public List<IMantisWorkerMetadata> getArchivedWorkers(String jobId) {
        // TODO implement archived-worker lookup for the file-based provider.
        return null;
    }
}
4,309
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/persistence/IMantisStorageProvider.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.persistence;

import java.io.IOException;
import java.util.List;
import java.util.Optional;

import io.mantisrx.master.jobcluster.IJobClusterMetadata;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.master.jobcluster.job.IMantisStageMetadata;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl.CompletedJob;
import io.mantisrx.server.master.persistence.exceptions.InvalidJobException;
import rx.Observable;

/**
 * Storage abstraction for the Mantis master's persisted state: jobs, stages,
 * workers, job clusters, completed jobs, and the active-VM attribute list.
 * Implementations back this with a file system, a database, etc.
 */
public interface IMantisStorageProvider {

    /** Persists a brand-new job's metadata. */
    void storeNewJob(IMantisJobMetadata jobMetadata) throws Exception;

    /** Replaces the persisted metadata of an existing job. */
    void updateJob(final IMantisJobMetadata jobMetadata) throws Exception;

    /**
     * Mark the job as not active and move it to an inactive archived collection of jobs.
     *
     * @param jobId The Job Id of the job to archive
     *
     * @throws IOException upon errors with storage invocation
     */
    void archiveJob(final String jobId) throws IOException;

    /** Permanently removes a job's persisted state (active and archived). */
    void deleteJob(String jobId) throws Exception;

    /** Persists a stage's metadata for the first time. */
    void storeMantisStage(final IMantisStageMetadata msmd) throws IOException;

    /** Replaces a previously stored stage's metadata. */
    void updateMantisStage(final IMantisStageMetadata msmd) throws IOException;

    /**
     * Store a new worker for the given job and stage number. This will be called only once for a given
     * worker. However, it is possible that concurrent calls can be made on a <code>jobId</code>, each with a
     * different worker.
     *
     * @param workerMetadata The worker metadata to store.
     *
     * @throws IOException
     */
    void storeWorker(final IMantisWorkerMetadata workerMetadata) throws IOException;

    /**
     * Store multiple new workers for the give job. This is called only once for a given worker. This method enables
     * optimization by calling storage once for multiple workers.
     *
     * @param jobId The Job ID.
     * @param workers The list of workers to store.
     *
     * @throws IOException if there were errors storing the workers.
     */
    void storeWorkers(final String jobId, final List<IMantisWorkerMetadata> workers) throws IOException;

    /**
     * Store a new worker and update existing worker of a job atomically. Either both are stored or none is.
     *
     * @param existingWorker Existing worker to update.
     * @param newWorker New worker to store.
     *
     * @throws IOException
     * @throws InvalidJobException If workers don't have the same JobId.
     * @throws Exception
     */
    void storeAndUpdateWorkers(final IMantisWorkerMetadata existingWorker, final IMantisWorkerMetadata newWorker)
            throws InvalidJobException, IOException, Exception;

    /** Replaces a previously stored worker's metadata. */
    void updateWorker(IMantisWorkerMetadata worker) throws IOException;

    /** Loads all active (non-archived) jobs from storage. */
    List<IMantisJobMetadata> loadAllJobs() throws IOException;

    /** Streams all archived jobs from storage. */
    Observable<IMantisJobMetadata> loadAllArchivedJobs();

    /**
     * Initialize and return all existing job clusters from persistence.
     *
     * @return list of persisted job cluster metadata.
     * @throws IOException upon error connecting to or reading from persistence.
     */
    List<IJobClusterMetadata> loadAllJobClusters() throws IOException;

    /** Loads the completed-job records of every cluster. */
    List<CompletedJob> loadAllCompletedJobs() throws IOException;

    /** Moves a single worker's record into the archived collection. */
    void archiveWorker(IMantisWorkerMetadata mwmd) throws IOException;

    /** Returns the archived workers of the given job. */
    List<IMantisWorkerMetadata> getArchivedWorkers(String jobId) throws IOException;

    /** Persists a brand-new job cluster; fails if one with the same name exists. */
    void createJobCluster(IJobClusterMetadata jobCluster) throws Exception;

    /** Replaces an existing job cluster's persisted definition. */
    void updateJobCluster(IJobClusterMetadata jobCluster) throws Exception;

    /** Removes a job cluster (and its completed-jobs records) from storage. */
    void deleteJobCluster(String name) throws Exception;

    /** Appends a completed-job record to the named cluster. */
    void storeCompletedJobForCluster(String name, CompletedJob job) throws IOException;

    /** Removes a single completed-job record from the named cluster. */
    void removeCompletedJobForCluster(String name, String jobId) throws IOException;

    /** Loads an archived job by id; empty when not found. */
    Optional<IMantisJobMetadata> loadArchivedJob(String jobId) throws IOException;

    //////////////////////////////////
    //  Optional<IJobClusterMetadata> getJobCluster(String clusterName) throws Exception;
    //Optional<IJobClusterMetadata> loadJobCluster(String clusterName);
    //
    //    CompletionStage<Void> shutdown();
    //

    /** Reads the persisted list of active-VM attribute values (empty when none stored). */
    List<String> initActiveVmAttributeValuesList() throws IOException;

    /** Persists the list of active-VM attribute values, replacing any previous list. */
    void setActiveVmAttributeValuesList(final List<String> vmAttributesList) throws IOException;
}
4,310
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/persistence/MantisJobStore.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.persistence;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.PriorityBlockingQueue;

import io.mantisrx.shaded.com.google.common.cache.Cache;
import io.mantisrx.shaded.com.google.common.cache.CacheBuilder;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import io.mantisrx.master.jobcluster.IJobClusterMetadata;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.master.jobcluster.job.IMantisStageMetadata;
import io.mantisrx.master.jobcluster.job.MantisStageMetadataImpl;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.master.jobcluster.job.worker.JobWorker;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl.CompletedJob;
import io.mantisrx.server.master.persistence.exceptions.InvalidJobException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.schedulers.Schedulers;

/**
 * Facade over {@link IMantisStorageProvider} that adds in-memory caches for
 * archived jobs and archived workers, plus a queue of terminated jobs ordered
 * by termination time.
 */
public class MantisJobStore {

    private static final Logger logger = LoggerFactory.getLogger(MantisJobStore.class);
    private final IMantisStorageProvider storageProvider;
    // Job ids of all archived jobs seen so far (id -> id; used as a concurrent set).
    private final ConcurrentMap<String, String> archivedJobIds;
    private final ArchivedJobsMetadataCache archivedJobsMetadataCache;
    private final ArchivedWorkersCache archivedWorkersCache;
    // Terminated jobs ordered by termination time (earliest first) for eventual deletion.
    private final PriorityBlockingQueue<TerminatedJob> terminatedJobsToDelete;

    public MantisJobStore(IMantisStorageProvider storageProvider) {
        this.storageProvider = storageProvider;
        archivedJobIds = new ConcurrentHashMap<>();
        archivedWorkersCache =
                new ArchivedWorkersCache(ConfigurationProvider.getConfig().getMaxArchivedJobsToCache());
        archivedJobsMetadataCache =
                new ArchivedJobsMetadataCache(ConfigurationProvider.getConfig().getMaxArchivedJobsToCache());
        terminatedJobsToDelete = new PriorityBlockingQueue<>();
    }

    /**
     * Asynchronously loads all archived jobs into the caches on the io scheduler.
     * Errors are logged; loading continues in the background.
     */
    public void loadAllArchivedJobsAsync() {
        logger.info("Beginning load of Archived Jobs");
        storageProvider.loadAllArchivedJobs()
                .subscribeOn(Schedulers.io())
                .subscribe((job) -> {
                    archivedJobsMetadataCache.add(job);
                    archivedJobIds.put(job.getJobId().getId(), job.getJobId().getId());
                    terminatedJobsToDelete.add(
                            new TerminatedJob(job.getJobId().getId(), getTerminatedAt(job)));
                }, (e) -> {
                    logger.warn("Exception loading archived Jobs", e);
                }, () -> {
                    logger.info("Finished Loading all archived Jobs!");
                });
    }

    /**
     * Estimates a job's termination time as the max of its submit time and all
     * workers' completion times.
     */
    private long getTerminatedAt(IMantisJobMetadata mjmd) {
        long terminatedAt = mjmd.getSubmittedAtInstant().toEpochMilli();
        for (IMantisStageMetadata msmd : mjmd.getStageMetadata().values()) {
            for (JobWorker mwmd : msmd.getAllWorkers()) {
                terminatedAt = Math.max(terminatedAt, mwmd.getMetadata().getCompletedAt());
            }
        }
        return terminatedAt;
    }

    public List<IJobClusterMetadata> loadAllJobClusters() throws IOException {
        if (logger.isTraceEnabled()) { logger.trace("Loading all job clusters"); }
        List<IJobClusterMetadata> iJobClusterMetadataList = storageProvider.loadAllJobClusters();
        logger.info("Loaded {} job clusters", iJobClusterMetadataList.size());
        return iJobClusterMetadataList;
    }

    public List<IMantisJobMetadata> loadAllActiveJobs() throws IOException {
        if (logger.isTraceEnabled()) { logger.trace("Loading all active jobs"); }
        List<IMantisJobMetadata> mantisJobMetadataList = storageProvider.loadAllJobs();
        logger.info("Loaded {} active jobs", mantisJobMetadataList.size());
        return mantisJobMetadataList;
    }

    public List<CompletedJob> loadAllCompletedJobs() throws IOException {
        if (logger.isTraceEnabled()) { logger.trace("Loading all completed jobs"); }
        List<CompletedJob> completedJobs = storageProvider.loadAllCompletedJobs();
        logger.info("Loaded {} completed jobs", completedJobs.size());
        return completedJobs;
    }

    public void createJobCluster(IJobClusterMetadata jobCluster) throws Exception {
        if (logger.isTraceEnabled()) { logger.trace("Creating Job Cluster {}", jobCluster); }
        storageProvider.createJobCluster(jobCluster);
        if (logger.isTraceEnabled()) {
            logger.trace("Created Job Cluster {}", jobCluster.getJobClusterDefinition().getName());
        }
    }

    public void updateJobCluster(IJobClusterMetadata jobCluster) throws Exception {
        if (logger.isTraceEnabled()) { logger.trace("Updating Job Cluster {}", jobCluster); }
        storageProvider.updateJobCluster(jobCluster);
        if (logger.isTraceEnabled()) {
            logger.trace("Updated Job Cluster {}", jobCluster.getJobClusterDefinition().getName());
        }
    }

    public void deleteJobCluster(String name) throws Exception {
        if (logger.isTraceEnabled()) { logger.trace("Deleting Job Cluster {}", name); }
        storageProvider.deleteJobCluster(name);
        if (logger.isTraceEnabled()) { logger.trace("Deleted Job Cluster {}", name); }
    }

    /** Deletes a job from storage and evicts it from both archived caches. */
    public void deleteJob(String jobId) throws Exception {
        if (logger.isTraceEnabled()) { logger.trace("Deleting Job {}", jobId); }
        archivedJobsMetadataCache.remove(jobId);
        archivedWorkersCache.remove(jobId);
        storageProvider.deleteJob(jobId);
        if (logger.isTraceEnabled()) { logger.trace("Deleted Job {}", jobId); }
    }

    public void deleteCompletedJob(String clusterName, String jobId) throws IOException {
        if (logger.isTraceEnabled()) { logger.trace("Deleting completed Job {}", jobId); }
        storageProvider.removeCompletedJobForCluster(clusterName, jobId);
        if (logger.isTraceEnabled()) { logger.trace("Deleted completed job {}", jobId); }
    }

    public void storeCompletedJobForCluster(String name, CompletedJob completedJob) throws IOException {
        if (logger.isTraceEnabled()) { logger.trace("Storing completed Job for cluster {}", completedJob); }
        storageProvider.storeCompletedJobForCluster(name, completedJob);
        if (logger.isTraceEnabled()) { logger.trace("Stored completed Job for cluster {}", completedJob); }
    }

    public void storeNewJob(IMantisJobMetadata jobMetadata) throws Exception {
        if (logger.isTraceEnabled()) { logger.trace("Storing new Job{}", jobMetadata); }
        storageProvider.storeNewJob(jobMetadata);
        if (logger.isTraceEnabled()) { logger.trace("Stored new Job {}", jobMetadata); }
    }

    /** Stores the replacement worker and updates the terminated one via the provider. */
    public void replaceTerminatedWorker(IMantisWorkerMetadata oldWorker, IMantisWorkerMetadata replacement)
            throws Exception {
        if (logger.isTraceEnabled()) {
            logger.trace("Replace terminated worker {} with new worker {}", oldWorker, replacement);
        }
        storageProvider.storeAndUpdateWorkers(oldWorker, replacement);
        if (logger.isTraceEnabled()) { logger.trace("Replaced terminated worker {}", oldWorker); }
    }

    public void updateJob(final IMantisJobMetadata jobMetadata) throws Exception {
        if (logger.isTraceEnabled()) { logger.trace("Update Job {}", jobMetadata); }
        storageProvider.updateJob(jobMetadata);
        if (logger.isTraceEnabled()) { logger.trace("Updated Job {}", jobMetadata); }
    }

    public void updateStage(IMantisStageMetadata stageMeta) throws IOException {
        storageProvider.updateMantisStage(stageMeta);
    }

    /**
     * Persists the given workers (and any not-yet-persisted stages they belong to).
     *
     * @return the stored workers, or {@code null} when {@code workerRequests} is null/empty
     *         (kept for backward compatibility with existing callers).
     */
    public List<? extends IMantisWorkerMetadata> storeNewWorkers(IMantisJobMetadata job,
            List<IMantisWorkerMetadata> workerRequests) throws IOException, InvalidJobException {
        if (logger.isTraceEnabled()) { logger.trace("Storing new workers for Job {} ", job); }
        if (workerRequests == null || workerRequests.isEmpty()) {
            return null;
        }
        String jobId = workerRequests.get(0).getJobId();
        if (logger.isDebugEnabled()) {
            logger.debug("Adding " + workerRequests.size() + " workers for job " + jobId);
        }
        List<IMantisWorkerMetadata> addedWorkers = new ArrayList<>();
        List<Integer> savedStageList = Lists.newArrayList();
        for (IMantisWorkerMetadata workerRequest : workerRequests) {
            // Store each stage at most once per call.
            if (!savedStageList.contains(workerRequest.getStageNum())) {
                Optional<IMantisStageMetadata> stageMetadata =
                        job.getStageMetadata(workerRequest.getStageNum());
                if (stageMetadata.isPresent()) {
                    storageProvider.storeMantisStage(stageMetadata.get());
                } else {
                    // Fixed: String.format uses %d, not the SLF4J-style {} placeholder.
                    throw new RuntimeException(
                            String.format("No such stage %d", workerRequest.getStageNum()));
                }
                savedStageList.add(workerRequest.getStageNum());
            }
            addedWorkers.add(workerRequest);
        }
        storageProvider.storeWorkers(jobId, addedWorkers);
        if (logger.isTraceEnabled()) { logger.trace("Stored new workers for Job {}", addedWorkers); }
        return addedWorkers;
    }

    public void storeNewWorker(IMantisWorkerMetadata workerRequest)
            throws IOException, InvalidJobException {
        if (logger.isTraceEnabled()) {
            logger.trace("Adding worker index=" + workerRequest.getWorkerIndex());
        }
        storageProvider.storeWorker(workerRequest);
    }

    public void updateWorker(IMantisWorkerMetadata worker) throws IOException {
        if (logger.isTraceEnabled()) {
            logger.trace("Updating worker index=" + worker.getWorkerIndex());
        }
        storageProvider.updateWorker(worker);
        if (logger.isTraceEnabled()) {
            logger.trace("Updated worker index=" + worker.getWorkerIndex());
        }
    }

    /** Archives every worker the stages mark as archiveable. Currently unused here; kept for callers. */
    private void archiveWorkersIfAny(IMantisJobMetadata mjmd) throws IOException {
        for (IMantisStageMetadata msmd : mjmd.getStageMetadata().values()) {
            for (JobWorker removedWorker : ((MantisStageMetadataImpl) msmd).removeArchiveableWorkers()) {
                archiveWorker(removedWorker.getMetadata());
            }
        }
    }

    /** Archives a worker in storage and mirrors it into the archived-workers cache. */
    public void archiveWorker(IMantisWorkerMetadata worker) throws IOException {
        if (logger.isTraceEnabled()) {
            logger.trace("Archiving worker index=" + worker.getWorkerIndex());
        }
        storageProvider.archiveWorker(worker);
        try {
            ConcurrentMap<Integer, IMantisWorkerMetadata> workersMap =
                    archivedWorkersCache.getArchivedWorkerMap(worker.getJobId());
            workersMap.putIfAbsent(worker.getWorkerNumber(), worker);
        } catch (ExecutionException e) {
            // Pass the throwable as the last arg (not a format arg) so the stack trace is logged.
            logger.warn("Error adding worker to archived cache", e);
        }
        if (logger.isTraceEnabled()) {
            logger.trace("Archived worker index=" + worker.getWorkerIndex());
        }
    }

    public Optional<IMantisJobMetadata> getArchivedJob(final String jobId) {
        if (logger.isTraceEnabled()) { logger.trace("Get Archived Job {}", jobId); }
        final Optional<IMantisJobMetadata> jobOp =
                Optional.ofNullable(archivedJobsMetadataCache.getJob(jobId));
        if (!jobOp.isPresent()) {
            logger.error("archivedJobsMetadataCache found no job for job ID {}", jobId);
        }
        if (logger.isTraceEnabled()) { logger.trace("Got archived Job {}", jobOp); }
        return jobOp;
    }

    public void archiveJob(IMantisJobMetadata job) throws IOException {
        if (logger.isTraceEnabled()) { logger.trace("Archiving Job {}", job); }
        archivedJobsMetadataCache.add(job);
        storageProvider.archiveJob(job.getJobId().getId());
        if (logger.isTraceEnabled()) { logger.trace("Archived Job {}", job.getJobId()); }
    }

    /**
     * Looks up a single archived worker via the cache.
     *
     * @param jobId        job id of the worker's job.
     * @param workerNumber worker number within the job.
     *
     * @return the worker metadata, or empty when unknown or on cache-load failure.
     */
    public Optional<IMantisWorkerMetadata> getArchivedWorker(String jobId, int workerNumber) {
        try {
            ConcurrentMap<Integer, IMantisWorkerMetadata> workersMap =
                    archivedWorkersCache.getArchivedWorkerMap(jobId);
            if (workersMap != null) {
                return Optional.ofNullable(workersMap.get(workerNumber));
            }
        } catch (ExecutionException e) {
            logger.warn("Exception getting archived Worker", e);
        }
        return Optional.empty();
    }

    public List<IMantisWorkerMetadata> getArchivedWorkers(String jobId) throws Exception {
        if (logger.isTraceEnabled()) { logger.trace("Getting Archived workers for Job {}", jobId); }
        // Typed list (the original used a raw List).
        List<IMantisWorkerMetadata> archivedWorkers =
                new ArrayList<>(archivedWorkersCache.getArchivedWorkerMap(jobId).values());
        if (logger.isTraceEnabled()) {
            logger.trace("Fetched archived {} workers for Job {}", archivedWorkers.size(), jobId);
        }
        return archivedWorkers;
    }

    /** Terminated-job record, ordered by termination time (earliest first). */
    private static class TerminatedJob implements Comparable<TerminatedJob> {

        private final String jobId;
        private final long terminatedTime;

        private TerminatedJob(String jobId, long terminatedTime) {
            this.jobId = jobId;
            this.terminatedTime = terminatedTime;
        }

        @Override
        public int compareTo(TerminatedJob o) {
            // Long.compare avoids the overflow of a subtraction-based comparator.
            return Long.compare(terminatedTime, o.terminatedTime);
        }
    }

    /** LRU-bounded cache of jobId -> (workerNumber -> worker) maps, loaded on demand. */
    private class ArchivedWorkersCache {

        private final Cache<String, ConcurrentMap<Integer, IMantisWorkerMetadata>> cache;

        ArchivedWorkersCache(int cacheSize) {
            cache = CacheBuilder
                    .newBuilder()
                    .maximumSize(cacheSize)
                    .build();
        }

        ConcurrentMap<Integer, IMantisWorkerMetadata> getArchivedWorkerMap(final String jobId)
                throws ExecutionException {
            return cache.get(jobId, () -> {
                List<IMantisWorkerMetadata> workers = storageProvider.getArchivedWorkers(jobId);
                ConcurrentMap<Integer, IMantisWorkerMetadata> theMap = new ConcurrentHashMap<>();
                if (workers != null) {
                    for (IMantisWorkerMetadata mwmd : workers) {
                        theMap.putIfAbsent(mwmd.getWorkerNumber(), mwmd);
                    }
                }
                return theMap;
            });
        }

        void remove(String jobId) {
            cache.invalidate(jobId);
        }
    }

    /** LRU-bounded cache of archived job metadata, loaded from storage on miss. */
    private class ArchivedJobsMetadataCache {

        private final Cache<String, IMantisJobMetadata> cache;

        ArchivedJobsMetadataCache(int cacheSize) {
            cache = CacheBuilder
                    .newBuilder()
                    .maximumSize(cacheSize)
                    .build();
        }

        /** @return the job, or {@code null} when it cannot be loaded. */
        IMantisJobMetadata getJob(String jobId) {
            try {
                return cache.get(jobId, () -> loadArchivedJob(jobId));
            } catch (ExecutionException e) {
                return null;
            }
        }

        private IMantisJobMetadata loadArchivedJob(String jobId)
                throws IOException, InvalidJobException, ExecutionException {
            if (logger.isTraceEnabled()) { logger.trace("Loading archived job {}", jobId); }
            final Optional<IMantisJobMetadata> jobMetadata = storageProvider.loadArchivedJob(jobId);
            if (!jobMetadata.isPresent()) {
                throw new ExecutionException(new InvalidJobException(jobId));
            }
            if (logger.isTraceEnabled()) { logger.trace("Loaded archived job {}", jobMetadata); }
            return jobMetadata.get();
        }

        void add(IMantisJobMetadata job) {
            cache.put(job.getJobId().getId(), job);
        }

        void remove(String jobId) {
            cache.invalidate(jobId);
        }
    }
}
4,311
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/persistence/ArchivedJobsLoaderActor.java
//package io.mantisrx.server.master.persistence; ////package com.netflix.mantis.master.persistence; //// //import akka.actor.AbstractActor; //import akka.actor.ActorRef; //import akka.actor.Props; //import akka.event.Logging; //import akka.event.LoggingAdapter; //import io.mantisrx.master.jobcluster.job.IMantisJobMetadata; //import io.mantisrx.master.jobcluster.proto.BaseRequest; //import io.mantisrx.master.jobcluster.proto.BaseResponse; // //import java.util.List; // //import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR; //import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SERVER_ERROR; //import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS; // // //import org.slf4j.Logger; //import org.slf4j.LoggerFactory; // //import com.beust.jcommander.internal.Lists; //// //public class ArchivedJobsLoaderActor extends AbstractActor { //// // private final Logger logger = LoggerFactory.getLogger(ArchivedJobsLoaderActor.class); // private final MantisJobStore jobStore; //// //// private Map<String, JobClusterDAO> jobClusterMap = new HashMap<>(); //// private Map<String, JobDAO> jobMap = new HashMap<>(); //// // public static Props props(final MantisJobStore jobStore) { // return Props.create(ArchivedJobsLoaderActor.class, jobStore); // } //// // public ArchivedJobsLoaderActor(final MantisJobStore jobStore) { // this.jobStore = jobStore; // } //// // public static class LoadArchivedJobsRequest extends BaseRequest { // // public LoadArchivedJobsRequest() { // super(); // } // } // // // public static class LoadArchivedJobsResponse extends BaseResponse { // public final List<IMantisJobMetadata> archivedJobsList; // public LoadArchivedJobsResponse(long requestId, ResponseCode isSuccess, String message, List<IMantisJobMetadata> archivedList) { // super(requestId, isSuccess, message); // this.archivedJobsList = archivedList; // } // } // // private void onLoadArchivedJobs(final 
LoadArchivedJobsRequest request) { // ActorRef sender = getSender(); // try { // List<IMantisJobMetadata> archivedList = jobStore.loadAllArchivedJobs(); // sender.tell(new LoadArchivedJobsResponse(request.requestId, SUCCESS, "Loaded " + archivedList.size() + " archived jobs", archivedList), sender); // } catch(Exception e) { // sender.tell(new LoadArchivedJobsResponse(request.requestId, SERVER_ERROR, "Error loading archived jobs " + e.getMessage(), Lists.newArrayList(0)), sender); // } // } // @Override // public void preStart() throws Exception { // logger.info("Persistence Actor started"); // } // // @Override // public void postStop() throws Exception { // logger.info("Persistence Actor stopped"); // } // // @Override // public Receive createReceive() { // return receiveBuilder() // .match(LoadArchivedJobsRequest.class, r -> onLoadArchivedJobs(r)) // .matchAny(x -> logger.info("unexpected message '{}' received by ArchivedJobsLoaderActor actor ", x)) // .build(); // } //}
4,312
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/persistence/MantisStorageProviderAdapter.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.persistence;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;

import io.mantisrx.shaded.com.google.common.collect.Lists;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.jobcluster.IJobClusterMetadata;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.master.jobcluster.job.IMantisStageMetadata;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.server.master.domain.DataFormatAdapter;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl.CompletedJob;
import io.mantisrx.server.master.store.InvalidNamedJobException;
import io.mantisrx.server.master.store.JobAlreadyExistsException;
import io.mantisrx.server.master.store.JobNameAlreadyExistsException;
import io.mantisrx.server.master.store.MantisJobMetadataWritable;
import io.mantisrx.server.master.store.MantisStorageProvider;
import io.mantisrx.server.master.store.MantisWorkerMetadataWritable;
import io.mantisrx.server.master.store.NamedJob;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;

/**
 * Adapts the legacy {@link MantisStorageProvider} API to {@link IMantisStorageProvider}:
 * every call converts between the new metadata interfaces and the legacy writable types
 * via {@link DataFormatAdapter}, delegating the actual persistence to the wrapped provider.
 * Checked provider-specific exceptions are wrapped in plain {@link Exception} where the
 * interface requires it.
 */
public class MantisStorageProviderAdapter implements IMantisStorageProvider {

    private static final Logger logger = LoggerFactory.getLogger(MantisStorageProviderAdapter.class);
    private final MantisStorageProvider sProvider;
    private final LifecycleEventPublisher eventPublisher;

    public MantisStorageProviderAdapter(MantisStorageProvider actualStorageProvider,
                                        LifecycleEventPublisher eventPublisher) {
        this.sProvider = actualStorageProvider;
        this.eventPublisher = eventPublisher;
    }

    /** Converts and persists a brand new job; wraps provider exceptions. */
    @Override
    public void storeNewJob(IMantisJobMetadata jobMetadata) throws Exception {
        if (logger.isTraceEnabled()) { logger.trace("Enter storeNewJob {}", jobMetadata); }
        MantisJobMetadataWritable mjmw =
                DataFormatAdapter.convertMantisJobMetadataToMantisJobMetadataWriteable(jobMetadata);
        try {
            sProvider.storeNewJob(mjmw);
        } catch (JobAlreadyExistsException | IOException e) {
            throw new Exception(e);
        }
        if (logger.isTraceEnabled()) { logger.trace("Exit store New job"); }
    }

    /** Converts and persists an update to an existing job; wraps provider exceptions. */
    @Override
    public void updateJob(IMantisJobMetadata jobMetadata) throws Exception {
        if (logger.isTraceEnabled()) { logger.trace("Enter updateJob {}", jobMetadata); }
        MantisJobMetadataWritable mjmw =
                DataFormatAdapter.convertMantisJobMetadataToMantisJobMetadataWriteable(jobMetadata);
        try {
            sProvider.updateJob(mjmw);
        } catch (io.mantisrx.server.master.store.InvalidJobException e) {
            throw new Exception(e);
        }
        if (logger.isTraceEnabled()) { logger.trace("Exit updateJob"); }
    }

    /** Delegates archival of a terminated job to the provider. */
    @Override
    public void archiveJob(String jobId) throws IOException {
        if (logger.isTraceEnabled()) { logger.trace("Enter archiveJob {}", jobId); }
        sProvider.archiveJob(jobId);
        if (logger.isTraceEnabled()) { logger.trace("Exit archiveJob"); }
    }

    /** Deletes a job; wraps the provider's InvalidJobException. */
    @Override
    public void deleteJob(String jobId) throws Exception {
        if (logger.isTraceEnabled()) { logger.trace("Enter delete job {}", jobId); }
        try {
            sProvider.deleteJob(jobId);
        } catch (io.mantisrx.server.master.store.InvalidJobException e) {
            throw new Exception(e);
        }
        if (logger.isTraceEnabled()) { logger.trace("Exit deleteJob"); }
    }

    /** Converts and persists stage metadata. */
    @Override
    public void storeMantisStage(IMantisStageMetadata msmd) throws IOException {
        if (logger.isTraceEnabled()) { logger.trace("Enter storeMantisStage {}", msmd); }
        sProvider.storeMantisStage(
                DataFormatAdapter.convertMantisStageMetadataToMantisStageMetadataWriteable(msmd));
        if (logger.isTraceEnabled()) { logger.trace("Exit storeMantisStage"); }
    }

    /** Converts and persists an update to stage metadata. */
    @Override
    public void updateMantisStage(IMantisStageMetadata msmd) throws IOException {
        if (logger.isTraceEnabled()) { logger.trace("Enter updateMantisStage {}", msmd); }
        sProvider.updateMantisStage(
                DataFormatAdapter.convertMantisStageMetadataToMantisStageMetadataWriteable(msmd));
        if (logger.isTraceEnabled()) { logger.trace("Exit updateMantisStage"); }
    }

    /** Converts and persists a single worker. */
    @Override
    public void storeWorker(IMantisWorkerMetadata workerMetadata) throws IOException {
        if (logger.isTraceEnabled()) { logger.trace("Enter storeWorker {}", workerMetadata); }
        sProvider.storeWorker(
                DataFormatAdapter.convertMantisWorkerMetadataToMantisWorkerMetadataWritable(workerMetadata));
        if (logger.isTraceEnabled()) { logger.trace("Exit storeWorker"); }
    }

    /** Converts and persists a batch of workers for a job. */
    @Override
    public void storeWorkers(String jobId, List<IMantisWorkerMetadata> workers) throws IOException {
        if (logger.isTraceEnabled()) { logger.trace("Enter storeWorkers {} for Job {}", workers.size(), jobId); }
        List<MantisWorkerMetadataWritable> convertedList = new ArrayList<>(workers.size());
        for (IMantisWorkerMetadata worker : workers) {
            convertedList.add(DataFormatAdapter.convertMantisWorkerMetadataToMantisWorkerMetadataWritable(worker));
        }
        sProvider.storeWorkers(jobId, convertedList);
        if (logger.isTraceEnabled()) { logger.trace("Exit storeWorkers"); }
    }

    /** Atomically (per provider contract) updates one worker and stores its replacement. */
    @Override
    public void storeAndUpdateWorkers(IMantisWorkerMetadata existingWorker, IMantisWorkerMetadata newWorker)
            throws Exception {
        if (logger.isTraceEnabled()) { logger.trace("Enter storeAndUpdateWorkers"); }
        try {
            sProvider.storeAndUpdateWorkers(
                    DataFormatAdapter.convertMantisWorkerMetadataToMantisWorkerMetadataWritable(existingWorker),
                    DataFormatAdapter.convertMantisWorkerMetadataToMantisWorkerMetadataWritable(newWorker));
        } catch (io.mantisrx.server.master.store.InvalidJobException e) {
            throw new Exception(e);
        }
        if (logger.isTraceEnabled()) { logger.trace("Exit storeAndUpdateWorkers"); }
    }

    /** Converts and persists an update to a worker. */
    @Override
    public void updateWorker(IMantisWorkerMetadata worker) throws IOException {
        if (logger.isTraceEnabled()) { logger.trace("Enter updateWorker {}", worker); }
        sProvider.updateWorker(
                DataFormatAdapter.convertMantisWorkerMetadataToMantisWorkerMetadataWritable(worker));
        if (logger.isTraceEnabled()) { logger.trace("Exit updateWorker"); }
    }

    /**
     * Loads all active jobs from the provider, converting each; jobs that fail conversion
     * are logged (with stack trace) and skipped rather than failing the whole load.
     */
    @Override
    public List<IMantisJobMetadata> loadAllJobs() throws IOException {
        logger.info("MantisStorageProviderAdapter:Enter loadAllJobs");
        List<IMantisJobMetadata> jobMetas = Lists.newArrayList();
        sProvider.initJobs().forEach((mw) -> {
            try {
                jobMetas.add(DataFormatAdapter.convertMantisJobWriteableToMantisJobMetadata(mw, eventPublisher));
            } catch (Exception e) {
                // Fixed: previously used e.printStackTrace() plus a message-only log;
                // passing e as the trailing argument retains the full stack trace in the log.
                logger.warn("Exception loading job {}", e.getMessage(), e);
            }
        });
        logger.info("MantisStorageProviderAdapter:Exit loadAllJobs {}", jobMetas.size());
        return jobMetas;
    }

    /**
     * Streams all archived jobs, converting each lazily; jobs that fail conversion are
     * logged and filtered out of the stream.
     */
    @Override
    public Observable<IMantisJobMetadata> loadAllArchivedJobs() {
        if (logger.isTraceEnabled()) { logger.trace("In StorageAdapter.loadAllArchivedJobs"); }
        return sProvider.initArchivedJobs().map((mjm) -> {
            try {
                if (logger.isDebugEnabled()) { logger.debug("Reading Archived Job {}", mjm); }
                IMantisJobMetadata archivedJob =
                        DataFormatAdapter.convertMantisJobWriteableToMantisJobMetadata(mjm, eventPublisher, true);
                if (logger.isDebugEnabled()) { logger.debug("Read Archived Job {}", archivedJob); }
                return archivedJob;
            } catch (Exception e) {
                logger.error("Exception {} occurred converting archived job {}", e,
                        Optional.ofNullable(mjm).map(j -> j.getJobId()).orElse(""));
                return null;
            }
        })
        .filter((j) -> j != null);
    }

    /**
     * Loads all job clusters, converting each; conversion failures are counted and logged
     * but do not abort the load.
     */
    @Override
    public List<IJobClusterMetadata> loadAllJobClusters() throws IOException {
        if (logger.isTraceEnabled()) { logger.trace("Enter StorageAdapter.loadAllJobClusters"); }
        List<NamedJob> namedJobList = sProvider.initNamedJobs();
        AtomicInteger failedCount = new AtomicInteger();
        AtomicInteger successCount = new AtomicInteger();
        // Fixed: removed a dead initial assignment (the list was created and immediately
        // overwritten by the stream result).
        List<IJobClusterMetadata> jobClusters = namedJobList
                .stream()
                .map((nJob) -> {
                    try {
                        IJobClusterMetadata jobClusterMetadata =
                                DataFormatAdapter.convertNamedJobToJobClusterMetadata(nJob);
                        successCount.getAndIncrement();
                        return jobClusterMetadata;
                    } catch (Exception e) {
                        logger.error("Exception {} converting {} ", e.getMessage(), nJob);
                        logger.error("Exception is", e);
                        failedCount.getAndIncrement();
                    }
                    return null;
                })
                .filter((jobClusterMeta) -> jobClusterMeta != null)
                .collect(Collectors.toList());
        logger.info("Succesfully read and converted {} job clusters", successCount.get());
        logger.info("Failed to read and converted {} job clusters", failedCount.get());
        if (logger.isTraceEnabled()) { logger.trace("Exit StorageAdapter.loadAllJobClusters"); }
        return jobClusters;
    }

    /**
     * Loads all completed jobs across clusters. Per-job conversion failures are counted
     * and skipped; a failure of the underlying stream itself is rethrown as IOException.
     */
    @Override
    public List<CompletedJob> loadAllCompletedJobs() throws IOException {
        if (logger.isTraceEnabled()) { logger.trace("In StorageAdapter.loadAllCompletedJobs"); }
        List<CompletedJob> completedJobsList = Lists.newArrayList();
        Observable<NamedJob.CompletedJob> namedJobCompletedJobs = sProvider.initNamedJobCompletedJobs();
        AtomicInteger failedCount = new AtomicInteger();
        AtomicInteger successCount = new AtomicInteger();
        AtomicReference<String> errorMsg = new AtomicReference<>("");
        namedJobCompletedJobs.map((completedJob) -> {
            try {
                CompletedJob convertedCompletedJob =
                        DataFormatAdapter.convertNamedJobCompletedJobToCompletedJob(completedJob);
                successCount.getAndIncrement();
                return convertedCompletedJob;
            } catch (Exception e) {
                logger.error("Exception {} converting {} ", e.getMessage(), completedJob);
                logger.error("Conversion errors is", e);
                failedCount.getAndIncrement();
            }
            return null;
        })
        .filter((convertedCompletedJob) -> convertedCompletedJob != null)
        .forEach((jb) -> completedJobsList.add(jb), error -> { errorMsg.set(error.getMessage()); });
        if (!errorMsg.get().isEmpty()) {
            logger.error("Exception occurred loading completed jobs {}", errorMsg.get());
            throw new IOException(errorMsg.get());
        }
        logger.info("Succesfully read and converted {} job clusters", successCount.get());
        logger.info("Failed to read and converted {} job clusters", failedCount.get());
        if (logger.isTraceEnabled()) { logger.trace("Exit StorageAdapter.loadAllCompletedJobs"); }
        return completedJobsList;
    }

    /** Converts and archives a single worker. */
    @Override
    public void archiveWorker(IMantisWorkerMetadata mwmd) throws IOException {
        if (logger.isTraceEnabled()) { logger.trace("Enter MantisStorageProviderAdapter:archiveWorker {}", mwmd); }
        sProvider.archiveWorker(
                DataFormatAdapter.convertMantisWorkerMetadataToMantisWorkerMetadataWritable(mwmd));
        if (logger.isTraceEnabled()) { logger.trace("Exit MantisStorageProviderAdapter:archiveWorker {}", mwmd); }
    }

    /**
     * Loads and converts all archived workers of a job; workers that fail conversion are
     * logged and skipped.
     */
    @Override
    public List<IMantisWorkerMetadata> getArchivedWorkers(String jobId) throws IOException {
        if (logger.isTraceEnabled()) { logger.trace("Enter MantisStorageProviderAdapter:getArchivedWorkers {}", jobId); }
        List<IMantisWorkerMetadata> archivedWorkers = Lists.newArrayList();
        for (MantisWorkerMetadataWritable mantisWorkerMetadataWritable : sProvider.getArchivedWorkers(jobId)) {
            try {
                archivedWorkers.add(DataFormatAdapter.convertMantisWorkerMetadataWriteableToMantisWorkerMetadata(
                        mantisWorkerMetadataWritable, eventPublisher).getMetadata());
            } catch (Exception e) {
                logger.error("Exception {} converting {} ", e.getMessage(), mantisWorkerMetadataWritable);
            }
        }
        if (logger.isTraceEnabled()) {
            logger.trace("Exit MantisStorageProviderAdapter:getArchivedWorkers {} with {} workers",
                    jobId, archivedWorkers.size());
        }
        return archivedWorkers;
    }

    /** Creates a job cluster; wraps the provider's name-collision exception. */
    @Override
    public void createJobCluster(IJobClusterMetadata jobCluster) throws Exception {
        if (logger.isTraceEnabled()) { logger.trace("Enter MantisStorageProviderAdapter:createJobCluster {}", jobCluster); }
        try {
            sProvider.storeNewNamedJob(DataFormatAdapter.convertJobClusterMetadataToNamedJob(jobCluster));
        } catch (JobNameAlreadyExistsException e) {
            throw new Exception(e);
        }
        if (logger.isTraceEnabled()) { logger.trace("Exit MantisStorageProviderAdapter:createJobCluster {}", jobCluster); }
    }

    /** Updates a job cluster; wraps the provider's InvalidNamedJobException. */
    @Override
    public void updateJobCluster(IJobClusterMetadata jobCluster) throws Exception {
        if (logger.isTraceEnabled()) { logger.trace("Enter MantisStorageProviderAdapter:updateJobCluster {}", jobCluster); }
        try {
            sProvider.updateNamedJob(DataFormatAdapter.convertJobClusterMetadataToNamedJob(jobCluster));
        } catch (InvalidNamedJobException e) {
            throw new Exception(e);
        }
        // Fixed copy-paste: exit trace previously said "createJobCluster".
        if (logger.isTraceEnabled()) { logger.trace("Exit MantisStorageProviderAdapter:updateJobCluster {}", jobCluster); }
    }

    /** Deletes a job cluster by name; wraps IOException from the provider. */
    @Override
    public void deleteJobCluster(String name) throws Exception {
        if (logger.isTraceEnabled()) { logger.trace("Enter MantisStorageProviderAdapter:deleteJobCluster {}", name); }
        try {
            sProvider.deleteNamedJob(name);
        } catch (IOException e) {
            throw new Exception(e);
        }
        // Fixed copy-paste: exit trace previously said "createJobCluster".
        if (logger.isTraceEnabled()) { logger.trace("Exit MantisStorageProviderAdapter:deleteJobCluster {}", name); }
    }

    /** Records a completed job under its cluster. */
    @Override
    public void storeCompletedJobForCluster(String name, CompletedJob job) throws IOException {
        if (logger.isTraceEnabled()) { logger.trace("Enter MantisStorageProviderAdapter:storeCompletedJobForCluster {}", name); }
        sProvider.storeCompletedJobForNamedJob(name, DataFormatAdapter.convertCompletedJobToNamedJobCompletedJob(job));
        // Fixed copy-paste: exit trace previously said "Enter".
        if (logger.isTraceEnabled()) { logger.trace("Exit MantisStorageProviderAdapter:storeCompletedJobForCluster {}", name); }
    }

    /** Removes a completed-job record from its cluster. */
    @Override
    public void removeCompletedJobForCluster(String name, String jobId) throws IOException {
        if (logger.isTraceEnabled()) { logger.trace("Enter MantisStorageProviderAdapter:removeCompletedJobForCluster {}", jobId); }
        // NOTE: "removeCompledtedJobForNamedJob" is the provider's actual (misspelled)
        // method name; it cannot be renamed here.
        sProvider.removeCompledtedJobForNamedJob(name, jobId);
        if (logger.isTraceEnabled()) { logger.trace("Exit MantisStorageProviderAdapter:removeCompletedJobForCluster {}", jobId); }
    }

    /**
     * Loads a single archived job, converting it; any load/conversion failure is logged
     * and reported as an absent Optional.
     */
    @Override
    public Optional<IMantisJobMetadata> loadArchivedJob(String jobId) throws IOException {
        if (logger.isTraceEnabled()) { logger.trace("Enter MantisStorageProviderAdapter:loadArchivedJob {}", jobId); }
        IMantisJobMetadata mantisJobMetadata;
        try {
            MantisJobMetadataWritable archJob = sProvider.loadArchivedJob(jobId);
            mantisJobMetadata =
                    (DataFormatAdapter.convertMantisJobWriteableToMantisJobMetadata(archJob, eventPublisher, true));
        } catch (Exception e) {
            logger.error("Exception loading archived Job", e);
            return Optional.empty();
        }
        if (logger.isTraceEnabled()) { logger.trace("Exit MantisStorageProviderAdapter:loadArchivedJob {}", jobId); }
        return Optional.ofNullable(mantisJobMetadata);
    }

    /** Delegates to the provider's active-VM attribute list initialization. */
    @Override
    public List<String> initActiveVmAttributeValuesList() throws IOException {
        return sProvider.initActiveVmAttributeValuesList();
    }

    /** Delegates to the provider's active-VM attribute list setter. */
    @Override
    public void setActiveVmAttributeValuesList(List<String> vmAttributesList) throws IOException {
        sProvider.setActiveVmAttributeValuesList(vmAttributesList);
    }
}
4,313
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/persistence
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/persistence/exceptions/InvalidWorkerStateChangeException.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.persistence.exceptions;

import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.server.core.domain.WorkerId;

/**
 * Thrown when a worker is found in an unexpected state, or when an attempted
 * worker state transition is not permitted.
 */
public class InvalidWorkerStateChangeException extends Exception {

    private static final long serialVersionUID = 6997193965197779136L;

    /** Worker of the given job was found in an unexpected state. */
    public InvalidWorkerStateChangeException(String jobId, WorkerState state) {
        super("Unexpected state " + state + " for job " + jobId);
    }

    /** Worker of the given job was found in an unexpected state, with an underlying cause. */
    public InvalidWorkerStateChangeException(String jobId, WorkerState state, Throwable t) {
        super("Unexpected state " + state + " for job " + jobId, t);
    }

    /**
     * The given worker's transition from {@code fromState} to {@code toState} is not allowed.
     * Fixed: the {@code jobId} parameter was previously accepted but never used, so the
     * message lacked job context.
     */
    public InvalidWorkerStateChangeException(String jobId, WorkerId workerId, WorkerState fromState,
                                             WorkerState toState) {
        super("Invalid worker state transition of " + workerId.getId() + " for job " + jobId
                + " from state " + fromState + " to " + toState);
    }

    /** A worker state transition of the given job is not allowed, with an underlying cause. */
    public InvalidWorkerStateChangeException(String jobId, WorkerState fromState, WorkerState toState,
                                             Throwable cause) {
        super("Invalid worker state transition of job " + jobId + " from state " + fromState
                + " to " + toState, cause);
    }
}
4,314
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/persistence
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/persistence/exceptions/InvalidJobException.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.persistence.exceptions;

import io.mantisrx.server.master.domain.JobId;

/**
 * Thrown when a referenced job (optionally a particular stage and/or worker of it)
 * does not exist or is otherwise invalid. The message identifies the entity as
 * {@code <jobId>[-stage-<n>][-worker-<m>]}; negative stage/worker numbers are omitted.
 */
public class InvalidJobException extends Exception {

    /** for serialization. */
    private static final long serialVersionUID = -6012093609773859131L;

    public InvalidJobException(String id) {
        super(id);
    }

    public InvalidJobException(String id, Throwable cause) {
        super(id, cause);
    }

    public InvalidJobException(JobId jobId, int stageNum, int workerId) {
        super(describe(jobId, stageNum, workerId));
    }

    public InvalidJobException(JobId jobId, int stageNum, int workerId, Throwable cause) {
        super(describe(jobId, stageNum, workerId), cause);
    }

    public InvalidJobException(String jobId, int stageNum, int workerId) {
        super(describe(jobId, stageNum, workerId));
    }

    public InvalidJobException(String jobId, int stageNum, int workerId, Throwable cause) {
        super(describe(jobId, stageNum, workerId), cause);
    }

    /** Builds the "<jobId>[-stage-<n>][-worker-<m>]" identifier shared by all constructors. */
    private static String describe(Object jobId, int stageNum, int workerId) {
        StringBuilder sb = new StringBuilder(String.valueOf(jobId));
        if (stageNum >= 0) {
            sb.append("-stage-").append(stageNum);
        }
        if (workerId >= 0) {
            sb.append("-worker-").append(workerId);
        }
        return sb.toString();
    }
}
4,315
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/persistence
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/persistence/exceptions/InvalidJobStateChangeException.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.persistence.exceptions;

import io.mantisrx.master.jobcluster.job.JobState;

/**
 * Thrown when a job is found in an unexpected state, or when a requested job state
 * transition is not permitted.
 */
public class InvalidJobStateChangeException extends Exception {

    /** for serialization. */
    private static final long serialVersionUID = 7215672111575922178L;

    /** The job was found in a state it was not expected to be in. */
    public InvalidJobStateChangeException(String jobId, JobState state) {
        super(String.format("Unexpected state %s for job %s", state, jobId));
    }

    /** The job was found in an unexpected state, with an underlying cause. */
    public InvalidJobStateChangeException(String jobId, JobState state, Throwable t) {
        super(String.format("Unexpected state %s for job %s", state, jobId), t);
    }

    /** Transitioning the job from {@code fromState} to {@code toState} is not allowed. */
    public InvalidJobStateChangeException(String jobId, JobState fromState, JobState toState) {
        super(String.format("Invalid state transition of job %s from state %s to %s",
                jobId, fromState, toState));
    }

    /** A disallowed state transition, with an underlying cause. */
    public InvalidJobStateChangeException(String jobId, JobState fromState, JobState toState, Throwable cause) {
        super(String.format("Invalid state transition of job %s from state %s to %s",
                jobId, fromState, toState), cause);
    }
}
4,316
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/persistence
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/persistence/exceptions/JobClusterAlreadyExistsException.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.persistence.exceptions; public class JobClusterAlreadyExistsException extends Exception { /** * */ private static final long serialVersionUID = -1492003797257425141L; public JobClusterAlreadyExistsException(String jobClusterName) { super(jobClusterName); } public JobClusterAlreadyExistsException(String jobClusterName, Throwable cause) { super(jobClusterName, cause); } }
4,317
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/heartbeathandlers/HeartbeatPayloadHandler.java
///* // * Copyright 2019 Netflix, Inc. // * // * Licensed under the Apache License, Version 2.0 (the "License"); // * you may not use this file except in compliance with the License. // * You may obtain a copy of the License at // * // * http://www.apache.org/licenses/LICENSE-2.0 // * // * Unless required by applicable law or agreed to in writing, software // * distributed under the License is distributed on an "AS IS" BASIS, // * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // * See the License for the specific language governing permissions and // * limitations under the License. // */ // //package io.mantisrx.server.master.heartbeathandlers; // //import java.util.ArrayList; //import java.util.Collection; //import java.util.List; //import java.util.concurrent.ConcurrentHashMap; //import java.util.concurrent.ConcurrentMap; // //import io.mantisrx.runtime.MantisJobDurationType; //import io.mantisrx.server.core.Status; //import io.mantisrx.server.core.StatusPayloads; //import io.mantisrx.server.master.MantisJobMgr; //import io.mantisrx.server.master.store.MantisJobMetadata; //import io.mantisrx.server.master.store.MantisStageMetadata; //import io.reactivx.mantis.operators.DropOperator; //import org.slf4j.Logger; //import org.slf4j.LoggerFactory; //import rx.Observable; //import rx.Observer; //import rx.Subscriber; //import rx.functions.Action0; //import rx.functions.Action1; //import rx.functions.Func1; //import rx.observables.GroupedObservable; //import rx.observers.SerializedObserver; //import rx.schedulers.Schedulers; //import rx.subjects.PublishSubject; // // //public class HeartbeatPayloadHandler { // //// private static final HeartbeatPayloadHandler instance; //// //// static { //// instance = new HeartbeatPayloadHandler(); //// } //// //// public static HeartbeatPayloadHandler getInstance() { //// return instance; //// } //// //// public static class Data { //// //// private final String jobId; //// private final MantisJobMgr 
jobMgr; //// private final int stage; //// private final int workerIndex; //// private final int workerNumber; //// private final Status.Payload payload; //// //// public Data(String jobId, MantisJobMgr jobMgr, int stage, int workerIndex, int workerNumber, Status.Payload payload) { //// this.jobId = jobId; //// this.jobMgr = jobMgr; //// this.stage = stage; //// this.workerIndex = workerIndex; //// this.workerNumber = workerNumber; //// this.payload = payload; //// } //// //// String getJobId() { //// return jobId; //// } //// //// MantisJobMgr getJobMgr() { //// return jobMgr; //// } //// //// int getStage() { //// return stage; //// } //// //// int getWorkerIndex() { //// return workerIndex; //// } //// //// int getWorkerNumber() { //// return workerNumber; //// } //// //// Status.Payload getPayload() { //// return payload; //// } //// //// @Override //// public String toString() { //// return String.format("jobId=%s, stage=%d, worker index=%d, number=%d, payload type=%s", jobId, stage, //// workerIndex, workerNumber, (payload == null ? 
"none" : payload.getType())); //// } // } // // private static class PayloadOperator<T, R> implements Observable.Operator<String, Data> { // //// private final String jobId; //// private final Observer<Data> subscriptionStateObserver; //// private final Observer<Data> dataDropObserver; //// private final Observer<Data> resUsageObserver; //// private final List<Observer<Data>> observers = new ArrayList<>(); //// private final JobAutoScaler jobAutoScaler; //// //// private PayloadOperator(final String jobId, final MantisJobMgr jobMgr) { //// logger.info("Setting up payload operator for job " + jobId); //// this.jobId = jobId; //// if (jobMgr.hasJobMaster()) { //// // don't do anything but data drop handler for worker outlier detection, pass null autoscaler //// subscriptionStateObserver = null; //// dataDropObserver = new DataDropHandler(jobId, null, jobMgr).call(); //// observers.add(dataDropObserver); //// resUsageObserver = null; //// jobAutoScaler = null; //// } else { //// if (jobMgr.getJobMetadata() != null && //// jobMgr.getJobMetadata().getSla().getDurationType() == MantisJobDurationType.Transient) { //// subscriptionStateObserver = new SubscriptionStateHandler(jobId, jobMgr).call(); //// observers.add(subscriptionStateObserver); //// } else //// subscriptionStateObserver = null; //// if (isScalable(jobMgr)) //// jobAutoScaler = new JobAutoScaler(jobId, jobMgr); //// else //// jobAutoScaler = null; //// dataDropObserver = new DataDropHandler(jobId, //// jobAutoScaler == null ? 
null : jobAutoScaler.getObserver(), jobMgr).call(); //// observers.add(dataDropObserver); //// if (jobAutoScaler == null) //// resUsageObserver = null; //// else { //// resUsageObserver = new ResUsageHandler(jobId, jobAutoScaler.getObserver(), jobMgr).call(); //// observers.add(resUsageObserver); //// } //// if (jobAutoScaler != null) //// jobAutoScaler.start(); //// } //// logger.info("Done setting up payload operator for job " + jobId); //// } //// //// private boolean isScalable(MantisJobMgr jobMgr) { //// final MantisJobMetadata jobMetadata = jobMgr.getJobMetadata(); //// if (jobMetadata != null) { //// final Collection<? extends MantisStageMetadata> stageMetadata = jobMetadata.getStageMetadata(); //// for (MantisStageMetadata s : stageMetadata) { //// // scalable only if autoscaling config set and instances max > instances min for stage //// if (s.getScalable() && //// s.getScalingPolicy() != null && //// s.getScalingPolicy().getMax() > s.getScalingPolicy().getMin()) { //// return true; //// } //// } //// } //// return false; //// } //// //// @Override //// public Subscriber<? super Data> call(Subscriber<? 
super String> subscriber) { //// return new Subscriber<Data>() { //// @Override //// public void onCompleted() { //// logger.info("Completing payload handler for job " + jobId); //// for (Observer<Data> o : observers) //// o.onCompleted(); //// } //// //// @Override //// public void onError(Throwable e) { //// logger.error("Unexpected error: " + e.getMessage(), e); //// } //// //// @Override //// public void onNext(Data data) { //// //logger.info("Got data: " + data); //// switch (StatusPayloads.Type.valueOf(data.payload.getType())) { //// case SubscriptionState: //// if (subscriptionStateObserver != null) //// subscriptionStateObserver.onNext(data); //// break; //// case IncomingDataDrop: //// dataDropObserver.onNext(data); //// break; //// case ResourceUsage: //// if (resUsageObserver != null) //// resUsageObserver.onNext(data); //// break; //// default: //// logger.warn("Unknown status payload " + data.payload.getType() + " in heartbeat for job " + //// jobId + " worker index " + data.workerIndex + " number " + data.workerNumber); //// } //// } //// }; //// } // } // // private static final Logger logger = LoggerFactory.getLogger(HeartbeatPayloadHandler.class); // private final Observer<Data> observer; // private ConcurrentMap<String, Subscriber<? 
super Integer>> inners = new ConcurrentHashMap<>(); // private Func1<String, MantisJobMgr> jobMgrGetter = null; // // private HeartbeatPayloadHandler() { // PublishSubject<Data> subject = PublishSubject.create(); // observer = new SerializedObserver<>(subject); // subject // .lift(new DropOperator<Data>("JobHeartbeatsHandler")) // .groupBy(new Func1<Data, MantisJobMgr>() { // @Override // public MantisJobMgr call(Data data) { // return data.getJobMgr(); // } // }) // .doOnError(new Action1<Throwable>() { // @Override // public void call(Throwable throwable) { // logger.warn("Heartbeats handler error: " + throwable.getMessage(), throwable); // } // }) // .flatMap(new Func1<GroupedObservable<MantisJobMgr, Data>, Observable<?>>() { // @Override // public Observable<?> call(final GroupedObservable<MantisJobMgr, Data> go) { // final String jobId = go.getKey().getJobId(); // logger.info("handling HB for job " + jobId); // final Observable<Integer> iO = Observable.create(new Observable.OnSubscribe<Integer>() { // @Override // public void call(final Subscriber<? super Integer> subscriber) { // inners.put(jobId, subscriber); // logger.info("Subscribed to inner for " + (go.getKey() == null ? 
"nullJobMgr" : jobId)); // } // }); // MantisJobMgr jobMgr = jobMgrGetter.call(jobId); // if (jobMgr == null) { // logger.warn("jobMgr NULL when setting up payload operator for job {}", jobId); // return Observable.empty(); // } else { // return go // .takeUntil(iO) // .observeOn(Schedulers.computation()) // .lift(new PayloadOperator<>(jobId, jobMgr)) // .doOnUnsubscribe(() -> logger.info("Unsubscribe: job " + jobId)); // } // } // }) // .onErrorResumeNext(new Func1<Throwable, Observable<?>>() { // @Override // public Observable<?> call(Throwable throwable) { // logger.error("UNEXPECTED error in heartbeats handler: " + throwable.getMessage(), throwable); // return Observable.empty(); // } // }) // .doOnUnsubscribe(new Action0() { // @Override // public void call() { // logger.error("Unexpected: main heartbeats handler Observable was unsubscribed"); // } // }) // .subscribe(); // } // // public void handle(Data data) { // observer.onNext(data); // //logger.info("Done onNext'ing heartbeat for job " + data.getJobId() + " +, worker " + data.getWorkerNumber()); // } // // public void completeJob(MantisJobMgr jobMgr) { // logger.info("Completing observable chain for job " + jobMgr.getJobId()); // final Subscriber<? super Integer> subscriber = inners.get(jobMgr.getJobId()); // if (subscriber != null) { // subscriber.onNext(1); // subscriber.onCompleted(); // inners.remove(jobMgr.getJobId(), subscriber); // } // } // // public void setJobMetadataGetter(Func1<String, MantisJobMgr> jobMgrGetter) { // this.jobMgrGetter = jobMgrGetter; // } //}
4,318
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/heartbeathandlers/DataDropHandler.java
//package io.mantisrx.server.master.heartbeathandlers; // //import com.fasterxml.jackson.databind.DeserializationFeature; //import com.fasterxml.jackson.databind.ObjectMapper; // //import io.mantisrx.runtime.descriptor.StageScalingPolicy; //import io.mantisrx.server.core.ServiceRegistry; //import io.mantisrx.server.core.StatusPayloads; //import io.mantisrx.server.core.WorkerOutlier; //import io.mantisrx.server.master.store.InvalidJobException; //import io.mantisrx.server.master.store.InvalidJobStateChangeException; //import io.mantisrx.server.master.MantisJobMgr; //import io.mantisrx.server.master.store.MantisWorkerMetadata; //import io.reactivx.mantis.operators.DropOperator; // // //import org.slf4j.Logger; //import org.slf4j.LoggerFactory; // //import rx.Observable; //import rx.Observer; //import rx.Subscriber; //import rx.functions.Action0; //import rx.functions.Action1; //import rx.functions.Func1; //import rx.observables.GroupedObservable; //import rx.observers.SerializedObserver; //import rx.schedulers.Schedulers; //import rx.subjects.PublishSubject; // //import java.io.IOException; //import java.util.ArrayList; //import java.util.Date; //import java.util.List; //import java.util.Map; //import java.util.concurrent.ConcurrentHashMap; //import java.util.concurrent.ConcurrentMap; //import java.util.concurrent.ScheduledThreadPoolExecutor; //import java.util.concurrent.TimeUnit; // ///* package */ class DataDropHandler implements PayloadExecutor { // // private static volatile boolean resubmitOutlierWorker=true; // private static final String resubmitOutlierWorkerProp = "mantis.master.outlier.worker.resubmit"; // static { // new ScheduledThreadPoolExecutor(1).scheduleWithFixedDelay(new Runnable() { // @Override // public void run() { // resubmitOutlierWorker = Boolean.valueOf( // ServiceRegistry.INSTANCE.getPropertiesService().getStringValue(resubmitOutlierWorkerProp, // ""+resubmitOutlierWorker)); // } // }, 0, 60, TimeUnit.SECONDS); // } // // private static 
final Logger logger = LoggerFactory.getLogger(DataDropHandler.class); // private final String jobId; // private final PublishSubject<HeartbeatPayloadHandler.Data> dataDropSubject = PublishSubject.create(); // private final ObjectMapper objectMapper = new ObjectMapper(); // private final Observer<JobAutoScaler.Event> jobAutoScaleObserver; // private final MantisJobMgr jobMgr; // private ConcurrentMap<Integer, Subscriber<? super Integer>> inners = new ConcurrentHashMap<>(); // // DataDropHandler(String jobId, Observer<JobAutoScaler.Event> jobAutoScaleObserver, MantisJobMgr jobMgr) { // this.jobId = jobId; // objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); // this.jobAutoScaleObserver = jobAutoScaleObserver; // this.jobMgr = jobMgr; // } // // private static class DroppedData { // private final long when; // private final double value; // private DroppedData(long when, double value) { // this.when = when; // this.value = value; // } // public long getWhen() { // return when; // } // public double getValue() { // return value; // } // } // // private class StageDataDropOperator2<T,R> implements Observable.Operator<Object, HeartbeatPayloadHandler.Data> { // // private final long timeInPastThresholdSecs=60; // private final long killCooldownSecs=600; // private volatile long lastKilledWorkerAt=System.currentTimeMillis(); // private final ConcurrentMap<Integer, List<DroppedData>> workersMap = new ConcurrentHashMap<>(); // //private final Map<Integer, List<Boolean>> isOutlierMap = new HashMap<>(); // private final int stage; // final WorkerOutlier workerOutlier; // // StageDataDropOperator2(final int stage) { // this.stage = stage; // workerOutlier = new WorkerOutlier(killCooldownSecs, new Action1<Integer>() { // @Override // public void call(Integer workerIndex) { // try { // final MantisWorkerMetadata worker = jobMgr.getJobMetadata().getWorkerByIndex(stage, workerIndex); // if (resubmitOutlierWorker) // 
jobMgr.resubmitWorker(worker.getWorkerNumber(), // "dropping excessive data compared to others in stage"); // logger.warn((new Date()) + ": " + (resubmitOutlierWorker ? "" : "(not) ") + // "Killing worker idx " + worker.getWorkerIndex() + " of job " + worker.getJobId() + // ", stg " + stage + ", wrkNmbr " + worker.getWorkerNumber() + // ": it has been dropping excessive data for a while, compared to others"); // } catch (InvalidJobException | InvalidJobStateChangeException e) { // logger.warn("Can't resubmit outlier worker: " + e.getMessage(), e); // } // } // }); // } // // private void addDataPoint(int workerIndex, double value) { // final int numWorkers = jobMgr.getJobMetadata().getStageMetadata(stage).getNumWorkers(); // workerOutlier.addDataPoint(workerIndex, value, numWorkers); // if (jobAutoScaleObserver != null) { // addDroppedData(workerIndex, value); // // remove any data for workers with index that don't exist anymore (happens when stage scales down) // int maxIdx = 0; // for (Integer idx : workersMap.keySet()) // maxIdx = Math.max(maxIdx, idx); // for (int idx = numWorkers; idx < maxIdx; idx++) { // workersMap.remove(idx); // } // } // } // // private void addDroppedData(int workerIndex, double value) { // List<DroppedData> droppedDataList = workersMap.get(workerIndex); // if(droppedDataList==null) // workersMap.put(workerIndex, new ArrayList<DroppedData>()); // droppedDataList = workersMap.get(workerIndex); // while(!droppedDataList.isEmpty()) { // if(droppedDataList.get(droppedDataList.size()-1).when < (System.currentTimeMillis()-timeInPastThresholdSecs*1000)) // droppedDataList.remove(droppedDataList.size()-1); // remove last // else // break; // } // droppedDataList.add(0, new DroppedData(System.currentTimeMillis(), value)); // add to the beginning of list // } // // @Override // public Subscriber<? super HeartbeatPayloadHandler.Data> call(final Subscriber<? 
super Object> child) { // if (jobAutoScaleObserver != null) { // child.add(Schedulers.computation().createWorker().schedulePeriodically( // new Action0() { // @Override // public void call() { // long timeScope = System.currentTimeMillis() - timeInPastThresholdSecs*1000; // double avgOfAvgs=0.0; // for(Map.Entry<Integer, List<DroppedData>> entry: workersMap.entrySet()) { // double average=0.0; // int count=0; // for(DroppedData dd: entry.getValue()) { // if(dd.when>timeScope) { // average += dd.value; // count++; // } // } // if(count > 0) // average /= count; //// logger.info("Job " + jobId + " stage " + stage + " index " + entry.getKey() + " has " + average + "% average data drop from " + //// count + " data points"); // avgOfAvgs += average; // // ToDo need better math to figure out if stage needs autoscaling // } // if(workersMap.size()>0) // avgOfAvgs /= workersMap.size(); // if(avgOfAvgs>1.0) // logger.info("Job " + jobId + " stage " + stage + " has " + avgOfAvgs + "% average data drop from " + // workersMap.size() + " of its workers"); // jobAutoScaleObserver.onNext( // new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.DataDrop, stage, avgOfAvgs, "")); // } // }, 30, 30, TimeUnit.SECONDS // TODO make it configurable // )); // } else { // logger.info("DataDropHandler starting only for worker outlier detection of job " + jobId); // } // return new Subscriber<HeartbeatPayloadHandler.Data>() { // @Override // public void onCompleted() { // logger.info("**** onCompleted"); // workerOutlier.completed(); // child.unsubscribe(); // } // @Override // public void onError(Throwable e) { // logger.error("Unexpected error: " + e.getMessage(), e); // } // @Override // public void onNext(HeartbeatPayloadHandler.Data data) { // try { // StatusPayloads.DataDropCounts dataDrop = objectMapper.readValue(data.getPayload().getData(), StatusPayloads.DataDropCounts.class); // double droppedPercentage = (double)dataDrop.getDroppedCount()*100.0 / // 
(double)(dataDrop.getDroppedCount()+dataDrop.getOnNextCount()); // addDataPoint(data.getWorkerIndex(), droppedPercentage); // } catch (IOException e) { // logger.error("Invalid json for dataDrop heartbeat payload for job " + jobId + ", stage " + // data.getStage() + " index " + data.getWorkerIndex() + " number " + data.getWorkerNumber() + // ": " + e.getMessage()); // } // } // }; // } // } // // @Override // public Observer<HeartbeatPayloadHandler.Data> call() { // start(); // return new SerializedObserver<>(dataDropSubject); // } // // private void start() { // dataDropSubject // .groupBy(new Func1<HeartbeatPayloadHandler.Data, Integer>() { // @Override // public Integer call(HeartbeatPayloadHandler.Data data) { // return data.getStage(); // } // }) // .lift(new DropOperator<GroupedObservable<Integer, HeartbeatPayloadHandler.Data>>(DataDropHandler.class.getName())) // .flatMap(new Func1<GroupedObservable<Integer, HeartbeatPayloadHandler.Data>, Observable<?>>() { // @Override // public Observable<?> call(final GroupedObservable<Integer, HeartbeatPayloadHandler.Data> go) { // final Observable<Integer> iO = Observable.create(new Observable.OnSubscribe<Integer>() { // @Override // public void call(Subscriber<? 
super Integer> subscriber) { // inners.put(go.getKey(), subscriber); // logger.info("Subscribed to inner of stage " + go.getKey() + " of job " + jobId); // } // }); // return go // .takeUntil(iO) // .lift(new StageDataDropOperator2<>(go.getKey())) // .doOnUnsubscribe(new Action0() { // @Override // public void call() { // logger.info("Unsubscribed stage " + go.getKey() + " of job " + jobId); // } // }); // } // }) // .onErrorResumeNext(new Func1<Throwable, Observable<?>>() { // @Override // public Observable<?> call(Throwable throwable) { // logger.warn("Unexpected error job " + jobId + ": " + throwable.getMessage(), throwable); // return Observable.empty(); // } // }) // .doOnUnsubscribe(new Action0() { // @Override // public void call() { // for(Subscriber s: inners.values()) { // s.onNext(1); // s.onCompleted(); // } // inners.clear(); // } // }) // .subscribe(); // } //}
4,319
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/heartbeathandlers/ResUsageHandler.java
//package io.mantisrx.server.master.heartbeathandlers; // //import com.fasterxml.jackson.databind.ObjectMapper; //import io.mantisrx.runtime.descriptor.StageScalingPolicy; //import io.mantisrx.server.core.StatusPayloads; //import io.mantisrx.server.master.MantisJobMgr; //import io.reactivx.mantis.operators.DropOperator; //import org.slf4j.Logger; //import org.slf4j.LoggerFactory; //import rx.Observable; //import rx.Observer; //import rx.Subscriber; //import rx.Subscription; //import rx.functions.Action0; //import rx.functions.Action1; //import rx.functions.Func1; //import rx.observables.GroupedObservable; //import rx.observers.SerializedObserver; //import rx.schedulers.Schedulers; //import rx.subjects.PublishSubject; // //import java.io.IOException; //import java.util.ArrayList; //import java.util.HashMap; //import java.util.List; //import java.util.Map; //import java.util.concurrent.TimeUnit; //import java.util.concurrent.atomic.AtomicReference; // ///* package */ class ResUsageHandler implements PayloadExecutor { // private static final Logger logger = LoggerFactory.getLogger(ResUsageHandler.class); // private final String jobId; // private final PublishSubject<HeartbeatPayloadHandler.Data> resUsageSubject = PublishSubject.create(); // private final ObjectMapper objectMapper = new ObjectMapper(); // private final Observer<JobAutoScaler.Event> jobAutoScaleObserver; // private final MantisJobMgr jobMgr; // // public ResUsageHandler(String jobId, Observer<JobAutoScaler.Event> jobAutoScaleObserver, MantisJobMgr jobMgr) { // this.jobId = jobId; // this.jobAutoScaleObserver = jobAutoScaleObserver; // this.jobMgr = jobMgr; // } // // private String getStagedMetricName(String metricName, int stageNum) { // return metricName + "-stage-" + stageNum; // } // // @Override // public Observer<HeartbeatPayloadHandler.Data> call() { // start(); // return new SerializedObserver<>(resUsageSubject); // } // // private static class ResUsageData { // private final long when; // 
private final StatusPayloads.ResourceUsage usage; // private ResUsageData(long when, StatusPayloads.ResourceUsage usage) { // this.when = when; // this.usage = usage; // } // private long getWhen() { // return when; // } // private StatusPayloads.ResourceUsage getUsage() { // return usage; // } // } // // private ResUsageData getAverages(List<ResUsageData> usages) { // double cpuLimit=0.0; // double cpuUsageCurrent=0.0; // double cpuUsagePeak=0.0; // double memLimit=0.0; // double memCacheCurrent=0.0; // double memCachePeak=0.0; // double totMemUsageCurrent=0.0; // double totMemUsagePeak=0.0; // double nwBytesCurrent=0.0; // double nwBytesPeak=0.0; // int n=0; // for(ResUsageData usageData: usages) { // n++; // StatusPayloads.ResourceUsage u = usageData.getUsage(); // cpuLimit = u.getCpuLimit(); // cpuUsageCurrent = ((cpuUsageCurrent*(n-1)) + u.getCpuUsageCurrent())/(double)n; // cpuUsagePeak = ((cpuUsagePeak*(n-1)) + u.getCpuUsagePeak())/(double)n; // memLimit = u.getMemLimit(); // memCacheCurrent = ((memCacheCurrent*(n-1)) + u.getMemCacheCurrent())/(double)n; // memCachePeak = ((memCachePeak*(n-1)) + u.getMemCachePeak())/(double)n; // totMemUsageCurrent = ((totMemUsageCurrent*(n-1)) + u.getTotMemUsageCurrent())/(double)n; // totMemUsagePeak = ((totMemUsagePeak*(n-1)) + u.getTotMemUsagePeak())/(double)n; // nwBytesCurrent = ((nwBytesCurrent*(n-1)) + u.getNwBytesCurrent())/(double)n; // nwBytesPeak = ((nwBytesPeak*(n-1)) + u.getNwBytesPeak())/(double)n; // } // return new ResUsageData(System.currentTimeMillis(), // new StatusPayloads.ResourceUsage(cpuLimit, cpuUsageCurrent, cpuUsagePeak, memLimit, // memCacheCurrent, memCachePeak, totMemUsageCurrent, totMemUsagePeak, nwBytesCurrent, nwBytesPeak)); // } // // private class StageResUsageOperator<T,R> implements Observable.Operator<Object, HeartbeatPayloadHandler.Data> { // // private final int stage; // private final int valuesToKeep=2; // private final Map<Integer, List<ResUsageData>> workersMap=new HashMap<>(); // 
public StageResUsageOperator(int stage) { // logger.info("setting operator for " + jobId + " stage " + stage); // this.stage = stage; // } // // private void addDataPoint(int workerIndex, StatusPayloads.ResourceUsage usage) { // List<ResUsageData> usageDataList = workersMap.get(workerIndex); // if(usageDataList==null) { // workersMap.put(workerIndex, new ArrayList<ResUsageData>()); // usageDataList = workersMap.get(workerIndex); // } // usageDataList.add(new ResUsageData(System.currentTimeMillis(), usage)); // if(usageDataList.size()>valuesToKeep) // usageDataList.remove(0); // // remove any data for workers with index that don't exist anymore (happens when stage scales down) // int maxIdx=0; // for(Integer idx: workersMap.keySet()) // maxIdx = Math.max(maxIdx, idx); // for(int idx=jobMgr.getJobMetadata().getStageMetadata(stage).getNumWorkers(); idx<=maxIdx; idx++) // workersMap.remove(idx); // } // // @Override // public Subscriber<? super HeartbeatPayloadHandler.Data> call(final Subscriber<? 
super Object> child) { // child.add(Schedulers.computation().createWorker().schedulePeriodically( // new Action0() { // @Override // public void call() { // List<ResUsageData> listOfAvgs = new ArrayList<>(); // for (Map.Entry<Integer, List<ResUsageData>> entry : workersMap.entrySet()) { // listOfAvgs.add(getAverages(entry.getValue())); // } // ResUsageData avgOfAvgs = getAverages(listOfAvgs); // logger.debug("Job " + jobId + " stage " + stage + " avgResUsage from " + // workersMap.size() + " workers: " + avgOfAvgs.getUsage()); // jobAutoScaleObserver.onNext( // new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.CPU, stage, // avgOfAvgs.getUsage().getCpuUsageCurrent(), "")); // jobAutoScaleObserver.onNext( // new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.Memory, stage, // avgOfAvgs.getUsage().getTotMemUsageCurrent(), "")); // jobAutoScaleObserver.onNext( // new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.Network, stage, // avgOfAvgs.getUsage().getNwBytesCurrent(), "")); // } // }, 30, 30, TimeUnit.SECONDS // TODO make it configurable // )); // return new Subscriber<HeartbeatPayloadHandler.Data>() { // @Override // public void onCompleted() { // child.unsubscribe(); // } // @Override // public void onError(Throwable e) { // logger.error("Unexpected error: " + e.getMessage(), e); // } // @Override // public void onNext(HeartbeatPayloadHandler.Data data) { // try { // StatusPayloads.ResourceUsage usage= objectMapper.readValue(data.getPayload().getData(), StatusPayloads.ResourceUsage.class); //// logger.info("Got resource usage of job " + jobId + " stage " + stage + //// ", worker " + data.getWorkerNumber() + ": " + usage); // addDataPoint(data.getWorkerIndex(), usage); // } catch (IOException e) { // logger.error("Invalid json for dataDrop heartbeat payload for job " + jobId + ", stage " + // data.getStage() + " index " + data.getWorkerIndex() + " number " + data.getWorkerNumber() + // ": " + e.getMessage()); // } // } // }; // } // } // // 
private void start() { // final AtomicReference<List<Subscription>> ref = new AtomicReference<List<Subscription>>(new ArrayList<Subscription>()); // resUsageSubject // .groupBy(new Func1<HeartbeatPayloadHandler.Data, Integer>() { // @Override // public Integer call(HeartbeatPayloadHandler.Data data) { // return data.getStage(); // } // }) // .lift(new DropOperator<GroupedObservable<Integer, HeartbeatPayloadHandler.Data>>(ResUsageHandler.class.getName())) // .doOnNext(new Action1<GroupedObservable<Integer, HeartbeatPayloadHandler.Data>>() { // @Override // public void call(GroupedObservable<Integer, HeartbeatPayloadHandler.Data> go) { // final Integer stage = go.getKey(); // final Subscription s = go // .lift(new StageResUsageOperator<>(stage)) // .subscribe(); // ref.get().add(s); // } // }) // .doOnUnsubscribe(new Action0() { // @Override // public void call() { // for(Subscription s: ref.get()) // s.unsubscribe(); // } // }) // .subscribe(); // } //}
4,320
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/heartbeathandlers/PayloadExecutor.java
// NOTE(review): dead code — this interface is entirely commented out (as is the
// rest of the heartbeathandlers package in this file set); prefer deleting
// commented-out code and relying on version control history to recover it.
//package io.mantisrx.server.master.heartbeathandlers;
//
//import rx.Observer;
//
//public interface PayloadExecutor {
//    public Observer<HeartbeatPayloadHandler.Data> call();
//}
4,321
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/heartbeathandlers/JobAutoScaler.java
///* // * Copyright 2019 Netflix, Inc. // * // * Licensed under the Apache License, Version 2.0 (the "License"); // * you may not use this file except in compliance with the License. // * You may obtain a copy of the License at // * // * http://www.apache.org/licenses/LICENSE-2.0 // * // * Unless required by applicable law or agreed to in writing, software // * distributed under the License is distributed on an "AS IS" BASIS, // * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // * See the License for the specific language governing permissions and // * limitations under the License. // */ // //package io.mantisrx.server.master.heartbeathandlers; // //import java.util.ArrayList; //import java.util.HashMap; //import java.util.List; //import java.util.Map; //import java.util.concurrent.atomic.AtomicReference; // //import io.mantisrx.runtime.descriptor.StageScalingPolicy; //import io.mantisrx.server.core.stats.UsageDataStats; //import io.mantisrx.server.master.MantisJobMgr; //import io.mantisrx.server.master.store.InvalidJobException; //import io.mantisrx.server.master.store.MantisStageMetadata; //import org.slf4j.Logger; //import org.slf4j.LoggerFactory; //import rx.Observable; //import rx.Observer; //import rx.Subscriber; //import rx.Subscription; //import rx.functions.Action0; //import rx.functions.Action1; //import rx.functions.Func1; //import rx.observables.GroupedObservable; //import rx.observers.SerializedObserver; //import rx.subjects.PublishSubject; // // //class JobAutoScaler { // // static class Event { // // private final StageScalingPolicy.ScalingReason type; // private final int stage; // private final double value; // private final String message; // // public Event(StageScalingPolicy.ScalingReason type, int stage, double value, String message) { // this.type = type; // this.stage = stage; // this.value = value; // this.message = message; // } // // public StageScalingPolicy.ScalingReason getType() { // return type; // } // // 
public int getStage() { // return stage; // } // // public double getValue() { // return value; // } // // public String getMessage() { // return message; // } // // @Override // public String toString() { // return "type=" + type + ", stage=" + stage + ", value=" + value + ", message=" + message; // } // } // // private class StageScaleOperator<T, R> implements Observable.Operator<Object, Event> { // // private final int stage; // private final MantisStageMetadata stageMetadata; // private volatile long lastScaledAt = 0L; // // private StageScaleOperator(int stage, MantisStageMetadata stageMetadata) { // this.stage = stage; // this.stageMetadata = stageMetadata; // } // // @Override // public Subscriber<? super Event> call(final Subscriber<? super Object> child) { // // return new Subscriber<Event>() { // private final Map<StageScalingPolicy.ScalingReason, UsageDataStats> dataStatsMap = new HashMap<>(); // // @Override // public void onCompleted() { // child.unsubscribe(); // } // // @Override // public void onError(Throwable e) { // logger.error("Unexpected error: " + e.getMessage(), e); // } // // @Override // public void onNext(Event event) { // final StageScalingPolicy scalingPolicy = stageMetadata.getScalingPolicy(); // long coolDownSecs = scalingPolicy == null ? 
Long.MAX_VALUE : scalingPolicy.getCoolDownSecs(); // boolean scalable = stageMetadata.getScalable() && scalingPolicy != null && scalingPolicy.isEnabled(); // //logger.info("Will check for autoscaling job " + jobId + " stage " + stage + " due to event: " + event); // if (scalable && scalingPolicy != null) { // final StageScalingPolicy.Strategy strategy = scalingPolicy.getStrategies().get(event.getType()); // if (strategy != null) { // double effectiveValue = getEffectiveValue(event.getType(), event.getValue()); // UsageDataStats stats = dataStatsMap.get(event.getType()); // if (stats == null) { // stats = new UsageDataStats( // strategy.getScaleUpAbovePct(), strategy.getScaleDownBelowPct(), strategy.getRollingCount()); // dataStatsMap.put(event.getType(), stats); // } // stats.add(effectiveValue); // if (lastScaledAt < (System.currentTimeMillis() - coolDownSecs * 1000)) { // logger.info(jobId + ", stage " + stage + ": eff=" + // String.format(PercentNumberFormat, effectiveValue) + ", thresh=" + strategy.getScaleUpAbovePct()); // if (stats.getHighThreshTriggered()) { // logger.info("Attempting to scale up stage " + stage + " of job " + jobId + " by " + // scalingPolicy.getIncrement() + " workers, because " + // event.type + " exceeded scaleUpThreshold of " + // String.format(PercentNumberFormat, strategy.getScaleUpAbovePct()) + " " + // stats.getCurrentHighCount() + " times"); // try { // int scaledUp = // jobMgr.scaleUpStage(stage, scalingPolicy.getIncrement(), event.getType() + " with value " + // String.format(PercentNumberFormat, effectiveValue) + // " exceeded scaleUp threshold of " + strategy.getScaleUpAbovePct()); // if (scaledUp > 0) // lastScaledAt = System.currentTimeMillis(); // } catch (InvalidJobException e) { // logger.error("Couldn't scale up job " + jobId + " stage " + stage + ": " + e.getMessage()); // } // } else if (stats.getLowThreshTriggered()) { // logger.info("Attempting to scale down stage " + stage + " of job " + jobId + " by " + // 
scalingPolicy.getDecrement() + " workers because " + event.getType() + // " is below scaleDownThreshold of " + strategy.getScaleDownBelowPct() + // " " + stats.getCurrentLowCount() + " times"); // try { // final int scaledDown = // jobMgr.scaleDownStage(stage, scalingPolicy.getDecrement(), event.getType() + " with value " + // String.format(PercentNumberFormat, effectiveValue) + // " is below scaleDown threshold of " + strategy.getScaleDownBelowPct()); // if (scaledDown > 0) // lastScaledAt = System.currentTimeMillis(); // } catch (InvalidJobException e) { // logger.error("Couldn't scale down job " + jobId + " stage " + stage + ": " + e.getMessage()); // } // } // } // } // } // } // // private double getEffectiveValue(StageScalingPolicy.ScalingReason type, double value) { // switch (type) { // case CPU: // return 100.0 * value / stageMetadata.getMachineDefinition().getCpuCores(); // case Memory: // return 100.0 * value / stageMetadata.getMachineDefinition().getMemoryMB(); // case DataDrop: // return value; // case Network: // // value is in bytes, multiply by 8, divide by M // return 100.0 * value * 8 / (1024.0 * 1024.0 * stageMetadata.getMachineDefinition().getNetworkMbps()); // default: // logger.warn("Unsupported type " + type); // return 0.0; // } // } // }; // } // } // // private static final Logger logger = LoggerFactory.getLogger(JobAutoScaler.class); // private static final String PercentNumberFormat = "%5.2f"; // private final String jobId; // private final MantisJobMgr jobMgr; // private final PublishSubject<Event> subject; // // JobAutoScaler(String jobId, MantisJobMgr jobMgr) { // this.jobId = jobId; // this.jobMgr = jobMgr; // subject = PublishSubject.create(); // } // // Observer<Event> getObserver() { // return new SerializedObserver<>(subject); // } // // void start() { // final AtomicReference<List<Subscription>> ref = new AtomicReference<List<Subscription>>(new ArrayList<Subscription>()); // subject // .groupBy(new Func1<Event, Integer>() { // 
@Override // public Integer call(Event event) { // return event.getStage(); // } // }) // .doOnNext(new Action1<GroupedObservable<Integer, Event>>() { // @Override // public void call(GroupedObservable<Integer, Event> go) { // Integer stage = go.getKey(); // final StageScalingPolicy scalingPolicy = jobMgr.getJobMetadata().getStageMetadata(stage).getScalingPolicy(); // logger.info("Setting up stage scale operator for job " + jobId + " stage " + stage); // final Subscription s = go // .lift(new StageScaleOperator<>(stage, jobMgr.getJobMetadata().getStageMetadata(stage))) // .subscribe(); // ref.get().add(s); // } // }) // .doOnUnsubscribe(new Action0() { // @Override // public void call() { // logger.info("Unsubscribing for autoscaler of job " + jobId); // for (Subscription s : ref.get()) // s.unsubscribe(); // } // }) // .subscribe(); // } //}
4,322
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/heartbeathandlers/SubscriptionStateHandler.java
///* // * Copyright 2019 Netflix, Inc. // * // * Licensed under the Apache License, Version 2.0 (the "License"); // * you may not use this file except in compliance with the License. // * You may obtain a copy of the License at // * // * http://www.apache.org/licenses/LICENSE-2.0 // * // * Unless required by applicable law or agreed to in writing, software // * distributed under the License is distributed on an "AS IS" BASIS, // * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // * See the License for the specific language governing permissions and // * limitations under the License. // */ // //package io.mantisrx.server.master.heartbeathandlers; // //import java.util.concurrent.ScheduledFuture; //import java.util.concurrent.ScheduledThreadPoolExecutor; //import java.util.concurrent.TimeUnit; //import java.util.concurrent.atomic.AtomicReference; // //import io.mantisrx.common.metrics.Counter; //import io.mantisrx.common.metrics.Metrics; //import io.mantisrx.common.metrics.MetricsRegistry; //import io.mantisrx.runtime.MantisJobDurationType; //import io.mantisrx.server.master.MantisJobMgr; //import org.slf4j.Logger; //import org.slf4j.LoggerFactory; //import rx.Observer; // // //class SubscriptionStateHandler implements PayloadExecutor { // // private static final Logger logger = LoggerFactory.getLogger(SubscriptionStateHandler.class); // private final AtomicReference<ScheduledFuture> timedOutExitFutureRef = new AtomicReference<>(); // private final Counter numEphemeralJobTerminated; // private final String jobId; // // SubscriptionStateHandler(String jobId, MantisJobMgr jobMgr) { // this.jobId = jobId; // Metrics m = new Metrics.Builder() // .name(SubscriptionStateHandler.class.getName()) // .addCounter("numEphemeralJobTerminated") // .build(); // m = MetricsRegistry.getInstance().registerAndGet(m); // numEphemeralJobTerminated = m.getCounter("numEphemeralJobTerminated"); // } // // public Observer<HeartbeatPayloadHandler.Data> call() { // 
return new Observer<HeartbeatPayloadHandler.Data>() { // @Override // public void onCompleted() { // ScheduledFuture prevFutureKill = timedOutExitFutureRef.getAndSet(null); // if (prevFutureKill != null && !prevFutureKill.isCancelled() && !prevFutureKill.isDone()) { // logger.info("Cancelled future kill upon active completion of ephemeral job " + jobId); // prevFutureKill.cancel(false); // } // } // // @Override // public void onError(Throwable e) { // logger.error("Unexpected error " + e.getMessage(), e); // } // // @Override // public void onNext(final HeartbeatPayloadHandler.Data data) { // if (data.getJobMgr() == null || data.getJobMgr().getJobMetadata() == null || // data.getJobMgr().getJobMetadata().getSla().getDurationType() != MantisJobDurationType.Transient) // return; // Boolean subscribed = Boolean.valueOf(data.getPayload().getData()); // logger.info(jobId + ": Handling ephemeral job subscriptions, subscribed=" + subscribed); // if (!subscribed) { // // ephemeral job with no subscribers, consider for reaping // final long subscriberTimeoutSecs = data.getJobMgr().evalSubscriberTimeoutSecs(); // if (timedOutExitFutureRef.get() == null) { // ScheduledFuture<?> futureKill = new ScheduledThreadPoolExecutor(1).schedule(new Runnable() { // @Override // public void run() { // numEphemeralJobTerminated.increment(); // timedOutExitFutureRef.set(null); // data.getJobMgr().handleSubscriberTimeout(); // } // }, subscriberTimeoutSecs, TimeUnit.SECONDS); // if (!timedOutExitFutureRef.compareAndSet(null, futureKill)) // futureKill.cancel(false); // else // logger.info("Setup future job kill (in " + subscriberTimeoutSecs + // " secs) upon no subscribers for ephemeral job " + jobId); // } // } else { // ScheduledFuture prevFutureKill = timedOutExitFutureRef.getAndSet(null); // if (prevFutureKill != null && !prevFutureKill.isCancelled() && !prevFutureKill.isDone()) { // logger.info("Cancelled future kill upon active subscriptions of ephemeral job " + jobId); // 
prevFutureKill.cancel(false); // } // } // } // }; // } // //}
4,323
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/domain/JobClusterMetadata.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.domain;

import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicLong;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.parameter.Parameter;

/**
 * Persisted metadata describing a Mantis job cluster: its name, uploaded job
 * jars, SLA, default parameters, owner, labels and operational flags.
 *
 * <p>All fields are immutable except {@code lastJobCount}, which is an
 * {@link AtomicLong} seeded from the persisted value and mutated in place by
 * callers via {@link #getLastJobCount()}.
 *
 * <p>Collection-valued fields ({@code jars}, {@code parameters},
 * {@code labels}) are defaulted to empty lists when absent from the JSON, so
 * getters never return null.
 */
public class JobClusterMetadata {

    private final String name;
    private final List<Jar> jars;
    private final JobOwner owner;
    private final SLA sla;
    private final List<Parameter> parameters;
    private final boolean isReadyForJobMaster;
    private final boolean disabled;
    private final WorkerMigrationConfig migrationConfig;
    private final List<Label> labels;
    // Monotonically increasing count used to derive the next job number;
    // mutable by design (shared via the getter).
    private final AtomicLong lastJobCount = new AtomicLong(0);

    /**
     * JSON-creator constructor.
     *
     * @param name                cluster name (expected non-null)
     * @param jars                uploaded job artifacts; null is treated as empty
     * @param sla                 cluster SLA (may be null)
     * @param parameters          default job parameters; null is treated as empty
     * @param owner               owning contact info (may be null)
     * @param lastJobCount        persisted job counter used to seed {@link #lastJobCount}
     * @param disabled            whether the cluster is disabled
     * @param isReadyForJobMaster whether jobs may run with a job master
     * @param migrationConfig     worker migration strategy (may be null)
     * @param labels              cluster labels; null is treated as empty
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public JobClusterMetadata(@JsonProperty("name") String name,
                              @JsonProperty("jars") List<Jar> jars,
                              @JsonProperty("sla") SLA sla,
                              @JsonProperty("parameters") List<Parameter> parameters,
                              @JsonProperty("owner") JobOwner owner,
                              @JsonProperty("lastJobCount") long lastJobCount,
                              @JsonProperty("disabled") boolean disabled,
                              @JsonProperty("isReadyForJobMaster") boolean isReadyForJobMaster,
                              @JsonProperty("migrationConfig") WorkerMigrationConfig migrationConfig,
                              @JsonProperty("labels") List<Label> labels) {
        this.name = name;
        this.jars = Optional.ofNullable(jars).orElse(new ArrayList<>());
        this.sla = sla;
        this.parameters = Optional.ofNullable(parameters).orElse(new ArrayList<>());
        this.isReadyForJobMaster = isReadyForJobMaster;
        this.owner = owner;
        this.migrationConfig = migrationConfig;
        // Default to an empty list for consistency with jars/parameters so
        // getLabels() never returns null.
        this.labels = Optional.ofNullable(labels).orElse(new ArrayList<>());
        this.disabled = disabled;
        this.lastJobCount.set(lastJobCount);
    }

    public String getName() {
        return name;
    }

    public List<Jar> getJars() {
        return jars;
    }

    public JobOwner getOwner() {
        return owner;
    }

    public SLA getSla() {
        return sla;
    }

    public List<Parameter> getParameters() {
        return parameters;
    }

    public boolean isReadyForJobMaster() {
        return isReadyForJobMaster;
    }

    public boolean isDisabled() {
        return disabled;
    }

    public WorkerMigrationConfig getMigrationConfig() {
        return migrationConfig;
    }

    public List<Label> getLabels() {
        return labels;
    }

    /** Returns the live, mutable counter — callers increment it directly. */
    public AtomicLong getLastJobCount() {
        return lastJobCount;
    }
}
4,324
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/domain/Jar.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.domain; import java.net.URL; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty; import io.mantisrx.runtime.descriptor.SchedulingInfo; public class Jar { private final URL url; private final String version; private final long uploadedAt; private final SchedulingInfo schedulingInfo; @JsonCreator @JsonIgnoreProperties(ignoreUnknown = true) public Jar(@JsonProperty("url") URL url, @JsonProperty("uploadedAt") long uploadedAt, @JsonProperty("version") String version, @JsonProperty("schedulingInfo") SchedulingInfo schedulingInfo) { this.url = url; this.uploadedAt = uploadedAt; this.version = (version == null || version.isEmpty()) ? "" + System.currentTimeMillis() : version; this.schedulingInfo = schedulingInfo; } public URL getUrl() { return url; } public long getUploadedAt() { return uploadedAt; } public String getVersion() { return version; } public SchedulingInfo getSchedulingInfo() { return schedulingInfo; } }
4,325
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/domain/SLA.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.domain;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.fenzo.triggers.CronTrigger;
import com.netflix.fenzo.triggers.TriggerOperator;
import com.netflix.fenzo.triggers.exceptions.SchedulerException;
import com.netflix.fenzo.triggers.exceptions.TriggerNotFoundException;
import io.mantisrx.server.master.store.NamedJob;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Job cluster SLA: the min/max number of jobs to keep running, or — when a
 * cron spec is present — a cron-driven submission policy.
 *
 * <p>When a non-empty {@code cronSpec} is supplied, the constructor forces
 * min=0 / max=1 (at most one cron-managed job) and ignores the passed min/max.
 * Otherwise, min/max are taken as-is and the cron fields are null.
 *
 * <p>Cron triggers are registered against a single process-wide
 * {@link TriggerOperator}. {@code initCron}/{@code destroyCron} mutate the
 * trigger state and are NOT thread-safe — callers must serialize access.
 */
public class SLA {

    private static final Logger logger = LoggerFactory.getLogger(SLA.class);
    // Upper bounds enforced by validate().
    @JsonIgnore
    private static final int MaxValueForSlaMin = 5;
    @JsonIgnore
    private static final int MaxValueForSlaMax = 100;
    // Shared cron engine for all SLA instances; a failed initialize() is fatal.
    @JsonIgnore
    private static final TriggerOperator triggerOperator;

    static {
        triggerOperator = new TriggerOperator(1);
        try {
            triggerOperator.initialize();
        } catch (SchedulerException e) {
            logger.error("Unexpected: " + e.getMessage(), e);
            throw new RuntimeException(e);
        }
    }

    private final int min;
    private final int max;
    private final String cronSpec;
    private final IJobClusterDefinition.CronPolicy cronPolicy;
    @JsonIgnore
    private final boolean hasCronSpec;
    // Policy applied when a cronSpec is present but no explicit policy given.
    // NOTE: this is an instance field and participates in equals/hashCode,
    // though its value is identical for every instance.
    @JsonIgnore
    private final IJobClusterDefinition.CronPolicy defaultPolicy = IJobClusterDefinition.CronPolicy.KEEP_EXISTING;
    // Mutable cron registration state, populated by initCron().
    @JsonIgnore
    private CronTrigger<NamedJob> scheduledTrigger;
    @JsonIgnore
    private String triggerGroup = null;
    @JsonIgnore
    private String triggerId = null;

    /**
     * JSON-creator constructor.
     *
     * @param min        minimum number of jobs (ignored when cronSpec is set)
     * @param max        maximum number of jobs (ignored when cronSpec is set)
     * @param cronSpec   optional cron expression; non-empty enables cron mode
     * @param cronPolicy policy for handling an existing job on cron fire;
     *                   defaults to KEEP_EXISTING when null in cron mode
     * @throws IllegalArgumentException if min/max violate the bounds checked
     *                                  by {@link #validate()}
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public SLA(
            @JsonProperty("min") int min,
            @JsonProperty("max") int max,
            @JsonProperty("cronSpec") String cronSpec,
            @JsonProperty("cronPolicy") IJobClusterDefinition.CronPolicy cronPolicy
    ) {
        if (cronSpec != null && !cronSpec.isEmpty()) {
            // Cron mode: exactly zero-or-one job, driven by the cron trigger.
            this.cronSpec = cronSpec;
            hasCronSpec = true;
            this.max = 1;
            this.min = 0;
            this.cronPolicy = cronPolicy == null ? defaultPolicy : cronPolicy;
        } else {
            // Plain min/max mode; cron fields stay null.
            hasCronSpec = false;
            this.min = min;
            this.max = max;
            this.cronSpec = null;
            this.cronPolicy = null;
        }
        validate();
    }

    public int getMin() {
        return min;
    }

    public int getMax() {
        return max;
    }

    public String getCronSpec() {
        return cronSpec;
    }

    public JobClusterDefinitionImpl.CronPolicy getCronPolicy() {
        return cronPolicy;
    }

    /** Enforces max >= min and the static upper bounds on both values. */
    private void validate() throws IllegalArgumentException {
        if (max < min)
            throw new IllegalArgumentException("Cannot have max=" + max + " < min=" + min);
        if (min > MaxValueForSlaMin)
            throw new IllegalArgumentException("Specified min sla value " + min + " cannot be >" + MaxValueForSlaMin);
        if (max > MaxValueForSlaMax)
            throw new IllegalArgumentException("Max sla value " + max + " cannot be >" + MaxValueForSlaMax);
    }

    // Registers the cron trigger for the given job. No-op unless in cron mode
    // and not already registered (triggerId == null).
    // caller must lock to avoid concurrent access with destroyCron()
    private void initCron(NamedJob job) throws SchedulerException {
        if (!hasCronSpec || triggerId != null)
            return;
        logger.info("Init'ing cron for " + job.getName());
        triggerGroup = job.getName() + "-" + this;
        try {
            scheduledTrigger = new CronTrigger<>(cronSpec, job.getName(), job, NamedJob.class, NamedJob.CronTriggerAction.class);
            triggerId = triggerOperator.registerTrigger(triggerGroup, scheduledTrigger);
        } catch (IllegalArgumentException e) {
            // An invalid cron expression surfaces as IAE; rethrow in the
            // checked form callers expect.
            throw new SchedulerException(e.getMessage(), e);
        }
    }

    // Deregisters the cron trigger, if any; deletion failures are logged and
    // swallowed (best-effort cleanup).
    // caller must lock to avoid concurrent access with initCron()
    private void destroyCron() {
        try {
            if (triggerId != null) {
                logger.info("Destroying cron " + triggerId);
                triggerOperator.deleteTrigger(triggerGroup, triggerId);
                triggerId = null;
            }
        } catch (TriggerNotFoundException | SchedulerException e) {
            logger.warn("Couldn't delete trigger group " + triggerGroup + ", id " + triggerId);
        }
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((cronPolicy == null) ? 0 : cronPolicy.hashCode());
        result = prime * result + ((cronSpec == null) ? 0 : cronSpec.hashCode());
        result = prime * result + ((defaultPolicy == null) ? 0 : defaultPolicy.hashCode());
        result = prime * result + (hasCronSpec ? 1231 : 1237);
        result = prime * result + max;
        result = prime * result + min;
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        SLA other = (SLA) obj;
        if (cronPolicy != other.cronPolicy)
            return false;
        if (cronSpec == null) {
            if (other.cronSpec != null)
                return false;
        } else if (!cronSpec.equals(other.cronSpec))
            return false;
        if (defaultPolicy != other.defaultPolicy)
            return false;
        if (hasCronSpec != other.hasCronSpec)
            return false;
        if (max != other.max)
            return false;
        return min == other.min;
    }

    @Override
    public String toString() {
        return "SLA [min=" + min + ", max=" + max + ", cronSpec=" + cronSpec + ", cronPolicy=" + cronPolicy
                + ", hasCronSpec=" + hasCronSpec + ", defaultPolicy=" + defaultPolicy + ", scheduledTrigger="
                + scheduledTrigger + ", triggerGroup=" + triggerGroup + ", triggerId=" + triggerId + "]";
    }
}
4,326
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/domain/WorkerRequest.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.domain;

import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;

import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.parameter.Parameter;

/**
 * A request to launch one worker of a job stage: identifies the worker
 * (jobId/stage/index/number), the resources it needs, the job artifact, and
 * the job-level SLA/constraints it carries.
 *
 * <p>Ports are assigned incrementally via {@link #addPort(int)}: the first
 * four ports assigned become the metrics, debug, console and custom ports (in
 * that exact order), and any further ports become the worker's regular data
 * ports. Until assigned, the four named ports hold the sentinel -1.
 */
public class WorkerRequest {

    private final long subscriptionTimeoutSecs;
    private final long minRuntimeSecs;
    private final long jobSubmittedAt;
    private final String user;
    // preferred Cluster to launch the worker on
    private final Optional<String> preferredCluster;
    private String jobName;
    private String jobId;
    private int workerIndex;
    private int workerNumber;
    private URL jobJarUrl;
    private int workerStage;
    private int totalStages;
    private MachineDefinition definition;
    private int numInstancesAtStage;
    private int numPortsPerInstance;
    // Named ports; -1 means "not yet assigned" (see addPort()).
    private int metricsPort = -1;
    private int debugPort = -1;
    private int consolePort = -1;
    private int customPort = -1;
    // Regular data ports, assigned after the four named ports are filled.
    private List<Integer> ports;
    private List<Parameter> parameters;
    private JobSla jobSla;
    private List<JobConstraints> hardConstraints;
    private List<JobConstraints> softConstraints;
    private SchedulingInfo schedulingInfo;

    /**
     * Builds a worker launch request.
     *
     * @param definition              machine resources the worker needs
     * @param jobId                   id of the job the worker belongs to
     * @param workerIndex             logical index of the worker within its stage
     * @param workerNumber            unique (monotonic) worker number within the job
     * @param jobJarUrl               location of the job artifact to run
     * @param workerStage             stage number this worker belongs to
     * @param totalStages             total number of stages in the job
     * @param numInstancesAtStage     number of workers at this stage
     * @param jobName                 name of the job
     * @param numPortsPerInstance     data ports requested per worker; 4 more are
     *                                added internally for the named ports
     * @param parameters              job parameters
     * @param jobSla                  job SLA
     * @param hardConstraints         scheduling constraints that must hold
     * @param softConstraints         scheduling constraints that should hold
     * @param schedulingInfo          full job scheduling info
     * @param subscriptionTimeoutSecs timeout for ephemeral-job subscriptions
     * @param minRuntimeSecs          minimum runtime before the job may be reaped
     * @param jobSubmittedAt          job submission timestamp (epoch millis)
     * @param user                    submitting user
     * @param preferredCluster        optional preferred agent cluster
     */
    public WorkerRequest(MachineDefinition definition, String jobId, int workerIndex, int workerNumber,
                         URL jobJarUrl, int workerStage, int totalStages, int numInstancesAtStage,
                         String jobName, int numPortsPerInstance, List<Parameter> parameters,
                         JobSla jobSla, List<JobConstraints> hardConstraints, List<JobConstraints> softConstraints,
                         SchedulingInfo schedulingInfo, long subscriptionTimeoutSecs, long minRuntimeSecs,
                         long jobSubmittedAt, final String user, final Optional<String> preferredCluster) {
        this.definition = definition;
        this.jobId = jobId;
        this.workerIndex = workerIndex;
        this.workerNumber = workerNumber;
        this.jobJarUrl = jobJarUrl;
        this.workerStage = workerStage;
        this.totalStages = totalStages;
        this.numInstancesAtStage = numInstancesAtStage;
        this.jobName = jobName;
        this.numPortsPerInstance = numPortsPerInstance + 4; // add additional ports for metricsPort, debugPort, consolePort and customPort
        ports = new ArrayList<>();
        this.parameters = parameters;
        this.jobSla = jobSla;
        this.hardConstraints = hardConstraints;
        this.softConstraints = softConstraints;
        this.schedulingInfo = schedulingInfo;
        this.subscriptionTimeoutSecs = subscriptionTimeoutSecs;
        this.minRuntimeSecs = minRuntimeSecs;
        this.jobSubmittedAt = jobSubmittedAt;
        this.user = user;
        this.preferredCluster = preferredCluster;
    }

    // NOTE(review): this static helper adds 1 to the machine's port count,
    // while the constructor above adds 4 named ports — confirm whether the
    // discrepancy is intentional for its callers.
    public static int getNumPortsPerInstance(MachineDefinition machineDefinition) {
        return machineDefinition.getNumPorts() + 1;
    }

    public SchedulingInfo getSchedulingInfo() {
        return schedulingInfo;
    }

    public List<Parameter> getParameters() {
        return parameters;
    }

    public MachineDefinition getDefinition() {
        return definition;
    }

    public String getJobId() {
        return jobId;
    }

    public int getWorkerIndex() {
        return workerIndex;
    }

    public int getWorkerNumber() {
        return workerNumber;
    }

    public URL getJobJarUrl() {
        return jobJarUrl;
    }

    public int getWorkerStage() {
        return workerStage;
    }

    public int getTotalStages() {
        return totalStages;
    }

    public int getNumInstancesAtStage() {
        return numInstancesAtStage;
    }

    public String getJobName() {
        return jobName;
    }

    public int getNumPortsPerInstance() {
        return numPortsPerInstance;
    }

    /** Returns -1 until four ports have been assigned via addPort(). */
    public int getMetricsPort() {
        return metricsPort;
    }

    public int getDebugPort() {
        return debugPort;
    }

    public int getConsolePort() {
        return consolePort;
    }

    public int getCustomPort() {
        return customPort;
    }

    /**
     * Assigns the next port. Assignment order is significant: the first four
     * calls fill metrics, debug, console and custom ports respectively; all
     * subsequent calls append to the regular data-port list.
     */
    public void addPort(int port) {
        if (metricsPort == -1) {
            metricsPort = port; // fill metricsPort first
        } else if (debugPort == -1) {
            debugPort = port; // fill debug port next
        } else if (consolePort == -1) {
            consolePort = port; // fill console port next
        } else if (customPort == -1) {
            customPort = port; // fill custom port next
        } else {
            ports.add(port);
        }
    }

    /** Regular data ports only — excludes the four named ports. */
    public List<Integer> getPorts() {
        return ports;
    }

    /**
     * All ports assigned to this worker: data ports plus the four named ports.
     * Named ports still unassigned are included as -1.
     */
    public List<Integer> getAllPortsUsed() {
        List<Integer> allPorts = new ArrayList<>(ports);
        allPorts.add(metricsPort);
        allPorts.add(debugPort);
        allPorts.add(consolePort);
        allPorts.add(customPort);
        return allPorts;
    }

    public JobSla getJobSla() {
        return jobSla;
    }

    public List<JobConstraints> getHardConstraints() {
        return hardConstraints;
    }

    public List<JobConstraints> getSoftConstraints() {
        return softConstraints;
    }

    public long getSubscriptionTimeoutSecs() {
        return subscriptionTimeoutSecs;
    }

    public long getMinRuntimeSecs() {
        return minRuntimeSecs;
    }

    public long getJobSubmittedAt() {
        return jobSubmittedAt;
    }

    public String getUser() {
        return user;
    }

    public Optional<String> getPreferredCluster() {
        return preferredCluster;
    }

    @Override
    public String toString() {
        return jobId + "-Stage-" + workerStage + "-Worker-" + workerIndex;
    }
}
4,327
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/domain/WorkerId.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.domain;

import java.util.Optional;

import io.mantisrx.shaded.com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Identifies a single worker of a job. The canonical string form is
 * {@code <jobId>-worker-<index>-<number>}, where the job id itself is
 * {@code <jobCluster>-<counter>}.
 */
public class WorkerId {

    private static final Logger logger = LoggerFactory.getLogger(WorkerId.class);
    private static final String DELIMITER = "-";
    private static final String WORKER_DELIMITER = "-worker-";
    private final String jobCluster;
    private final String jobId;
    private final int wIndex;
    private final int wNum;
    private final String id;

    /** Derives the cluster name from the job id and delegates to the full constructor. */
    public WorkerId(final String jobId, final int wIndex, final int wNum) {
        this(WorkerId.getJobClusterFromId(jobId), jobId, wIndex, wNum);
    }

    /**
     * @param jobCluster cluster the job belongs to (non-null)
     * @param jobId      job id (non-null)
     * @param wIndex     worker index within the stage (>= 0)
     * @param wNum       unique worker number within the job (>= 0)
     */
    public WorkerId(final String jobCluster, final String jobId, final int wIndex, final int wNum) {
        Preconditions.checkNotNull(jobCluster, "jobCluster");
        Preconditions.checkNotNull(jobId, "jobId");
        Preconditions.checkArgument(wIndex >= 0);
        Preconditions.checkArgument(wNum >= 0);
        this.jobCluster = jobCluster;
        this.jobId = jobId;
        this.wIndex = wIndex;
        this.wNum = wNum;
        // Canonical form: <jobId>-worker-<index>-<number>
        this.id = jobId + WORKER_DELIMITER + wIndex + DELIMITER + wNum;
    }

    // Strips the trailing "-<counter>" suffix from a job id to recover the
    // cluster name; a job id with no '-' is rejected as malformed.
    private static String getJobClusterFromId(final String jobId) {
        final int suffixStart = jobId.lastIndexOf(DELIMITER);
        if (suffixStart <= 0) {
            logger.error("Failed to get JobCluster name from Job ID {}", jobId);
            throw new IllegalArgumentException("Job ID is invalid " + jobId);
        }
        return jobId.substring(0, suffixStart);
    }

    /**
     * Parses a worker id from its canonical string form. Returns empty when
     * the string is malformed — some Master code paths produce index = -1,
     * which cannot yield a valid WorkerId.
     */
    public static Optional<WorkerId> fromId(final String id) {
        final int workerSep = id.indexOf(WORKER_DELIMITER);
        if (workerSep > 0) {
            final String jobId = id.substring(0, workerSep);
            final int clusterEnd = jobId.lastIndexOf(DELIMITER);
            if (clusterEnd > 0) {
                final String cluster = jobId.substring(0, clusterEnd);
                final String indexAndNum = id.substring(workerSep + WORKER_DELIMITER.length());
                final int dash = indexAndNum.indexOf(DELIMITER);
                if (dash > 0) {
                    try {
                        final int index = Integer.parseInt(indexAndNum.substring(0, dash));
                        final int number = Integer.parseInt(indexAndNum.substring(dash + 1));
                        return Optional.of(new WorkerId(cluster, jobId, index, number));
                    } catch (NumberFormatException nfe) {
                        logger.warn("failed to parse workerId from {}", id, nfe);
                    }
                }
            }
        }
        logger.warn("failed to parse workerId from {}", id);
        return Optional.empty();
    }

    public String getJobCluster() {
        return jobCluster;
    }

    public String getJobId() {
        return jobId;
    }

    public int getWorkerIndex() {
        return wIndex;
    }

    public int getWorkerNum() {
        return wNum;
    }

    public String getId() {
        return id;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        final WorkerId that = (WorkerId) o;
        return wIndex == that.wIndex
                && wNum == that.wNum
                && jobCluster.equals(that.jobCluster)
                && jobId.equals(that.jobId)
                && id.equals(that.id);
    }

    @Override
    public int hashCode() {
        int result = jobCluster.hashCode();
        result = 31 * result + jobId.hashCode();
        result = 31 * result + wIndex;
        result = 31 * result + wNum;
        result = 31 * result + id.hashCode();
        return result;
    }

    @Override
    public String toString() {
        return id;
    }
}
4,328
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/domain/DataFormatAdapter.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.domain; import static java.util.Optional.empty; import static java.util.Optional.of; import static java.util.Optional.ofNullable; import java.io.IOException; import java.net.MalformedURLException; import java.net.URL; import java.time.Instant; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.stream.Collectors; import io.mantisrx.shaded.com.google.common.base.Preconditions; import io.mantisrx.common.Label; import io.mantisrx.common.WorkerPorts; import io.mantisrx.master.events.LifecycleEventPublisher; import io.mantisrx.master.jobcluster.IJobClusterMetadata; import io.mantisrx.master.jobcluster.JobClusterMetadataImpl; import io.mantisrx.master.jobcluster.job.FilterableMantisJobMetadataWritable; import io.mantisrx.master.jobcluster.job.FilterableMantisStageMetadataWritable; import io.mantisrx.master.jobcluster.job.FilterableMantisWorkerMetadataWritable; import io.mantisrx.master.jobcluster.job.IMantisJobMetadata; import io.mantisrx.master.jobcluster.job.IMantisStageMetadata; import io.mantisrx.master.jobcluster.job.JobState; import io.mantisrx.master.jobcluster.job.MantisJobMetadataImpl; import io.mantisrx.master.jobcluster.job.MantisStageMetadataImpl; import 
io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata; import io.mantisrx.master.jobcluster.job.worker.JobWorker; import io.mantisrx.master.jobcluster.job.worker.WorkerState; import io.mantisrx.runtime.JobOwner; import io.mantisrx.runtime.MantisJobDefinition; import io.mantisrx.runtime.MantisJobState; import io.mantisrx.runtime.NamedJobDefinition; import io.mantisrx.runtime.WorkerMigrationConfig; import io.mantisrx.runtime.descriptor.SchedulingInfo; import io.mantisrx.runtime.descriptor.StageSchedulingInfo; import io.mantisrx.server.master.MantisJobMgr; import io.mantisrx.server.master.MantisJobOperations; import io.mantisrx.server.master.MantisJobStatus; import io.mantisrx.server.master.http.api.JobClusterInfo; import io.mantisrx.server.master.store.InvalidNamedJobException; import io.mantisrx.server.master.store.MantisJobMetadata; import io.mantisrx.server.master.store.MantisJobMetadataWritable; import io.mantisrx.server.master.store.MantisJobStore; import io.mantisrx.server.master.store.MantisStageMetadata; import io.mantisrx.server.master.store.MantisStageMetadataWritable; import io.mantisrx.server.master.store.MantisWorkerMetadata; import io.mantisrx.server.master.store.MantisWorkerMetadataWritable; import io.mantisrx.server.master.store.NamedJob; import io.mantisrx.server.master.store.NamedJobDeleteException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observable; import rx.functions.Action1; import rx.functions.Func2; public class DataFormatAdapter { private static final Logger logger = LoggerFactory.getLogger(DataFormatAdapter.class); public static NamedJob convertJobClusterMetadataToNamedJob(IJobClusterMetadata jobCluster) { return new NamedJob(new NoOpMantisJobOperations(), jobCluster.getJobClusterDefinition().getName(), convertJobClusterConfigsToJars(jobCluster.getJobClusterDefinition().getJobClusterConfigs()), convertSLAToNamedJobSLA(jobCluster.getJobClusterDefinition().getSLA()), 
jobCluster.getJobClusterDefinition().getParameters(), jobCluster.getJobClusterDefinition().getOwner(), jobCluster.getLastJobCount(), jobCluster.isDisabled(), jobCluster.getJobClusterDefinition().getIsReadyForJobMaster(), jobCluster.getJobClusterDefinition().getWorkerMigrationConfig(), jobCluster.getJobClusterDefinition().getLabels()); } public static NamedJob.CompletedJob convertCompletedJobToNamedJobCompletedJob(JobClusterDefinitionImpl.CompletedJob cJob) { return new NamedJob.CompletedJob(cJob.getName(), cJob.getJobId(), cJob.getVersion(), DataFormatAdapter.convertToMantisJobState(cJob.getState()), cJob.getSubmittedAt(), cJob.getTerminatedAt(), cJob.getUser(), cJob.getLabelList()); } public static JobClusterDefinitionImpl.CompletedJob convertNamedJobCompletedJobToCompletedJob(NamedJob.CompletedJob completedJob) { return new JobClusterDefinitionImpl.CompletedJob(completedJob.getName(),completedJob.getJobId(),completedJob.getVersion(),DataFormatAdapter.convertMantisJobStateToJobState(completedJob.getState()),completedJob.getSubmittedAt(),completedJob.getTerminatedAt(),completedJob.getUser(), completedJob.getLabels()); } public static IJobClusterMetadata convertNamedJobToJobClusterMetadata(NamedJob nJob) { return new JobClusterMetadataImpl.Builder() .withIsDisabled(nJob.getDisabled()) .withLastJobCount(nJob.getLastJobCount()) .withJobClusterDefinition(new JobClusterDefinitionImpl.Builder() .withIsReadyForJobMaster(nJob.getIsReadyForJobMaster()) .withMigrationConfig(nJob.getMigrationConfig()) .withName(nJob.getName()) .withOwner(ofNullable(nJob.getOwner()).orElse(new JobOwner("unknown","unknown","","email@netflix.com","norepo"))) .withSla(DataFormatAdapter.convertToSLA(nJob.getSla())) .withLabels(nJob.getLabels()) .withParameters(nJob.getParameters()) .withJobClusterConfigs(DataFormatAdapter.convertJarsToJobClusterConfigs(nJob.getJars())) .build()) .build(); } public static List<NamedJob.Jar> convertJobClusterConfigsToJars(List<JobClusterConfig> jobClusterConfigs) { 
Preconditions.checkNotNull(jobClusterConfigs); List<NamedJob.Jar> jarList = new ArrayList<>(jobClusterConfigs.size()); jobClusterConfigs.stream().forEach((jConfig) -> { try { jarList.add(convertJobClusterConfigToJar(jConfig)); } catch (MalformedURLException e) { logger.warn("Exception {} transforming {}", e.getMessage(), jConfig); } }); return jarList; } public static List<JobClusterConfig> convertJarsToJobClusterConfigs(List<NamedJob.Jar> jars) { Preconditions.checkNotNull(jars); List<JobClusterConfig> configs = new ArrayList<>(jars.size()); jars.stream().forEach((jar) -> { try { configs.add(convertJarToJobClusterConfig(jar)); } catch(Exception e) { logger.warn("Exception loading config {}. Skipping...", jar); } }); return configs; } public static NamedJob.Jar convertJobClusterConfigToJar(JobClusterConfig jConfig) throws MalformedURLException { SchedulingInfo sInfo = jConfig.getSchedulingInfo(); String name = jConfig.getArtifactName(); long uploadedAt = jConfig.getUploadedAt(); String version = jConfig.getVersion(); return new NamedJob.Jar(generateURL(name), uploadedAt, version, sInfo); } public static JobClusterConfig convertJarToJobClusterConfig(NamedJob.Jar jar ) { Preconditions.checkNotNull(jar); Optional<String> artifactName = extractArtifactName(jar.getUrl()); String version = jar.getVersion(); return new JobClusterConfig.Builder() .withArtifactName(artifactName.orElse("")) .withVersion(version) .withSchedulingInfo(jar.getSchedulingInfo()) .withUploadedAt(jar.getUploadedAt()) .build(); } public static URL generateURL(String artifactName) throws MalformedURLException { Preconditions.checkNotNull(artifactName, "Artifact Name cannot be null"); if(!artifactName.startsWith("http") ) { return new URL("http://" + artifactName); } return new URL(artifactName); } public static Optional<String> extractArtifactName(String jarStr) { // http://somehose/my-artifact-name-0.0.1.zip 
//http://mantisui.eu-west-1.dyntest.netflix.net/mantis-artifacts/nfmantis-sources-genericqueryable-source-6.0.8.zip if(jarStr != null && !jarStr.isEmpty()) { int lastIndexOfForwardSlash = jarStr.lastIndexOf('/'); if (lastIndexOfForwardSlash != -1) { String artifactName = jarStr.substring(lastIndexOfForwardSlash + 1, jarStr.length()); return of(artifactName); } } logger.warn("Could not extract artifactName from " + jarStr); return empty(); } public static Optional<String> extractArtifactName(URL jar) { if(jar != null) { String jarStr = jar.toString(); return extractArtifactName(jarStr); } return empty(); } public static NamedJob.SLA convertSLAToNamedJobSLA(io.mantisrx.server.master.domain.SLA sla) { return new NamedJob.SLA(sla.getMin(), sla.getMax(), sla.getCronSpec(), convertToNamedJobDefinitionCronPolicy(sla.getCronPolicy())); } public static NamedJobDefinition.CronPolicy convertToNamedJobDefinitionCronPolicy(IJobClusterDefinition.CronPolicy cPolicy) { if(cPolicy != null) { switch (cPolicy) { case KEEP_EXISTING: return NamedJobDefinition.CronPolicy.KEEP_EXISTING; case KEEP_NEW: return NamedJobDefinition.CronPolicy.KEEP_NEW; default: return NamedJobDefinition.CronPolicy.KEEP_EXISTING; } } return NamedJobDefinition.CronPolicy.KEEP_NEW; } public static MantisWorkerMetadataWritable convertMantisWorkerMetadataToMantisWorkerMetadataWritable(IMantisWorkerMetadata workerMeta) { MantisWorkerMetadataWritable writable = new MantisWorkerMetadataWritable(workerMeta.getWorkerIndex(), workerMeta.getWorkerNumber(), workerMeta.getJobId(), workerMeta.getStageNum(), workerMeta.getNumberOfPorts()); setWorkerMetadataWritable(writable, workerMeta); return writable; } public static FilterableMantisWorkerMetadataWritable convertMantisWorkerMetadataToFilterableMantisWorkerMetadataWritable(IMantisWorkerMetadata workerMeta) { FilterableMantisWorkerMetadataWritable writable = new FilterableMantisWorkerMetadataWritable(workerMeta.getWorkerIndex(), workerMeta.getWorkerNumber(), 
workerMeta.getJobId(), workerMeta.getStageNum(), workerMeta.getNumberOfPorts()); setWorkerMetadataWritable(writable, workerMeta); return writable; } public static void setWorkerMetadataWritable(MantisWorkerMetadataWritable writable, IMantisWorkerMetadata workerMeta) { writable.setAcceptedAt(workerMeta.getAcceptedAt()); writable.setLaunchedAt(workerMeta.getLaunchedAt()); writable.setCompletedAt(workerMeta.getCompletedAt()); writable.setStartingAt(workerMeta.getStartingAt()); writable.setStartedAt(workerMeta.getStartedAt()); writable.setCluster(workerMeta.getCluster()); writable.setSlave(workerMeta.getSlave()); writable.setSlaveID(workerMeta.getSlaveID()); Optional<WorkerPorts> wPorts = workerMeta.getPorts(); if(wPorts.isPresent()) { WorkerPorts wP = wPorts.get(); writable.addPorts(wP.getPorts()); } writable.setConsolePort(workerMeta.getConsolePort()); writable.setDebugPort(workerMeta.getDebugPort()); writable.setMetricsPort(workerMeta.getMetricsPort()); writable.setCustomPort(workerMeta.getCustomPort()); MantisJobState state = convertWorkerStateToMantisJobState(workerMeta.getState()); try { switch (state) { case Accepted: writable.setStateNoValidation(state, workerMeta.getAcceptedAt(), workerMeta.getReason()); break; case Launched: writable.setStateNoValidation(state, workerMeta.getLaunchedAt(), workerMeta.getReason()); break; case StartInitiated: writable.setStateNoValidation(state, workerMeta.getStartingAt(), workerMeta.getReason()); break; case Started: writable.setStateNoValidation(state, workerMeta.getStartedAt(), workerMeta.getReason()); break; case Failed: writable.setStateNoValidation(state, workerMeta.getCompletedAt(), workerMeta.getReason()); break; case Completed: writable.setStateNoValidation(state, workerMeta.getCompletedAt(), workerMeta.getReason()); break; default: assert false : "Unexpected job state to set"; } } catch (Exception e) { throw new RuntimeException("Error converting to MantisWorkerWriteable " + e.getMessage()); } 
writable.setResubmitInfo(workerMeta.getResubmitOf(),workerMeta.getTotalResubmitCount()); writable.setReason(workerMeta.getReason()); } /** * Convert/Deserialize metadata into a {@link JobWorker}. * * The converted object could have no worker ports which returns Null. * * Legit Cases: * * 1. Loaded worker was in Accepted state (hasn't been assigned ports yet). * 2. Loaded worker was in Archived state but previously archived from Accepted state. * * Error Cases: * * 1. Loaded worker was in Non-Accepted state (data corruption). * 2. Loaded worker was in Archived state but previously was running or completed (data corruption, but same * semantic as Legit Case 2 above. * * @return a valid converted job worker. */ public static JobWorker convertMantisWorkerMetadataWriteableToMantisWorkerMetadata(MantisWorkerMetadata writeable, LifecycleEventPublisher eventPublisher) { if(logger.isDebugEnabled()) { logger.debug("DataFormatAdatper:converting worker {}", writeable); } String jobId = writeable.getJobId(); List<Integer> ports = new ArrayList<>(writeable.getNumberOfPorts()); ports.add(writeable.getMetricsPort()); ports.add(writeable.getDebugPort()); ports.add(writeable.getConsolePort()); ports.add(writeable.getCustomPort()); if(writeable.getPorts().size() > 0) { ports.add(writeable.getPorts().get(0)); } WorkerPorts workerPorts = null; try { workerPorts = new WorkerPorts(ports); } catch (IllegalArgumentException | IllegalStateException e) { logger.warn("problem loading worker {} for Job ID {}", writeable.getWorkerId(), jobId, e); } JobWorker converted = new JobWorker.Builder() .withJobId(jobId) .withAcceptedAt(writeable.getAcceptedAt()) .withLaunchedAt(writeable.getLaunchedAt()) .withStartingAt(writeable.getStartingAt()) .withStartedAt(writeable.getStartedAt()) .withCompletedAt(writeable.getCompletedAt()) .withNumberOfPorts(ports.size()) .withWorkerPorts(workerPorts) .withResubmitCount(writeable.getTotalResubmitCount()) .withResubmitOf(writeable.getResubmitOf()) 
.withSlave(writeable.getSlave()) .withSlaveID(writeable.getSlaveID()) .withStageNum(writeable.getStageNum()) .withState(convertMantisJobStateToWorkerState(writeable.getState())) .withWorkerIndex(writeable.getWorkerIndex()) .withWorkerNumber(writeable.getWorkerNumber()) .withJobCompletedReason(writeable.getReason()) .withPreferredCluster(writeable.getCluster()) .withLifecycleEventsPublisher(eventPublisher) .build(); if( logger.isDebugEnabled()) { logger.debug("DataFormatAdatper:converted worker {}", converted); } return converted; } public static MantisStageMetadataWritable convertMantisStageMetadataToMantisStageMetadataWriteable(IMantisStageMetadata stageMeta) { return new MantisStageMetadataWritable(stageMeta.getJobId().getId(), stageMeta.getStageNum(), stageMeta.getNumStages(), stageMeta.getMachineDefinition(), stageMeta.getNumWorkers(), stageMeta.getHardConstraints(), stageMeta.getSoftConstraints(), stageMeta.getScalingPolicy(), stageMeta.getScalable() ); } public static FilterableMantisStageMetadataWritable convertFilterableMantisStageMetadataToMantisStageMetadataWriteable(IMantisStageMetadata stageMeta) { return new FilterableMantisStageMetadataWritable(stageMeta.getJobId().getId(), stageMeta.getStageNum(), stageMeta.getNumStages(), stageMeta.getMachineDefinition(), stageMeta.getNumWorkers(), stageMeta.getHardConstraints(), stageMeta.getSoftConstraints(), stageMeta.getScalingPolicy(), stageMeta.getScalable() ); } public static io.mantisrx.server.master.domain.SLA convertToSLA(NamedJob.SLA sla) { return new io.mantisrx.server.master.domain.SLA(sla.getMin(),sla.getMax(),sla.getCronSpec(),convertToCronPolicy(sla.getCronPolicy())); } public static IJobClusterDefinition.CronPolicy convertToCronPolicy(NamedJobDefinition.CronPolicy cronPolicy) { if(cronPolicy != null) { switch (cronPolicy) { case KEEP_EXISTING: return IJobClusterDefinition.CronPolicy.KEEP_EXISTING; case KEEP_NEW: return IJobClusterDefinition.CronPolicy.KEEP_NEW; default: return 
IJobClusterDefinition.CronPolicy.KEEP_NEW; } } return null; } public static IMantisJobMetadata convertMantisJobWriteableToMantisJobMetadata(MantisJobMetadata archJob, LifecycleEventPublisher eventPublisher) throws Exception { return convertMantisJobWriteableToMantisJobMetadata(archJob, eventPublisher, false); } // TODO job specific migration config is not supported, migration config will be at cluster level public static IMantisJobMetadata convertMantisJobWriteableToMantisJobMetadata(MantisJobMetadata archJob, LifecycleEventPublisher eventPublisher, boolean isArchived) throws Exception { if(logger.isTraceEnabled()) { logger.trace("DataFormatAdapter:Converting {}", archJob); } // convert stages to new format List<IMantisStageMetadata> convertedStageList = new ArrayList<>(); for (MantisStageMetadata stageMeta : ((MantisJobMetadataWritable) archJob).getStageMetadata()) { // if this is an archived job then add workerIndex may fail as there maybe multiple workers related to a given index so skip adding workers to stage boolean skipAddingWorkers = false; if(isArchived) { skipAddingWorkers = true; } convertedStageList.add(convertMantisStageMetadataWriteableToMantisStageMetadata(stageMeta, eventPublisher,skipAddingWorkers)); } // generate SchedulingInfo SchedulingInfo schedulingInfo = generateSchedulingInfo(convertedStageList); URL jarUrl = archJob.getJarUrl(); Optional<String> artifactName = extractArtifactName(jarUrl); // generate job defn JobDefinition jobDefn = new JobDefinition(archJob.getName(), archJob.getUser(), artifactName.orElse(""), null,archJob.getParameters(), archJob.getSla(), archJob.getSubscriptionTimeoutSecs(),schedulingInfo, archJob.getNumStages(),archJob.getLabels()); Optional<JobId> jIdOp = JobId.fromId(archJob.getJobId()); if(!jIdOp.isPresent()) { throw new IllegalArgumentException("Invalid JobId " + archJob.getJobId()); } // generate job meta MantisJobMetadataImpl mantisJobMetadata = new MantisJobMetadataImpl(jIdOp.get(), archJob.getSubmittedAt(), 
archJob.getStartedAt(), jobDefn, convertMantisJobStateToJobState(archJob.getState()), archJob.getNextWorkerNumberToUse()); // add the stages for(IMantisStageMetadata stageMetadata : convertedStageList) { mantisJobMetadata.addJobStageIfAbsent(stageMetadata); } if(logger.isTraceEnabled()) { logger.trace("DataFormatAdapter:Completed conversion to IMantisJobMetadata {}", mantisJobMetadata); } return mantisJobMetadata; } private static StageSchedulingInfo generateStageSchedulingInfo(IMantisStageMetadata mantisStageMetadata) { StageSchedulingInfo stageSchedulingInfo = new StageSchedulingInfo(mantisStageMetadata.getNumWorkers(), mantisStageMetadata.getMachineDefinition(),mantisStageMetadata.getHardConstraints(), mantisStageMetadata.getSoftConstraints(),mantisStageMetadata.getScalingPolicy(), mantisStageMetadata.getScalable()); return stageSchedulingInfo; } private static SchedulingInfo generateSchedulingInfo(List<IMantisStageMetadata> convertedStageList) { Map<Integer, StageSchedulingInfo> stageSchedulingInfoMap = new HashMap<>(); Iterator<IMantisStageMetadata> it = convertedStageList.iterator(); while(it.hasNext()) { IMantisStageMetadata stageMeta = it.next(); StageSchedulingInfo stageSchedulingInfo = generateStageSchedulingInfo(stageMeta); stageSchedulingInfoMap.put(stageMeta.getStageNum(), stageSchedulingInfo); } SchedulingInfo schedulingInfo = new SchedulingInfo(stageSchedulingInfoMap); return schedulingInfo; } public static IMantisStageMetadata convertMantisStageMetadataWriteableToMantisStageMetadata( MantisStageMetadata stageMeta, LifecycleEventPublisher eventPublisher) { return convertMantisStageMetadataWriteableToMantisStageMetadata(stageMeta,eventPublisher, false); } public static IMantisStageMetadata convertMantisStageMetadataWriteableToMantisStageMetadata(MantisStageMetadata stageMeta, LifecycleEventPublisher eventPublisher, boolean skipAddingWorkerMetaData) { if(logger.isTraceEnabled()) { logger.trace("DataFormatAdapter:converting stage {}, skipadding workers 
{}", stageMeta, skipAddingWorkerMetaData); } Optional<JobId> jIdOp = JobId.fromId(stageMeta.getJobId()); if(!jIdOp.isPresent()) { new IllegalArgumentException("Invalid jobid " + stageMeta.getJobId()); } IMantisStageMetadata newStageMeta = new MantisStageMetadataImpl.Builder() .withHardConstraints(stageMeta.getHardConstraints()) .withSoftConstraints(stageMeta.getSoftConstraints()) .withJobId(jIdOp.get()) .withMachineDefinition(stageMeta.getMachineDefinition()) .withNumStages(stageMeta.getNumStages()) .withNumWorkers(stageMeta.getNumWorkers()) .withScalingPolicy(stageMeta.getScalingPolicy()) .withStageNum(stageMeta.getStageNum()) .isScalable(stageMeta.getScalable()) .build(); if(!skipAddingWorkerMetaData) { if(logger.isDebugEnabled()) {logger.debug("Skip adding workers to stage meta");} stageMeta.getAllWorkers().stream() .forEach((mantisWorkerMetadata) -> { ((MantisStageMetadataImpl) newStageMeta).addWorkerIndex(convertMantisWorkerMetadataWriteableToMantisWorkerMetadata(mantisWorkerMetadata, eventPublisher)); }); } if(logger.isDebugEnabled()) { logger.debug("DataFormatAdapter:converted stage {}", newStageMeta); } return newStageMeta; } public static MantisJobMetadataWritable convertMantisJobMetadataToMantisJobMetadataWriteable(IMantisJobMetadata jobMetadata) { Instant startedAtInstant = jobMetadata.getStartedAtInstant().orElse(Instant.ofEpochMilli(0)); return new MantisJobMetadataWritable(jobMetadata.getJobId().getId(), jobMetadata.getJobId().getCluster(), jobMetadata.getUser(), jobMetadata.getSubmittedAtInstant().toEpochMilli(), startedAtInstant.toEpochMilli(), jobMetadata.getJobJarUrl(), jobMetadata.getTotalStages(), jobMetadata.getSla().orElse(null), convertToMantisJobState(jobMetadata.getState()), jobMetadata.getSubscriptionTimeoutSecs(), jobMetadata.getParameters(), jobMetadata.getNextWorkerNumberToUse(), // TODO need to wire migration config here so it can get persisted null, jobMetadata.getLabels()); } public static FilterableMantisJobMetadataWritable 
convertMantisJobMetadataToFilterableMantisJobMetadataWriteable(IMantisJobMetadata jobMetadata) { Instant startedAtInstant = jobMetadata.getStartedAtInstant().orElse(Instant.ofEpochMilli(0)); return new FilterableMantisJobMetadataWritable(jobMetadata.getJobId().getId(), jobMetadata.getJobId().getCluster(), jobMetadata.getUser(), jobMetadata.getSubmittedAtInstant().toEpochMilli(), startedAtInstant.toEpochMilli(), jobMetadata.getJobJarUrl(), jobMetadata.getTotalStages(), jobMetadata.getSla().orElse(null), convertToMantisJobState(jobMetadata.getState()), jobMetadata.getSubscriptionTimeoutSecs(), jobMetadata.getParameters(), jobMetadata.getNextWorkerNumberToUse(), // TODO need to wire migration config here so it can get persisted null, jobMetadata.getLabels()); } public static JobState convertMantisJobStateToJobState(MantisJobState state) { JobState oState; switch(state) { case Accepted: oState = JobState.Accepted; break; case Launched: oState = JobState.Launched; break; case Started: oState = JobState.Launched; break; case StartInitiated: oState = JobState.Launched; break; case Completed: oState = JobState.Completed; break; case Failed: oState = JobState.Failed; break; default: oState = JobState.Noop; break; } return oState; } public static MantisJobState convertToMantisJobState(JobState state) { MantisJobState oldState; switch(state) { case Accepted: oldState = MantisJobState.Accepted; break; case Launched: oldState = MantisJobState.Launched; break; case Terminating_abnormal: oldState = MantisJobState.Failed; break; case Terminating_normal: oldState = MantisJobState.Completed; break; case Failed: oldState = MantisJobState.Failed; break; case Completed: oldState = MantisJobState.Completed; break; case Noop: oldState = MantisJobState.Noop; break; default: oldState = MantisJobState.Noop; } return oldState; } public static MantisJobState convertWorkerStateToMantisJobState(WorkerState state) { MantisJobState wState; switch(state) { case Accepted: wState = 
MantisJobState.Accepted; break; case Failed: wState = MantisJobState.Failed; break; case Completed: wState = MantisJobState.Completed; break; case Noop: wState = MantisJobState.Noop; break; case StartInitiated: wState = MantisJobState.StartInitiated; break; case Started: wState = MantisJobState.Started; break; case Launched: wState = MantisJobState.Launched; break; default: wState = MantisJobState.Noop; break; } return wState; } public static WorkerState convertMantisJobStateToWorkerState(MantisJobState state) { WorkerState wState; switch(state) { case Accepted: wState = WorkerState.Accepted; break; case Failed: wState = WorkerState.Failed; break; case Completed: wState = WorkerState.Completed; break; case Noop: wState = WorkerState.Noop; break; case StartInitiated: wState = WorkerState.StartInitiated; break; case Started: wState = WorkerState.Started; break; case Launched: wState = WorkerState.Launched; break; default: wState = WorkerState.Unknown; break; } return wState; } public static List<JobClusterInfo.JarInfo> convertNamedJobJarListToJarInfoList(List<NamedJob.Jar> jars) { return jars.stream().map((jar) -> new JobClusterInfo.JarInfo(jar.getVersion(),jar.getUploadedAt(),jar.getUrl().toString())).collect(Collectors.toList()); } } class NoOpMantisJobOperations implements MantisJobOperations { @Override public NamedJob createNamedJob(NamedJobDefinition namedJobDefinition) throws InvalidNamedJobException { // TODO Auto-generated method stub return null; } @Override public NamedJob updateNamedJar(NamedJobDefinition namedJobDefinition, boolean createIfNeeded) throws InvalidNamedJobException { // TODO Auto-generated method stub return null; } @Override public NamedJob quickUpdateNamedJob(String user, String name, URL jobJar, String version) throws InvalidNamedJobException { // TODO Auto-generated method stub return null; } @Override public void updateSla(String user, String name, NamedJob.SLA sla, boolean forceEnable) throws InvalidNamedJobException { // TODO 
Auto-generated method stub } @Override public void updateLabels(String user, String name, List<Label> labels) throws InvalidNamedJobException { // TODO Auto-generated method stub } @Override public void updateMigrateStrategy(String user, String name, WorkerMigrationConfig migrationConfig) throws InvalidNamedJobException { // TODO Auto-generated method stub } @Override public String quickSubmit(String jobName, String user) throws InvalidNamedJobException, io.mantisrx.server.master.store.InvalidJobException { // TODO Auto-generated method stub return null; } @Override public Optional<NamedJob> getNamedJob(String name) { // TODO Auto-generated method stub return null; } @Override public void deleteNamedJob(String name, String user) throws NamedJobDeleteException { // TODO Auto-generated method stub } @Override public void disableNamedJob(String name, String user) throws InvalidNamedJobException { // TODO Auto-generated method stub } @Override public void enableNamedJob(String name, String user) throws InvalidNamedJobException { // TODO Auto-generated method stub } @Override public MantisJobStatus submit(MantisJobDefinition jobDefinition) { // TODO Auto-generated method stub return null; } @Override public boolean deleteJob(String jobId) throws IOException { // TODO Auto-generated method stub return false; } @Override public void killJob(String user, String jobId, String reason) { // TODO Auto-generated method stub } @Override public void terminateJob(String jobId) { // TODO Auto-generated method stub } @Override public Observable<MantisJobStatus> jobs() { // TODO Auto-generated method stub return null; } @Override public MantisJobStatus status(String jobId) { // TODO Auto-generated method stub return null; } @Override public Func2<MantisJobStore, Map<String, MantisJobDefinition>, Collection<NamedJob>> getJobsInitializer() { // TODO Auto-generated method stub return null; } @Override public Collection<MantisJobMgr> getAllJobMgrs() { // TODO Auto-generated method stub 
return null; } @Override public Optional<MantisJobMgr> getJobMgr(String jobId) { // TODO Auto-generated method stub return null; } @Override public Action1<String> getSlaveDisabler() { // TODO Auto-generated method stub return null; } @Override public Action1<String> getSlaveEnabler() { // TODO Auto-generated method stub return null; } @Override public void setReady() { // TODO Auto-generated method stub } }
4,329
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/domain/JobMetadata.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.domain;

import java.net.URL;
import java.util.List;

import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.parameter.Parameter;

/**
 * Immutable value holder describing a submitted job: its id, the artifact it
 * runs, its stage/scheduling layout, user parameters, and timeout settings.
 * All fields are set once at construction and exposed through getters only.
 */
public class JobMetadata {

    // Identity and artifact
    private final JobId jobId;
    private final URL jobJarUrl;
    private final int totalStages;
    private final String user;

    // Execution configuration
    private final SchedulingInfo schedulingInfo;
    private final List<Parameter> parameters;
    private final long subscriptionTimeoutSecs;
    private final long minRuntimeSecs;

    /**
     * Creates a fully-populated job metadata record.
     *
     * @param jobId                   unique id of the job
     * @param jobJarUrl               location of the job artifact (jar/zip)
     * @param totalStages             number of stages in the job topology
     * @param user                    user that submitted the job
     * @param schedulingInfo          per-stage worker counts and resources
     * @param parameters              user-supplied job parameters
     * @param subscriptionTimeoutSecs seconds to wait for a subscriber before timing out
     * @param minRuntimeSecs          minimum runtime the job is expected to live
     */
    public JobMetadata(final JobId jobId,
                       final URL jobJarUrl,
                       final int totalStages,
                       final String user,
                       final SchedulingInfo schedulingInfo,
                       final List<Parameter> parameters,
                       final long subscriptionTimeoutSecs,
                       final long minRuntimeSecs) {
        this.jobId = jobId;
        this.user = user;
        this.jobJarUrl = jobJarUrl;
        this.totalStages = totalStages;
        this.parameters = parameters;
        this.schedulingInfo = schedulingInfo;
        this.subscriptionTimeoutSecs = subscriptionTimeoutSecs;
        this.minRuntimeSecs = minRuntimeSecs;
    }

    /** @return the unique id of this job */
    public JobId getJobId() {
        return jobId;
    }

    /** @return location of the job artifact */
    public URL getJobJarUrl() {
        return jobJarUrl;
    }

    /** @return number of stages in the job topology */
    public int getTotalStages() {
        return totalStages;
    }

    /** @return submitting user */
    public String getUser() {
        return user;
    }

    /** @return user-supplied job parameters (the list instance passed at construction) */
    public List<Parameter> getParameters() {
        return parameters;
    }

    /** @return per-stage scheduling layout */
    public SchedulingInfo getSchedulingInfo() {
        return schedulingInfo;
    }

    /** @return seconds to wait for a subscriber before timing out */
    public long getSubscriptionTimeoutSecs() {
        return subscriptionTimeoutSecs;
    }

    /** @return minimum expected runtime, in seconds */
    public long getMinRuntimeSecs() {
        return minRuntimeSecs;
    }
}
4,330
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/domain/Messages.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.domain;

import akka.actor.ActorRef;

/**
 * Shared actor-message definitions. The three singleton constants
 * ({@code Put}, {@code Take}, {@code Think}) are type-marked via private
 * marker interfaces so receivers can match on identity, while {@link Busy}
 * and {@link Taken} carry the chopstick actor they refer to.
 */
public class Messages {

    // Marker interfaces give each singleton constant a distinct runtime type.
    private interface PutMessage {}

    private interface TakeMessage {}

    private interface ThinkMessage {}

    /** Singleton "put" request message. */
    public static final Object Put = new PutMessage() {
        @Override
        public String toString() {
            return "Put";
        }
    };

    /** Singleton "take" request message. */
    public static final Object Take = new TakeMessage() {
        @Override
        public String toString() {
            return "Take";
        }
    };

    /** Singleton "think" message (no custom toString). */
    public static final Object Think = new ThinkMessage() {};

    /** Reply indicating the referenced chopstick is currently in use. */
    public static final class Busy {
        public final ActorRef chopstick;

        public Busy(ActorRef chopstick) {
            this.chopstick = chopstick;
        }
    }

    /** Reply indicating the referenced chopstick was successfully taken. */
    public static final class Taken {
        public final ActorRef chopstick;

        public Taken(ActorRef chopstick) {
            this.chopstick = chopstick;
        }
    }
}
4,331
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/domain/JobId.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.domain;

import java.util.Optional;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;

/**
 * Identifier of a job: the owning cluster name plus a per-cluster job number,
 * rendered as {@code <cluster>-<jobNum>}. Instances are immutable; ordering
 * and equality are based on the rendered id string and its components.
 */
public class JobId implements Comparable<JobId> {

    private static final String DELIMITER = "-";

    private final String cluster;
    private final long jobNum;
    private final String id;

    /**
     * @param clusterName The Job Cluster that this jobID belongs to
     * @param jobNum      Identifies the job for this cluster
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public JobId(@JsonProperty("clusterName") final String clusterName,
                 @JsonProperty("jobNum") final long jobNum) {
        this.cluster = clusterName;
        this.jobNum = jobNum;
        this.id = clusterName + DELIMITER + jobNum;
    }

    /**
     * Parses a rendered id string back into a {@link JobId}.
     *
     * @param id id string of the form {@code <cluster>-<jobNum>}; the cluster
     *           part may itself contain dashes (the last dash is the split point)
     * @return the parsed JobId, or {@link Optional#empty()} if the string is
     *         malformed (no delimiter, or a non-numeric job number)
     */
    public static Optional<JobId> fromId(final String id) {
        final int i = id.lastIndexOf(DELIMITER);
        if (i < 0) {
            return Optional.empty();
        }
        final String jobCluster = id.substring(0, i);
        try {
            // jobNum is declared long and the constructor accepts long, so parse
            // as long: Integer.parseInt would wrongly reject well-formed ids whose
            // job number exceeds Integer.MAX_VALUE.
            final long jobNum = Long.parseLong(id.substring(i + 1));
            // The constructed instance is never null; Optional.of states that.
            return Optional.of(new JobId(jobCluster, jobNum));
        } catch (NumberFormatException nfe) {
            return Optional.empty();
        }
    }

    /** @return the owning job cluster's name */
    public String getCluster() {
        return cluster;
    }

    /** @return the per-cluster job number */
    public long getJobNum() {
        return jobNum;
    }

    /** @return the rendered id string {@code <cluster>-<jobNum>} */
    public String getId() {
        return id;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((cluster == null) ? 0 : cluster.hashCode());
        result = prime * result + ((id == null) ? 0 : id.hashCode());
        result = prime * result + (int) (jobNum ^ (jobNum >>> 32));
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        JobId other = (JobId) obj;
        if (cluster == null) {
            if (other.cluster != null)
                return false;
        } else if (!cluster.equals(other.cluster))
            return false;
        if (id == null) {
            if (other.id != null)
                return false;
        } else if (!id.equals(other.id))
            return false;
        return jobNum == other.jobNum;
    }

    @Override
    public String toString() {
        return id;
    }

    /** Lexicographic ordering on the rendered id string. */
    @Override
    public int compareTo(JobId o) {
        return id.compareTo(o.getId());
    }
}
4,332
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/domain/JobClusterConfig.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.domain;

import java.util.Objects;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.google.common.base.Preconditions;
import io.mantisrx.runtime.descriptor.SchedulingInfo;

/**
 * Immutable configuration snapshot for a job cluster: the artifact to run,
 * its version, when it was uploaded, and the scheduling layout. A missing or
 * empty version defaults to the current time in millis, rendered as a string.
 */
public class JobClusterConfig {

    private final String artifactName;
    private final String version;
    private final long uploadedAt;
    private final SchedulingInfo schedulingInfo;

    /**
     * @param artifactName   name of the job artifact
     * @param uploadedAt     upload timestamp (epoch millis)
     * @param version        artifact version; null/empty defaults to current millis
     * @param schedulingInfo per-stage scheduling layout
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public JobClusterConfig(@JsonProperty("artifactName") String artifactName,
                            @JsonProperty("uploadedAt") long uploadedAt,
                            @JsonProperty("version") String version,
                            @JsonProperty("schedulingInfo") SchedulingInfo schedulingInfo) {
        this.artifactName = artifactName;
        this.uploadedAt = uploadedAt;
        if (version == null || version.isEmpty()) {
            // fall back to a timestamp-derived version string
            this.version = "" + System.currentTimeMillis();
        } else {
            this.version = version;
        }
        this.schedulingInfo = schedulingInfo;
    }

    public String getArtifactName() {
        return artifactName;
    }

    public long getUploadedAt() {
        return uploadedAt;
    }

    public String getVersion() {
        return version;
    }

    public SchedulingInfo getSchedulingInfo() {
        return schedulingInfo;
    }

    @Override
    public String toString() {
        return "JobClusterConfig [artifactName=" + artifactName + ", version=" + version
                + ", uploadedAt=" + uploadedAt + ", schedulingInfo=" + schedulingInfo + "]";
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        JobClusterConfig that = (JobClusterConfig) o;
        return uploadedAt == that.uploadedAt
                && Objects.equals(artifactName, that.artifactName)
                && Objects.equals(version, that.version)
                && Objects.equals(schedulingInfo, that.schedulingInfo);
    }

    @Override
    public int hashCode() {
        return Objects.hash(artifactName, version, uploadedAt, schedulingInfo);
    }

    /**
     * Fluent builder. {@code uploadedAt} defaults to the current time and
     * {@code version} to a timestamp string when not supplied.
     */
    public static class Builder {
        String artifactName;
        String version;
        long uploadedAt = -1;
        SchedulingInfo schedulingInfo;

        public Builder() {}

        /** Sets the artifact name; must be non-null and non-empty. */
        public Builder withArtifactName(String artifactName) {
            Preconditions.checkNotNull(artifactName, "artifactName cannot be null");
            Preconditions.checkArgument(!artifactName.isEmpty(), "ArtifactName cannot be empty");
            this.artifactName = artifactName;
            return this;
        }

        /** Sets the version; must be non-null and non-empty. */
        public Builder withVersion(String version) {
            Preconditions.checkNotNull(version, "version cannot be null");
            Preconditions.checkArgument(!version.isEmpty(), "version cannot be empty");
            this.version = version;
            return this;
        }

        /** Sets the upload timestamp; must be positive. */
        public Builder withUploadedAt(long uAt) {
            Preconditions.checkArgument(uAt > 0, "uploaded At cannot be <= 0");
            this.uploadedAt = uAt;
            return this;
        }

        /** Sets the scheduling layout; must be non-null. */
        public Builder withSchedulingInfo(SchedulingInfo sInfo) {
            Preconditions.checkNotNull(sInfo, "schedulingInfo cannot be null");
            this.schedulingInfo = sInfo;
            return this;
        }

        /** Copies all fields from an existing config into this builder. */
        public Builder from(JobClusterConfig config) {
            this.artifactName = config.getArtifactName();
            this.version = config.getVersion();
            this.uploadedAt = config.getUploadedAt();
            this.schedulingInfo = config.getSchedulingInfo();
            return this;
        }

        // TODO add validity checks for SchedulingInfo, MachineDescription etc
        public JobClusterConfig build() {
            Preconditions.checkNotNull(artifactName);
            Preconditions.checkNotNull(schedulingInfo);
            if (uploadedAt == -1) {
                uploadedAt = System.currentTimeMillis();
            }
            if (version == null || version.isEmpty()) {
                version = "" + System.currentTimeMillis();
            }
            return new JobClusterConfig(artifactName, uploadedAt, version, schedulingInfo);
        }
    }
}
4,333
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/domain/IJobClusterDefinition.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.domain;

import java.util.List;

import io.mantisrx.common.Label;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.parameter.Parameter;

/**
 * Contract for a job cluster definition: the static configuration (owner, SLA,
 * artifacts, parameters, labels) that job submissions against the cluster inherit.
 */
public interface IJobClusterDefinition {

    /** Returns the owner (contact/team) of this job cluster. */
    JobOwner getOwner();

    /** Returns the cluster's SLA (min/max running jobs, cron settings). */
    SLA getSLA();

    /** Returns the worker migration configuration applied to jobs of this cluster. */
    WorkerMigrationConfig getWorkerMigrationConfig();

    /** Returns true if jobs of this cluster are ready to run with a job master. */
    boolean getIsReadyForJobMaster();

    /** Returns all known artifact configurations for this cluster. */
    List<JobClusterConfig> getJobClusterConfigs();

    /** Returns a single artifact configuration (see implementation for which one). */
    JobClusterConfig getJobClusterConfig();

    /** Returns the cluster name. */
    String getName();

    /** Returns the user associated with this definition. */
    String getUser();

    String toString();

    /** Returns the default parameters inherited by job submissions. */
    List<Parameter> getParameters();

    /** Returns the labels attached to this cluster. */
    List<Label> getLabels();

    /**
     * Policy applied when a cron-triggered submission finds a job already running:
     * keep the existing job, or replace it with the new one.
     */
    enum CronPolicy {KEEP_EXISTING, KEEP_NEW}
}
4,334
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/domain/JobClusterDefinitionImpl.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.domain;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Optional;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.google.common.base.Preconditions;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import io.mantisrx.common.Label;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.parameter.Parameter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Default, JSON-serializable implementation of {@link IJobClusterDefinition}.
 * Holds the cluster's identity, SLA, migration policy and the ordered list of
 * registered {@link JobClusterConfig}s (the last entry is the "current" config).
 */
public class JobClusterDefinitionImpl implements IJobClusterDefinition {

    private static final Logger logger = LoggerFactory.getLogger(JobClusterDefinitionImpl.class);

    private final String name;
    private final String user;
    private final JobOwner owner;
    private final SLA sla;
    private final WorkerMigrationConfig migrationConfig;
    // Configs are accumulated here in registration order; oldest first.
    private final List<JobClusterConfig> jobClusterConfigs = Lists.newArrayList();
    private final List<Parameter> parameters;
    private final List<Label> labels;
    private boolean isReadyForJobMaster = false;

    /**
     * Jackson-bound constructor.
     *
     * <p>Null {@code sla}, {@code migrationConfig}, {@code parameters} and
     * {@code labels} are replaced with sensible defaults; {@code jobClusterConfigs}
     * must be non-null and non-empty.
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public JobClusterDefinitionImpl(@JsonProperty("name") String name,
                                    @JsonProperty("jobClusterConfigs") List<JobClusterConfig> jobClusterConfigs,
                                    @JsonProperty("owner") JobOwner owner,
                                    @JsonProperty("user") String user,
                                    @JsonProperty("sla") SLA sla,
                                    @JsonProperty("migrationConfig") WorkerMigrationConfig migrationConfig,
                                    @JsonProperty("isReadyForJobMaster") boolean isReadyForJobMaster,
                                    @JsonProperty("parameters") List<Parameter> parameters,
                                    @JsonProperty("labels") List<Label> labels
    ) {
        Preconditions.checkNotNull(jobClusterConfigs);
        Preconditions.checkArgument(!jobClusterConfigs.isEmpty());
        this.owner = owner;
        this.name = name;
        // Default SLA: no min/max, no cron, keep any existing cron trigger.
        this.sla = Optional.ofNullable(sla).orElse(new SLA(0, 0, null, CronPolicy.KEEP_EXISTING));
        this.migrationConfig = Optional.ofNullable(migrationConfig).orElse(WorkerMigrationConfig.DEFAULT);
        this.isReadyForJobMaster = isReadyForJobMaster;
        this.jobClusterConfigs.addAll(jobClusterConfigs);
        this.labels = Optional.ofNullable(labels).orElse(Lists.newArrayList());
        this.parameters = Optional.ofNullable(parameters).orElse(Lists.newArrayList());
        this.user = user;
    }

    /** {@inheritDoc} */
    @Override
    public JobOwner getOwner() {
        return owner;
    }

    /** {@inheritDoc} */
    @Override
    public SLA getSLA() {
        return this.sla;
    }

    /** {@inheritDoc} */
    @Override
    public WorkerMigrationConfig getWorkerMigrationConfig() {
        return this.migrationConfig;
    }

    /** {@inheritDoc} */
    @Override
    public boolean getIsReadyForJobMaster() {
        return this.isReadyForJobMaster;
    }

    /** Returns an unmodifiable view of all registered configs, oldest first. */
    @Override
    public List<JobClusterConfig> getJobClusterConfigs() {
        return Collections.unmodifiableList(this.jobClusterConfigs);
    }

    /** Returns the most recently registered config (last element of the list). */
    @Override
    public JobClusterConfig getJobClusterConfig() {
        return this.jobClusterConfigs.get(jobClusterConfigs.size() - 1);
    }

    /** {@inheritDoc} */
    @Override
    public String getName() {
        return name;
    }

    /** {@inheritDoc} */
    @Override
    public String getUser() {
        return user;
    }

    @Override
    public List<Parameter> getParameters() {
        return Collections.unmodifiableList(this.parameters);
    }

    @Override
    public List<Label> getLabels() {
        return Collections.unmodifiableList(this.labels);
    }

    @Override
    public String toString() {
        return "JobClusterDefinitionImpl{" +
                "name='" + name + '\'' +
                ", user='" + user + '\'' +
                ", owner=" + owner +
                ", sla=" + sla +
                ", migrationConfig=" + migrationConfig +
                ", isReadyForJobMaster=" + isReadyForJobMaster +
                ", jobClusterConfigs=" + jobClusterConfigs +
                ", parameters=" + parameters +
                ", labels=" + labels +
                '}';
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        JobClusterDefinitionImpl that = (JobClusterDefinitionImpl) o;
        return isReadyForJobMaster == that.isReadyForJobMaster &&
                Objects.equals(name, that.name) &&
                Objects.equals(user, that.user) &&
                Objects.equals(owner, that.owner) &&
                Objects.equals(sla, that.sla) &&
                Objects.equals(migrationConfig, that.migrationConfig) &&
                Objects.equals(jobClusterConfigs, that.jobClusterConfigs) &&
                Objects.equals(parameters, that.parameters) &&
                Objects.equals(labels, that.labels);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, user, owner, sla, migrationConfig, isReadyForJobMaster,
                jobClusterConfigs, parameters, labels);
    }

    /**
     * Immutable record of a job that has reached a terminal state, kept for
     * listing/archival purposes.
     */
    public static class CompletedJob {

        private final String name;
        private final String jobId;
        private final String version;
        private final JobState state;
        private final long submittedAt;
        private final long terminatedAt;
        private final String user;
        private final List<Label> labelList;

        @JsonCreator
        @JsonIgnoreProperties(ignoreUnknown = true)
        public CompletedJob(
                @JsonProperty("name") String name,
                @JsonProperty("jobId") String jobId,
                @JsonProperty("version") String version,
                @JsonProperty("state") JobState state,
                @JsonProperty("submittedAt") long submittedAt,
                @JsonProperty("terminatedAt") long terminatedAt,
                @JsonProperty("user") String user,
                @JsonProperty("labels") List<Label> labels
        ) {
            this.name = name;
            this.jobId = jobId;
            this.version = version;
            this.state = state;
            this.submittedAt = submittedAt;
            this.terminatedAt = terminatedAt;
            this.user = user;
            this.labelList = labels;
        }

        public String getName() {
            return name;
        }

        public String getJobId() {
            return jobId;
        }

        public String getVersion() {
            return version;
        }

        /** Terminal state the job ended in. */
        public JobState getState() {
            return state;
        }

        /** Submission time, millis since epoch. */
        public long getSubmittedAt() {
            return submittedAt;
        }

        /** Termination time, millis since epoch. */
        public long getTerminatedAt() {
            return terminatedAt;
        }

        public String getUser() {
            return user;
        }

        public List<Label> getLabelList() {
            return labelList;
        }

        @Override
        public String toString() {
            return "CompletedJob{" +
                    "name='" + name + '\'' +
                    ", jobId='" + jobId + '\'' +
                    ", version='" + version + '\'' +
                    ", state=" + state +
                    ", submittedAt=" + submittedAt +
                    ", terminatedAt=" + terminatedAt +
                    ", user='" + user + '\'' +
                    ", labelList=" + labelList +
                    '}';
        }

        // NOTE(review): equals/hashCode intentionally(?) omit labelList even though
        // toString includes it — confirm before relying on label-sensitive equality.
        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            CompletedJob that = (CompletedJob) o;
            return submittedAt == that.submittedAt &&
                    terminatedAt == that.terminatedAt &&
                    Objects.equals(name, that.name) &&
                    Objects.equals(jobId, that.jobId) &&
                    Objects.equals(version, that.version) &&
                    state == that.state &&
                    Objects.equals(user, that.user);
        }

        @Override
        public int hashCode() {
            return Objects.hash(name, jobId, version, state, submittedAt, terminatedAt, user);
        }
    }

    /**
     * Fluent builder for {@link JobClusterDefinitionImpl}. Also supports copying
     * from an existing definition ({@link #from}) and merging an update onto an
     * existing definition ({@link #mergeConfigsAndOverrideRest}).
     */
    public static class Builder {

        List<JobClusterConfig> jobClusterConfigs = new ArrayList<>();
        JobOwner owner = null;
        SLA sla = new SLA(0, 0, null, null);
        WorkerMigrationConfig migrationConfig = WorkerMigrationConfig.DEFAULT;
        boolean isReadyForJobMaster = true;
        String name = null;
        String user = "default";
        List<Parameter> parameters = Lists.newArrayList();
        List<Label> labels = Lists.newArrayList();

        public Builder() {}

        public Builder withName(String name) {
            Preconditions.checkNotNull(name, "Cluster name cannot be null");
            Preconditions.checkArgument(!name.isEmpty(), "cluster Name cannot be empty");
            this.name = name;
            return this;
        }

        public Builder withUser(String user) {
            Preconditions.checkNotNull(user, "user cannot be null");
            Preconditions.checkArgument(!user.isEmpty(), "user cannot be empty");
            this.user = user;
            return this;
        }

        /** Appends a config, skipping exact duplicates to keep the history clean. */
        public Builder withJobClusterConfig(JobClusterConfig config) {
            Preconditions.checkNotNull(config, "config cannot be null");
            if (!jobClusterConfigs.contains(config)) {
                // skip if this config already exists
                jobClusterConfigs.add(config);
            }
            return this;
        }

        /** Replaces the config list wholesale (no duplicate filtering here). */
        public Builder withJobClusterConfigs(List<JobClusterConfig> jars) {
            Preconditions.checkNotNull(jars, "config list cannot be null");
            this.jobClusterConfigs = jars;
            return this;
        }

        public Builder withOwner(JobOwner owner) {
            Preconditions.checkNotNull(owner, "owner cannot be null");
            this.owner = owner;
            return this;
        }

        public Builder withSla(SLA sla) {
            Preconditions.checkNotNull(sla, "sla cannot be null");
            this.sla = sla;
            return this;
        }

        public Builder withMigrationConfig(WorkerMigrationConfig config) {
            Preconditions.checkNotNull(config, "migration config cannot be null");
            this.migrationConfig = config;
            return this;
        }

        public Builder withIsReadyForJobMaster(boolean ready) {
            this.isReadyForJobMaster = ready;
            return this;
        }

        public Builder withParameters(List<Parameter> ps) {
            this.parameters = ps;
            return this;
        }

        public Builder withLabels(List<Label> labels) {
            this.labels = labels;
            return this;
        }

        public Builder withLabel(Label label) {
            Preconditions.checkNotNull(label, "label cannot be null");
            this.labels.add(label);
            return this;
        }

        /** Copies every field of an existing definition into this builder. */
        public Builder from(IJobClusterDefinition defn) {
            migrationConfig = defn.getWorkerMigrationConfig();
            name = defn.getName();
            sla = defn.getSLA();
            isReadyForJobMaster = defn.getIsReadyForJobMaster();
            owner = defn.getOwner();
            user = defn.getUser();
            parameters = defn.getParameters();
            labels = defn.getLabels();
            // we don't want to duplicates but retain the order so cannot use Set
            for (JobClusterConfig jcConfig : defn.getJobClusterConfigs()) {
                if (!jobClusterConfigs.contains(jcConfig)) {
                    jobClusterConfigs.add(jcConfig);
                }
            }
            return this;
        }

        /**
         * Update semantics: keep the old definition's name and full config history,
         * append the new definition's latest config, and take every other field
         * (SLA, owner, labels, parameters, ...) from the new definition.
         */
        public Builder mergeConfigsAndOverrideRest(IJobClusterDefinition oldDefn, IJobClusterDefinition newDefn) {
            logger.info("Existing JobClusterConfigs {} ", oldDefn.getJobClusterConfigs());
            logger.info("New JobClusterConfig {} ", newDefn.getJobClusterConfig());
            this.jobClusterConfigs.addAll(oldDefn.getJobClusterConfigs());
            this.jobClusterConfigs.add(newDefn.getJobClusterConfig());
            logger.info("Merged JobClusterConfigs {} ", this.jobClusterConfigs);
            this.sla = newDefn.getSLA();
            this.parameters = newDefn.getParameters();
            this.labels = newDefn.getLabels();
            this.user = newDefn.getUser();
            this.migrationConfig = newDefn.getWorkerMigrationConfig();
            this.owner = newDefn.getOwner();
            this.isReadyForJobMaster = newDefn.getIsReadyForJobMaster();
            this.name = oldDefn.getName();
            return this;
        }

        /**
         * Builds the definition.
         *
         * @throws NullPointerException     if owner, name, user or configs are unset
         * @throws IllegalArgumentException if no configs were registered
         */
        public JobClusterDefinitionImpl build() {
            Preconditions.checkNotNull(owner);
            Preconditions.checkNotNull(name);
            Preconditions.checkNotNull(user);
            Preconditions.checkNotNull(jobClusterConfigs);
            Preconditions.checkArgument(!jobClusterConfigs.isEmpty());
            return new JobClusterDefinitionImpl(name, jobClusterConfigs, owner, user, sla,
                    migrationConfig, isReadyForJobMaster, parameters, labels);
        }
    }
}
4,335
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/domain/JobDefinition.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.domain;

import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Objects;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.google.common.base.Preconditions;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.command.InvalidJobException;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.runtime.parameter.Parameter;

/**
 * Immutable description of a single job submission: artifact/version, default
 * parameters and labels, SLA, subscription timeout and (optionally) the full
 * per-stage scheduling info. Validation of the SLA (and of the scheduling info,
 * when present) happens at construction time.
 */
public class JobDefinition {

    private final String name;
    private final String user;
    private final String artifactName;
    private final String version;
    private final List<Parameter> parameters;
    private final JobSla jobSla;
    private final long subscriptionTimeoutSecs;
    private final SchedulingInfo schedulingInfo;
    private final int withNumberOfStages;
    private final List<Label> labels;

    /**
     * Jackson-bound constructor. Null parameter/label lists become empty lists;
     * non-positive subscription timeouts are normalized to 0 (meaning "use default").
     *
     * @throws InvalidJobException if the SLA is missing/invalid, or if scheduling
     *                             info is present but malformed
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public JobDefinition(@JsonProperty("name") String name,
                         @JsonProperty("user") String user,
                         @JsonProperty("artifactName") String artifactName,
                         @JsonProperty("version") String version,
                         @JsonProperty("parameters") List<Parameter> parameters,
                         @JsonProperty("jobSla") JobSla jobSla,
                         @JsonProperty("subscriptionTimeoutSecs") long subscriptionTimeoutSecs,
                         @JsonProperty("schedulingInfo") SchedulingInfo schedulingInfo,
                         @JsonProperty("numberOfStages") int withNumberOfStages,
                         @JsonProperty("labels") List<Label> labels
    ) throws InvalidJobException {
        this.name = name;
        this.user = user;
        this.artifactName = artifactName;
        this.version = version;
        this.parameters = (parameters != null) ? parameters : new LinkedList<>();
        this.labels = (labels != null) ? labels : new LinkedList<>();
        this.jobSla = jobSla;
        // Normalize non-positive timeouts to 0.
        this.subscriptionTimeoutSecs = (subscriptionTimeoutSecs > 0) ? subscriptionTimeoutSecs : 0;
        this.schedulingInfo = schedulingInfo;
        this.withNumberOfStages = withNumberOfStages;
        // Scheduling info is optional at submit time (it may come from the cluster config).
        validate(true);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        JobDefinition that = (JobDefinition) o;
        // NOTE(review): schedulingInfo is deliberately(?) excluded here and in
        // hashCode — confirm before relying on scheduling-sensitive equality.
        return subscriptionTimeoutSecs == that.subscriptionTimeoutSecs &&
                withNumberOfStages == that.withNumberOfStages &&
                Objects.equals(name, that.name) &&
                Objects.equals(user, that.user) &&
                Objects.equals(artifactName, that.artifactName) &&
                Objects.equals(version, that.version) &&
                Objects.equals(parameters, that.parameters) &&
                Objects.equals(jobSla, that.jobSla) &&
                Objects.equals(labels, that.labels);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, user, artifactName, version, parameters, jobSla,
                subscriptionTimeoutSecs, labels, withNumberOfStages);
    }

    @Override
    public String toString() {
        return "JobDefinition{" +
                "name='" + name + '\'' +
                ", user='" + user + '\'' +
                ", artifactName='" + artifactName + '\'' +
                ", version='" + version + '\'' +
                ", parameters=" + parameters +
                ", jobSla=" + jobSla +
                ", subscriptionTimeoutSecs=" + subscriptionTimeoutSecs +
                ", schedulingInfo=" + schedulingInfo +
                ", labels=" + labels +
                ", withNumberOfStages=" + withNumberOfStages +
                '}';
    }

    /**
     * Validates the SLA and, unless {@code schedulingInfoOptional} allows its
     * absence, the scheduling info.
     */
    public void validate(boolean schedulingInfoOptional) throws InvalidJobException {
        validateSla();
        validateSchedulingInfo(schedulingInfoOptional);
    }

    private void validateSla() throws InvalidJobException {
        if (jobSla == null)
            throw new InvalidJobException("No Job SLA provided (likely incorrect job submit request)");
        if (jobSla.getDurationType() == null)
            throw new InvalidJobException("Invalid null duration type in job sla (likely incorrect job submit request)");
    }

    /** Validates the scheduling info, requiring it to be present. */
    public void validateSchedulingInfo() throws InvalidJobException {
        validateSchedulingInfo(false);
    }

    private void validateSchedulingInfo(boolean schedulingInfoOptional) throws InvalidJobException {
        if (schedulingInfoOptional && schedulingInfo == null)
            return;
        if (schedulingInfo == null)
            throw new InvalidJobException("No scheduling info provided");
        if (schedulingInfo.getStages() == null)
            throw new InvalidJobException("No stages defined in scheduling info");
        // Renamed from withNumberOfStages to avoid shadowing the field of the same name.
        int numStages = schedulingInfo.getStages().size();
        int startingIdx = 1;
        if (schedulingInfo.forStage(0) != null) {
            // A Job Master stage 0 exists: shift the validated range to 0..numStages-1.
            startingIdx = 0;
            numStages--;
        }
        for (int i = startingIdx; i <= numStages; i++) {
            StageSchedulingInfo stage = schedulingInfo.getStages().get(i);
            if (stage == null)
                throw new InvalidJobException("No definition for stage " + i +
                        " in scheduling info for " + numStages + " stage job");
            if (stage.getNumberOfInstances() < 1)
                throw new InvalidJobException("Number of instance for stage " + i +
                        " must be >0, not " + stage.getNumberOfInstances());
            MachineDefinition machineDefinition = stage.getMachineDefinition();
            if (machineDefinition.getCpuCores() <= 0)
                throw new InvalidJobException("cpuCores must be >0.0, not " + machineDefinition.getCpuCores());
            if (machineDefinition.getMemoryMB() <= 0)
                // Fixed: message previously said "<0.0" although the check requires a positive value.
                throw new InvalidJobException("memory must be >0.0, not " + machineDefinition.getMemoryMB());
            if (machineDefinition.getDiskMB() < 0)
                throw new InvalidJobException("disk must be >=0, not " + machineDefinition.getDiskMB());
            if (machineDefinition.getNumPorts() < 0)
                throw new InvalidJobException("numPorts must be >=0, not " + machineDefinition.getNumPorts());
        }
    }

    public String getName() {
        return name;
    }

    public String getUser() {
        return user;
    }

    public String getArtifactName() {
        return artifactName;
    }

    public String getVersion() {
        return version;
    }

    /** Unmodifiable view of the job's parameters. */
    public List<Parameter> getParameters() {
        return Collections.unmodifiableList(parameters);
    }

    public JobSla getJobSla() {
        return jobSla;
    }

    /** Seconds before an unsubscribed ephemeral job is reaped; 0 means default. */
    public long getSubscriptionTimeoutSecs() {
        return subscriptionTimeoutSecs;
    }

    /** May be null when the definition relies on the cluster's scheduling info. */
    public SchedulingInfo getSchedulingInfo() {
        return schedulingInfo;
    }

    /** Unmodifiable view of the job's labels. */
    public List<Label> getLabels() {
        return Collections.unmodifiableList(this.labels);
    }

    public int getNumberOfStages() {
        return this.withNumberOfStages;
    }

    /** Fluent builder for {@link JobDefinition}. */
    public static class Builder {

        private String name;
        private String user;
        private List<Parameter> parameters;
        private List<Label> labels;
        private String artifactName = null;
        private String version = null;
        private JobSla jobSla = new JobSla(0, 0, JobSla.StreamSLAType.Lossy,
                MantisJobDurationType.Transient, null);
        private long subscriptionTimeoutSecs = 0L;
        private SchedulingInfo schedulingInfo;
        private int withNumberOfStages = 1;

        public Builder() {
        }

        public Builder withName(String name) {
            this.name = name;
            return this;
        }

        public Builder withArtifactName(String artifactName) {
            this.artifactName = artifactName;
            return this;
        }

        public Builder withJobSla(JobSla sla) {
            this.jobSla = sla;
            return this;
        }

        public Builder withUser(String user) {
            this.user = user;
            return this;
        }

        public Builder withSchedulingInfo(SchedulingInfo schedInfo) {
            this.schedulingInfo = schedInfo;
            return this;
        }

        /** Ignored when scheduling info is set; build() derives the count from it. */
        public Builder withNumberOfStages(int stages) {
            this.withNumberOfStages = stages;
            return this;
        }

        public Builder withSubscriptionTimeoutSecs(long t) {
            this.subscriptionTimeoutSecs = t;
            return this;
        }

        public Builder withParameters(List<Parameter> params) {
            this.parameters = params;
            return this;
        }

        public Builder withLabels(List<Label> labels) {
            this.labels = labels;
            return this;
        }

        public Builder withVersion(String version) {
            this.version = version;
            return this;
        }

        /** Copies every field of an existing definition into this builder. */
        public Builder from(final JobDefinition jobDefinition) {
            this.withJobSla(jobDefinition.getJobSla());
            this.withNumberOfStages(jobDefinition.getNumberOfStages());
            this.withSubscriptionTimeoutSecs(jobDefinition.getSubscriptionTimeoutSecs());
            this.withUser(jobDefinition.user);
            this.withSchedulingInfo(jobDefinition.getSchedulingInfo());
            this.withParameters(jobDefinition.getParameters());
            this.withLabels(jobDefinition.getLabels());
            this.withName(jobDefinition.name);
            this.withArtifactName(jobDefinition.artifactName);
            this.withVersion(jobDefinition.getVersion());
            return this;
        }

        /**
         * Builds the definition. When scheduling info is present the stage count
         * is derived from it, overriding {@link #withNumberOfStages}.
         *
         * @throws InvalidJobException if construction-time validation fails
         */
        public JobDefinition build() throws InvalidJobException {
            Preconditions.checkNotNull(name, "cluster name cannot be null");
            Preconditions.checkNotNull(jobSla, "job sla cannot be null");
            if (schedulingInfo != null) {
                withNumberOfStages = schedulingInfo.getStages().size();
            }
            // Fixed message: the check requires a positive stage count.
            Preconditions.checkArgument(withNumberOfStages > 0, "Number of stages must be greater than 0");
            return new JobDefinition(name, user, artifactName, version, parameters, jobSla,
                    subscriptionTimeoutSecs, schedulingInfo, withNumberOfStages, labels);
        }
    }
}
4,336
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/store/JobNameAlreadyExistsException.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.store; public class JobNameAlreadyExistsException extends Exception { public JobNameAlreadyExistsException(String message) { super(message); } public JobNameAlreadyExistsException(String message, Throwable cause) { super(message, cause); } }
4,337
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/store/NamedJobDeleteException.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.store; public class NamedJobDeleteException extends Exception { public NamedJobDeleteException(String reason) { super(reason); } public NamedJobDeleteException(String reason, Throwable cause) { super(reason, cause); } }
4,338
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/store/MantisJobMetadata.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.store;

import java.net.URL;
import java.util.Collection;
import java.util.List;

import io.mantisrx.common.Label;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.parameter.Parameter;

/**
 * Read-only view of the persisted metadata for a single Mantis job: identity,
 * submission details, SLA, current state, and access to the per-stage/per-worker
 * metadata beneath it.
 */
public interface MantisJobMetadata {

    /** Sentinel startedAt value for a job that has not started. */
    long DEFAULT_STARTED_AT_EPOCH = 0;

    String getJobId();

    /** Job cluster name this job was submitted under. */
    String getName();

    String getUser();

    /** Submission time, millis since epoch. */
    long getSubmittedAt();

    /** Start time, millis since epoch, or {@link #DEFAULT_STARTED_AT_EPOCH} if not started. */
    long getStartedAt();

    /** Location of the job artifact jar. */
    URL getJarUrl();

    JobSla getSla();

    long getSubscriptionTimeoutSecs();

    MantisJobState getState();

    List<Parameter> getParameters();

    List<Label> getLabels();

    /** Metadata for all stages of this job. */
    Collection<? extends MantisStageMetadata> getStageMetadata();

    int getNumStages();

    /** Metadata for a single stage, identified by stage number. */
    MantisStageMetadata getStageMetadata(int stageNum);

    /**
     * Looks up a worker by its (stage, index) coordinates.
     *
     * @throws InvalidJobException if no such stage/index exists
     */
    MantisWorkerMetadata getWorkerByIndex(int stageNumber, int workerIndex) throws InvalidJobException;

    /**
     * Looks up a worker by its job-wide worker number.
     *
     * @throws InvalidJobException if no such worker number exists
     */
    MantisWorkerMetadata getWorkerByNumber(int workerNumber) throws InvalidJobException;

    /** Acquires this job's lock; close the returned handle to release it. */
    AutoCloseable obtainLock();

    /** Next monotonically increasing worker number available for this job. */
    int getNextWorkerNumberToUse();

    WorkerMigrationConfig getMigrationConfig();
}
4,339
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/store/MantisWorkerMetadataWritable.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.store;

import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.locks.ReentrantLock;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.core.domain.WorkerId;

/**
 * Mutable, JSON-serializable metadata for a single worker of a Mantis job:
 * identity (job id / stage / index / number), assigned ports, placement info,
 * and the lifecycle state machine with per-state timestamps
 * (Accepted -> Launched -> StartInitiated -> Started -> Completed/Failed).
 */
public class MantisWorkerMetadataWritable implements MantisWorkerMetadata {

    // Derived from (jobId, workerIndex, workerNumber) in the constructor; not serialized.
    @JsonIgnore
    private final WorkerId workerId;
    // NOTE(review): declared but not used anywhere in this class's visible code.
    @JsonIgnore
    private final ReentrantLock lock = new ReentrantLock();
    private int workerIndex;
    private int workerNumber;
    private String jobId;
    private int stageNum;
    private int numberOfPorts;
    private int metricsPort;
    private int consolePort;
    private int debugPort = -1;          // -1 means no debug port assigned
    private int customPort;
    private List<Integer> ports;
    private volatile MantisJobState state;
    private String slave;
    private String slaveID;
    private Optional<String> cluster = Optional.empty();
    // Per-state timestamps (millis since epoch); 0 means the state was never reached.
    private long acceptedAt = 0;
    private long launchedAt = 0;
    private long startingAt = 0;
    private long startedAt = 0;
    private long completedAt = 0;
    private JobCompletedReason reason;
    private int resubmitOf = -1;         // worker number this worker replaced, or -1
    private int totalResubmitCount = 0;
    // Heartbeats are transient runtime data, not persisted.
    @JsonIgnore
    private volatile long lastHeartbeatAt = 0;

    /**
     * Jackson-bound constructor. A freshly created worker starts in the
     * {@code Accepted} state with {@code acceptedAt} set to "now" and an empty
     * port list.
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public MantisWorkerMetadataWritable(@JsonProperty("workerIndex") int workerIndex,
                                        @JsonProperty("workerNumber") int workerNumber,
                                        @JsonProperty("jobId") String jobId,
                                        @JsonProperty("stageNum") int stageNum,
                                        @JsonProperty("numberOfPorts") int numberOfPorts) {
        this.workerIndex = workerIndex;
        this.workerNumber = workerNumber;
        this.jobId = jobId;
        this.workerId = new WorkerId(jobId, workerIndex, workerNumber);
        this.stageNum = stageNum;
        this.numberOfPorts = numberOfPorts;
        this.state = MantisJobState.Accepted;
        this.acceptedAt = System.currentTimeMillis();
        this.ports = new ArrayList<>();
    }

    @Override
    public int getWorkerIndex() {
        return workerIndex;
    }

    @Override
    public int getWorkerNumber() {
        return workerNumber;
    }

    @Override
    public WorkerId getWorkerId() {
        return workerId;
    }

    @Override
    public String getJobId() {
        return jobId;
    }

    @Override
    public int getStageNum() {
        return stageNum;
    }

    @Override
    public int getNumberOfPorts() {
        return numberOfPorts;
    }

    /** Returns the live (mutable) port list — callers should treat it as read-only. */
    @Override
    public List<Integer> getPorts() {
        return ports;
    }

    @Override
    public void addPorts(List<Integer> ports) {
        this.ports.addAll(ports);
    }

    @Override
    public int getTotalResubmitCount() {
        return totalResubmitCount;
    }

    @Override
    public int getMetricsPort() {
        return metricsPort;
    }

    public void setMetricsPort(int metricsPort) {
        this.metricsPort = metricsPort;
    }

    /** Returns -1 when no debug port is assigned. */
    @Override
    public int getDebugPort() {
        return debugPort;
    }

    public void setDebugPort(int debugPort) {
        this.debugPort = debugPort;
    }

    @Override
    public int getConsolePort() {
        return consolePort;
    }

    public void setConsolePort(int port) {
        this.consolePort = port;
    }

    @Override
    public int getCustomPort() {
        return customPort;
    }

    public void setCustomPort(int port) {
        this.customPort = port;
    }

    /** Worker number this worker is a resubmission of, or -1 if original. */
    @Override
    public int getResubmitOf() {
        return resubmitOf;
    }

    @JsonIgnore
    public void setResubmitInfo(int resubmitOf, int totalCount) {
        this.resubmitOf = resubmitOf;
        this.totalResubmitCount = totalCount;
    }

    @JsonIgnore
    public long getLastHeartbeatAt() {
        return lastHeartbeatAt;
    }

    @JsonIgnore
    public void setLastHeartbeatAt(long lastHeartbeatAt) {
        this.lastHeartbeatAt = lastHeartbeatAt;
    }

    // Rejects transitions the MantisJobState state machine does not permit.
    private void validateStateChange(MantisJobState newState) throws InvalidJobStateChangeException {
        if (!state.isValidStateChgTo(newState))
            throw new InvalidJobStateChangeException(jobId, state, newState);
    }

    /**
     * Sets the worker state WITHOUT validating the transition. Added for use by
     * the new Mantis Master so it can reuse the old DAOs, which may replay states
     * out of order.
     *
     * @param state  new state to record
     * @param when   timestamp (millis since epoch) for the state's timestamp field
     * @param reason completion reason; when null, defaults to Lost for Failed and
     *               Normal for Completed
     */
    public void setStateNoValidation(MantisJobState state, long when, JobCompletedReason reason) {
        this.state = state;
        switch (state) {
        case Accepted:
            this.acceptedAt = when;
            break;
        case Launched:
            this.launchedAt = when;
            break;
        case StartInitiated:
            this.startingAt = when;
            break;
        case Started:
            this.startedAt = when;
            break;
        case Failed:
            this.completedAt = when;
            this.reason = reason == null ? JobCompletedReason.Lost : reason;
            break;
        case Completed:
            this.completedAt = when;
            this.reason = reason == null ? JobCompletedReason.Normal : reason;
            break;
        default:
            assert false : "Unexpected job state to set";
        }
    }

    /**
     * Sets the worker state after validating that the transition is legal.
     *
     * @throws InvalidJobStateChangeException if the current state does not permit
     *                                        a change to {@code state}
     */
    public void setState(MantisJobState state, long when, JobCompletedReason reason) throws InvalidJobStateChangeException {
        validateStateChange(state);
        this.state = state;
        switch (state) {
        case Accepted:
            this.acceptedAt = when;
            break;
        case Launched:
            this.launchedAt = when;
            break;
        case StartInitiated:
            this.startingAt = when;
            break;
        case Started:
            this.startedAt = when;
            break;
        case Failed:
            this.completedAt = when;
            this.reason = reason == null ? JobCompletedReason.Lost : reason;
            break;
        case Completed:
            this.completedAt = when;
            this.reason = reason == null ? JobCompletedReason.Normal : reason;
            break;
        default:
            assert false : "Unexpected job state to set";
        }
    }

    @Override
    public MantisJobState getState() {
        return state;
    }

    /** Hostname of the VM this worker was placed on. */
    @Override
    public String getSlave() {
        return slave;
    }

    public void setSlave(String slave) {
        this.slave = slave;
    }

    public Optional<String> getCluster() {
        return cluster;
    }

    public void setCluster(final Optional<String> cluster) {
        this.cluster = cluster;
    }

    @Override
    public String getSlaveID() {
        return slaveID;
    }

    public void setSlaveID(String slaveID) {
        this.slaveID = slaveID;
    }

    @Override
    public long getAcceptedAt() {
        return acceptedAt;
    }

    public void setAcceptedAt(long when) {
        this.acceptedAt = when;
    }

    @Override
    public long getLaunchedAt() {
        return launchedAt;
    }

    public void setLaunchedAt(long when) {
        this.launchedAt = when;
    }

    @Override
    public long getStartingAt() {
        return startingAt;
    }

    public void setStartingAt(long when) {
        this.startingAt = when;
    }

    @Override
    public long getStartedAt() {
        return startedAt;
    }

    public void setStartedAt(long when) {
        this.startedAt = when;
    }

    @Override
    public long getCompletedAt() {
        return completedAt;
    }

    public void setCompletedAt(long when) {
        this.completedAt = when;
    }

    @Override
    public JobCompletedReason getReason() {
        return reason;
    }

    public void setReason(JobCompletedReason reason) {
        this.reason = reason;
    }

    @Override
    public String toString() {
        // Zero timestamps (state never reached) are omitted from the output.
        return "Worker " + workerNumber + " state=" + state +
                ", acceptedAt=" + acceptedAt +
                ((launchedAt == 0) ? "" : ", launchedAt=" + launchedAt) +
                ((startingAt == 0) ? "" : ", startingAt=" + startingAt) +
                ((startedAt == 0) ? "" : ", startedAt=" + startedAt) +
                ((completedAt == 0) ? "" : ", completedAt=" + completedAt) +
                ", #ports=" + ports.size() +
                ", ports=" + ports;
    }
}
4,340
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/store/MantisJobMetadataWritable.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.store;

import java.net.URL;
import java.util.Collection;
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReentrantLock;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.parameter.Parameter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;


/**
 * Mutable, Jackson-serializable metadata record for a single Mantis job.
 * <p>
 * Job-level attributes (id, name, SLA, parameters, labels, state, ...) are persisted as JSON;
 * the per-stage and per-worker maps are marked {@code @JsonIgnore} and are rebuilt by the
 * storage layer, which stores stages and workers in separate records and re-attaches them via
 * {@link #addJobStageIfAbsent} / {@link #addWorkerMedata}.
 * <p>
 * NOTE(review): the getter/field shape here is the persisted JSON schema — do not rename or
 * restructure members without a storage migration.
 */
public class MantisJobMetadataWritable implements MantisJobMetadata {

    private static final Logger logger = LoggerFactory.getLogger(MantisJobMetadataWritable.class);
    private final String user;
    private final JobSla sla;
    private final long subscriptionTimeoutSecs;
    private final List<Label> labels;
    // stage number -> stage metadata; rebuilt at load time, not serialized.
    @JsonIgnore
    private final ConcurrentMap<Integer, MantisStageMetadataWritable> stageMetadataMap;
    // worker number -> stage number reverse index; rebuilt at load time, not serialized.
    @JsonIgnore
    private final ConcurrentMap<Integer, Integer> workerNumberToStageMap;
    @JsonIgnore
    private final ReentrantLock lock = new ReentrantLock();
    private String jobId;
    private String name;
    private long submittedAt;
    // DEFAULT_STARTED_AT_EPOCH presumably marks "not yet started" — defined on MantisJobMetadata.
    private long startedAt = DEFAULT_STARTED_AT_EPOCH;
    private URL jarUrl;
    private volatile MantisJobState state;
    private int numStages;
    private List<Parameter> parameters;
    private int nextWorkerNumberToUse = 1;
    private WorkerMigrationConfig migrationConfig;
    // TODO: figure out what object we store for sink.
    @JsonIgnore
    private Object sink;

    /**
     * Jackson creator; unknown JSON properties are ignored so older/newer payloads still load.
     * Null {@code state} defaults to Accepted; null {@code parameters}/{@code labels} become
     * empty lists; null {@code migrationConfig} falls back to {@link WorkerMigrationConfig#DEFAULT}.
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public MantisJobMetadataWritable(@JsonProperty("jobId") String jobId,
                                     @JsonProperty("name") String name,
                                     @JsonProperty("user") String user,
                                     @JsonProperty("submittedAt") long submittedAt,
                                     @JsonProperty("startedAt") long startedAt,
                                     @JsonProperty("jarUrl") URL jarUrl,
                                     @JsonProperty("numStages") int numStages,
                                     @JsonProperty("sla") JobSla sla,
                                     @JsonProperty("state") MantisJobState state,
                                     @JsonProperty("subscriptionTimeoutSecs") long subscriptionTimeoutSecs,
                                     @JsonProperty("parameters") List<Parameter> parameters,
                                     @JsonProperty("nextWorkerNumberToUse") int nextWorkerNumberToUse,
                                     @JsonProperty("migrationConfig") WorkerMigrationConfig migrationConfig,
                                     @JsonProperty("labels") List<Label> labels) {
        this.jobId = jobId;
        this.name = name;
        this.user = user;
        this.submittedAt = submittedAt;
        this.startedAt = startedAt;
        this.jarUrl = jarUrl;
        this.numStages = numStages;
        this.sla = sla;
        this.state = state == null ? MantisJobState.Accepted : state;
        this.subscriptionTimeoutSecs = subscriptionTimeoutSecs;
        this.stageMetadataMap = new ConcurrentHashMap<>();
        this.workerNumberToStageMap = new ConcurrentHashMap<>();
        if (parameters == null) {
            this.parameters = new LinkedList<Parameter>();
        } else {
            this.parameters = parameters;
        }
        if (labels == null) {
            this.labels = new LinkedList<>();
        } else {
            this.labels = labels;
        }
        this.nextWorkerNumberToUse = nextWorkerNumberToUse;
        this.migrationConfig = Optional.ofNullable(migrationConfig).orElse(WorkerMigrationConfig.DEFAULT);
    }

    /**
     * Acquires this job's mutation lock and returns a handle that releases it on close,
     * enabling try-with-resources style locking. Closing on a thread that does not hold
     * the lock throws IllegalMonitorStateException (from ReentrantLock.unlock).
     */
    @Override
    public AutoCloseable obtainLock() {
        lock.lock();
        return new AutoCloseable() {
            @Override
            public void close() throws IllegalMonitorStateException {
                lock.unlock();
            }
        };
    }

    @Override
    public String getJobId() {
        return jobId;
    }

    @Override
    public String getName() {
        return name;
    }

    @Override
    public String getUser() {
        return user;
    }

    @Override
    public long getSubmittedAt() {
        return submittedAt;
    }

    @Override
    public long getStartedAt() {
        return startedAt;
    }

    @Override
    public URL getJarUrl() {
        return jarUrl;
    }

    @Override
    public JobSla getSla() {
        return sla;
    }

    @Override
    public List<Parameter> getParameters() {
        return parameters;
    }

    @Override
    public List<Label> getLabels() {
        return labels;
    }

    @Override
    public long getSubscriptionTimeoutSecs() {
        return subscriptionTimeoutSecs;
    }

    /** Next worker number to hand out; resubmitted workers get numbers greater than those they replace. */
    @Override
    public int getNextWorkerNumberToUse() {
        return nextWorkerNumberToUse;
    }

    public void setNextWorkerNumberToUse(int n) {
        this.nextWorkerNumberToUse = n;
    }

    @Override
    public WorkerMigrationConfig getMigrationConfig() {
        return this.migrationConfig;
    }

    // Package-private: validated job state transition; throws if the change is not legal.
    void setJobState(MantisJobState state) throws InvalidJobStateChangeException {
        if (!this.state.isValidStateChgTo(state))
            throw new InvalidJobStateChangeException(jobId, this.state, state);
        this.state = state;
    }

    @Override
    public MantisJobState getState() {
        return state;
    }

    @JsonIgnore
    @Override
    public Collection<? extends MantisStageMetadata> getStageMetadata() {
        return stageMetadataMap.values();
    }

    @Override
    public int getNumStages() {
        return numStages;
    }

    @JsonIgnore
    @Override
    public MantisStageMetadata getStageMetadata(int stageNum) {
        return stageMetadataMap.get(stageNum);
    }

    /**
     * Add job stage if absent, returning true if it was actually added.
     *
     * @param msmd The stage's metadata object.
     *
     * @return true if actually added, false otherwise.
     */
    public boolean addJobStageIfAbsent(MantisStageMetadataWritable msmd) {
        return stageMetadataMap.putIfAbsent(msmd.getStageNum(), msmd) == null;
    }

    /**
     * Registers worker metadata with its stage, replacing {@code replacedWorker} (may be null)
     * at the same index, and updates the worker-number -> stage reverse index.
     * (Method name typo "Medata" is preserved: it is part of the public API.)
     *
     * @return true if the stage accepted the worker for its index, false otherwise.
     * @throws InvalidJobException if the replacement is inconsistent with the stage's index map.
     */
    public boolean addWorkerMedata(int stageNum, MantisWorkerMetadata workerMetadata, MantisWorkerMetadata replacedWorker)
            throws InvalidJobException {
        boolean result = true;
        if (!stageMetadataMap.get(stageNum).replaceWorkerIndex(workerMetadata, replacedWorker))
            result = false;
        Integer integer = workerNumberToStageMap.put(workerMetadata.getWorkerNumber(), stageNum);
        if (integer != null && integer != stageNum) {
            // Worker numbers are expected to be unique across stages; a remap indicates corrupt state.
            logger.error(String.format("Unexpected to put worker number mapping from %d to stage %d for job %s, prev mapping to stage %d",
                    workerMetadata.getWorkerNumber(), stageNum, workerMetadata.getJobId(), integer));
        }
        return result;
    }

    /**
     * @throws InvalidJobException if the stage does not exist (worker index reported as given).
     */
    @JsonIgnore
    @Override
    public MantisWorkerMetadata getWorkerByIndex(int stageNumber, int workerIndex) throws InvalidJobException {
        MantisStageMetadata stage = stageMetadataMap.get(stageNumber);
        if (stage == null)
            throw new InvalidJobException(jobId, stageNumber, workerIndex);
        return stage.getWorkerByIndex(workerIndex);
    }

    /**
     * Looks a worker up by its globally-unique (per job) worker number via the reverse index.
     *
     * @throws InvalidJobException if the number is unknown or its stage is missing.
     */
    @JsonIgnore
    @Override
    public MantisWorkerMetadata getWorkerByNumber(int workerNumber) throws InvalidJobException {
        Integer stageNumber = workerNumberToStageMap.get(workerNumber);
        if (stageNumber == null)
            throw new InvalidJobException(jobId, -1, workerNumber);
        MantisStageMetadata stage = stageMetadataMap.get(stageNumber);
        if (stage == null)
            throw new InvalidJobException(jobId, stageNumber, workerNumber);
        return stage.getWorkerByWorkerNumber(workerNumber);
    }

    @JsonIgnore
    public int getMaxWorkerNumber() {
        // Expected to be called only during initialization, no need to synchronize/lock.
        // Resubmitted workers are expected to have a worker number greater than those they replace.
        int max = -1;
        for (int id : workerNumberToStageMap.keySet())
            if (max < id)
                max = id;
        return max;
    }

    @Override
    public String toString() {
        return "MantisJobMetadataWritable{" +
                "user='" + user + '\'' +
                ", sla=" + sla +
                ", subscriptionTimeoutSecs=" + subscriptionTimeoutSecs +
                ", labels=" + labels +
                ", stageMetadataMap=" + stageMetadataMap +
                ", workerNumberToStageMap=" + workerNumberToStageMap +
                ", jobId='" + jobId + '\'' +
                ", name='" + name + '\'' +
                ", submittedAt=" + submittedAt +
                ", startedAt=" + startedAt +
                ", jarUrl=" + jarUrl +
                ", state=" + state +
                ", numStages=" + numStages +
                ", parameters=" + parameters +
                ", nextWorkerNumberToUse=" + nextWorkerNumberToUse +
                ", migrationConfig=" + migrationConfig +
                '}';
    }
}
4,341
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/store/InvalidNamedJobException.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.store; public class InvalidNamedJobException extends Exception { public InvalidNamedJobException(String message) { super(message); } public InvalidNamedJobException(String message, Throwable cause) { super(message, cause); } }
4,342
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/store/SimpleCachedFileStorageProvider.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.store; import java.io.File; import java.io.FileInputStream; import java.io.FilenameFilter; import java.io.IOException; import java.io.PrintWriter; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference; import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature; import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper; import io.mantisrx.shaded.com.fasterxml.jackson.datatype.jdk8.Jdk8Module; import com.netflix.fenzo.functions.Action1; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observable; /** * Simple File based storage provider. Intended mainly as a sample implementation for * {@link MantisStorageProvider} interface. This implementation is complete in its functionality, but, isn't * expected to be scalable or performant for production loads. * <P>This implementation uses <code>/tmp/MantisSpool/</code> as the spool directory. The directory is created * if not present already. 
It will fail only if either a file with that name exists or if a directory with that * name exists but isn't writable.</P> */ public class SimpleCachedFileStorageProvider implements MantisStorageProvider { private final static String SPOOL_DIR = "/tmp/MantisSpool"; private final static String ARCHIVE_DIR = "/tmp/MantisArchive"; private static final Logger logger = LoggerFactory.getLogger(SimpleCachedFileStorageProvider.class); private static final String NAMED_JOBS_DIR = SPOOL_DIR + "/namedJobs"; private static final String NAMED_JOBS_COMPLETED_JOBS_FILE_NAME_SUFFIX = "-completedJobs"; private static final String ACTIVE_VMS_FILENAME = "activeVMs"; private final ObjectMapper mapper = new ObjectMapper().registerModule(new Jdk8Module()); private boolean _debug = false; public SimpleCachedFileStorageProvider() { logger.debug(SimpleCachedFileStorageProvider.class.getName() + " created"); mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); } private static String getWorkerFilename(String prefix, String jobId, int workerIndex, int workerNumber) { return prefix + File.separator + "Worker-" + jobId + "-" + workerIndex + "-" + workerNumber; } @Override public void storeNewJob(MantisJobMetadataWritable jobMetadata) throws JobAlreadyExistsException, IOException { File tmpFile = new File(SPOOL_DIR + "/Job-" + jobMetadata.getJobId()); if (!tmpFile.createNewFile()) { throw new JobAlreadyExistsException(jobMetadata.getJobId()); } try (PrintWriter pwrtr = new PrintWriter(tmpFile)) { mapper.writeValue(pwrtr, jobMetadata); } } @Override public void updateJob(MantisJobMetadataWritable jobMetadata) throws InvalidJobException, IOException { File jobFile = new File(getJobFileName(SPOOL_DIR, jobMetadata.getJobId())); if (!jobFile.exists()) { throw new InvalidJobException(jobMetadata.getJobId()); } jobFile.delete(); jobFile.createNewFile(); try (PrintWriter pwrtr = new PrintWriter(jobFile)) { mapper.writeValue(pwrtr, jobMetadata); } } private String 
getJobFileName(String dirName, String jobId) { return dirName + "/Job-" + jobId; } @Override public void archiveJob(String jobId) throws IOException { File jobFile = new File(getJobFileName(SPOOL_DIR, jobId)); jobFile.renameTo(new File(getJobFileName(ARCHIVE_DIR, jobId))); archiveStages(jobId); archiveWorkers(jobId); } @Override public MantisJobMetadataWritable loadArchivedJob(String jobId) throws IOException { File jobFile = new File(getJobFileName(ARCHIVE_DIR, jobId)); MantisJobMetadataWritable job = null; if (jobFile.exists()) { try (FileInputStream fis = new FileInputStream(jobFile)) { job = mapper.readValue(fis, MantisJobMetadataWritable.class); } for (MantisStageMetadataWritable stage : loadArchivedJobStages(jobId)) job.addJobStageIfAbsent(stage); for (MantisWorkerMetadataWritable worker : loadArchivedJobWorkers(jobId, job.getNextWorkerNumberToUse())) { try { job.addWorkerMedata(worker.getStageNum(), worker, null); } catch (InvalidJobException e) { logger.warn("Unexpected error adding worker index=" + worker.getWorkerIndex() + ", number=" + worker.getWorkerNumber() + " for job " + jobId + ": " + e.getMessage(), e); } } } return job; } private List<MantisStageMetadataWritable> loadArchivedJobStages(String jobId) throws IOException { File archiveDirFile = new File(ARCHIVE_DIR); List<MantisStageMetadataWritable> result = new LinkedList<>(); for (File jobFile : archiveDirFile.listFiles((dir, name) -> { return name.startsWith("Stage-" + jobId + "-"); })) { try (FileInputStream fis = new FileInputStream(jobFile)) { result.add(mapper.readValue(fis, MantisStageMetadataWritable.class)); } } return result; } private List<MantisWorkerMetadataWritable> loadArchivedJobWorkers(String jobId, int maxWorkerNumber) throws IOException { File archiveDir = new File(ARCHIVE_DIR); List<MantisWorkerMetadataWritable> result = new LinkedList<>(); for (File wFile : archiveDir.listFiles((dir, name) -> { return name.startsWith("Worker-" + jobId + "-"); })) { try (FileInputStream fis = 
new FileInputStream(wFile)) { result.add(mapper.readValue(fis, MantisWorkerMetadataWritable.class)); } } return result; } @Override public void deleteJob(String jobId) throws InvalidJobException, IOException { File tmpFile = new File(SPOOL_DIR + "/Job-" + jobId); tmpFile.delete(); deleteFiles(SPOOL_DIR, jobId, "Stage-"); deleteFiles(SPOOL_DIR, jobId, "Worker-"); tmpFile = new File(ARCHIVE_DIR + "/Job-" + jobId); tmpFile.delete(); deleteFiles(ARCHIVE_DIR, jobId, "Stage-"); deleteFiles(ARCHIVE_DIR, jobId, "Worker-"); } private void deleteFiles(String dirName, final String jobId, final String filePrefix) { File spoolDir = new File(dirName); for (File stageFile : spoolDir.listFiles((dir, name) -> { return name.startsWith(filePrefix + jobId + "-"); })) { stageFile.delete(); } } @Override public void storeMantisStage(MantisStageMetadataWritable msmd) throws IOException { storeStage(msmd, false); } private void storeStage(MantisStageMetadataWritable msmd, boolean rewrite) throws IOException { File stageFile = new File(getStageFileName(SPOOL_DIR, msmd.getJobId(), msmd.getStageNum())); if (rewrite) stageFile.delete(); try {stageFile.createNewFile();} catch (SecurityException se) { throw new IOException("Can't create new file " + stageFile.getAbsolutePath(), se); } try (PrintWriter pwrtr = new PrintWriter(stageFile)) { mapper.writeValue(pwrtr, msmd); } } @Override public void updateMantisStage(MantisStageMetadataWritable msmd) throws IOException { storeStage(msmd, true); } private void archiveStages(String jobId) { File spoolDir = new File(SPOOL_DIR); for (File sFile : spoolDir.listFiles((dir, name) -> { return name.startsWith("Stage-" + jobId + "-"); })) { sFile.renameTo(new File(ARCHIVE_DIR + File.separator + sFile.getName())); } } private String getStageFileName(String dirName, String jobId, int stageNum) { return dirName + "/Stage-" + jobId + "-" + stageNum; } @Override public void storeWorker(MantisWorkerMetadataWritable workerMetadata) throws IOException { 
storeWorker(workerMetadata.getJobId(), workerMetadata, false); } @Override public void storeWorkers(String jobId, List<MantisWorkerMetadataWritable> workers) throws IOException { for (MantisWorkerMetadataWritable w : workers) storeWorker(w); } @Override public void storeAndUpdateWorkers(MantisWorkerMetadataWritable worker1, MantisWorkerMetadataWritable worker2) throws InvalidJobException, IOException { if (!worker1.getJobId().equals(worker2.getJobId())) throw new InvalidJobException(worker1.getJobId()); // As the name indicates, this is a simple storage implementation that does not actually have the // atomicity. Instead, we update worker2, followed by storing worker1 updateWorker(worker2); storeWorker(worker1); } @Override public void updateWorker(MantisWorkerMetadataWritable mwmd) throws IOException { storeWorker(mwmd.getJobId(), mwmd, true); } private void createDir(String dirName) { File spoolDirLocation = new File(dirName); if (spoolDirLocation.exists() && !(spoolDirLocation.isDirectory() && spoolDirLocation.canWrite())) throw new UnsupportedOperationException("Directory [" + dirName + "] not writeable"); if (!spoolDirLocation.exists()) try {spoolDirLocation.mkdirs();} catch (SecurityException se) { throw new UnsupportedOperationException("Can't create dir for writing state - " + se.getMessage(), se); } } @Override public List<MantisJobMetadataWritable> initJobs() throws IOException { createDir(SPOOL_DIR); createDir(ARCHIVE_DIR); List<MantisJobMetadataWritable> retList = new ArrayList<>(); File spoolDirFile = new File(SPOOL_DIR); for (File jobFile : spoolDirFile.listFiles((dir, name) -> { return name.startsWith("Job-"); })) { try (FileInputStream fis = new FileInputStream(jobFile)) { MantisJobMetadataWritable mjmd = mapper.readValue(fis, MantisJobMetadataWritable.class); for (MantisStageMetadataWritable msmd : readStagesFor(spoolDirFile, mjmd.getJobId())) mjmd.addJobStageIfAbsent(msmd); for (MantisWorkerMetadataWritable mwmd : readWorkersFor(spoolDirFile, 
mjmd.getJobId())) mjmd.addWorkerMedata(mwmd.getStageNum(), mwmd, null); retList.add(mjmd); } catch (IOException e) { logger.error("Error reading job metadata - " + e.getMessage()); } catch (InvalidJobException e) { // shouldn't happen logger.warn(e.getMessage()); } } if (_debug) { // print all jobs read for (MantisJobMetadata mjmd : retList) { logger.info(" JOB " + mjmd.getJobId()); for (MantisStageMetadata msmd : mjmd.getStageMetadata()) { logger.info(" Stage " + msmd.getStageNum() + " of " + msmd.getNumStages()); for (MantisWorkerMetadata mwmd : msmd.getWorkerByIndexMetadataSet()) { logger.info(" " + mwmd); } } } } return retList; } @Override public Observable<MantisJobMetadata> initArchivedJobs() { final File archiveDir = new File(ARCHIVE_DIR); return Observable.create(subscriber -> { for (File jobFile : archiveDir.listFiles((dir, name) -> { return name.startsWith("Job-"); })) { try (FileInputStream fis = new FileInputStream(jobFile)) { MantisJobMetadataWritable job = mapper.readValue(fis, MantisJobMetadataWritable.class); for (MantisStageMetadataWritable msmd : readStagesFor(archiveDir, job.getJobId())) job.addJobStageIfAbsent(msmd); for (MantisWorkerMetadataWritable mwmd : readWorkersFor(archiveDir, job.getJobId())) { try { job.addWorkerMedata(mwmd.getStageNum(), mwmd, null); } catch (InvalidJobException e) { // shouldn't happen } } subscriber.onNext(job); } catch (IOException e) { subscriber.onError(e); } } subscriber.onCompleted(); }); } @Override public List<NamedJob> initNamedJobs() throws IOException { createDir(NAMED_JOBS_DIR); List<NamedJob> returnList = new ArrayList<>(); File namedJobsDir = new File(NAMED_JOBS_DIR); for (File namedJobFile : namedJobsDir.listFiles( (dir, name) -> !name.endsWith(NAMED_JOBS_COMPLETED_JOBS_FILE_NAME_SUFFIX) )) { try (FileInputStream fis = new FileInputStream(namedJobFile)) { returnList.add(mapper.readValue(fis, NamedJob.class)); } } return returnList; } @Override public Observable<NamedJob.CompletedJob> 
initNamedJobCompletedJobs() throws IOException { createDir(NAMED_JOBS_DIR); List<NamedJob> returnList = new ArrayList<>(); File namedJobsDir = new File(NAMED_JOBS_DIR); return Observable.create(subscriber -> { for (File namedJobFile : namedJobsDir.listFiles( (dir, name) -> name.endsWith(NAMED_JOBS_COMPLETED_JOBS_FILE_NAME_SUFFIX) )) { try (FileInputStream fis = new FileInputStream(namedJobFile)) { final List<NamedJob.CompletedJob> list = mapper.readValue(fis, new TypeReference<List<NamedJob.CompletedJob>>() {}); if (list != null && !list.isEmpty()) list.forEach(subscriber::onNext); } catch (Exception e) { subscriber.onError(e); } } subscriber.onCompleted(); }); } @Override public void shutdown() { // no clean up needed } private void storeWorker(String jobId, MantisWorkerMetadataWritable workerMetadata, boolean rewrite) throws IOException { File workerFile = new File(getWorkerFilename(SPOOL_DIR, jobId, workerMetadata.getWorkerIndex(), workerMetadata.getWorkerNumber())); if (rewrite) workerFile.delete(); workerFile.createNewFile(); try (PrintWriter pwrtr = new PrintWriter(workerFile)) { mapper.writeValue(pwrtr, workerMetadata); } } private List<MantisStageMetadataWritable> readStagesFor(File spoolDir, final String id) throws IOException { List<MantisStageMetadataWritable> stageList = new ArrayList<>(); for (File stageFile : spoolDir.listFiles((dir, name) -> { return name.startsWith("Stage-" + id + "-"); })) { logger.info("Reading stage file " + stageFile.getName()); try (FileInputStream fis = new FileInputStream(stageFile)) { stageList.add(mapper.readValue(fis, MantisStageMetadataWritable.class)); } } return stageList; } private List<MantisWorkerMetadataWritable> readWorkersFor(File spoolDir, final String id) { List<MantisWorkerMetadataWritable> workerList = new ArrayList<>(); for (File workerFile : spoolDir.listFiles((dir, name) -> { return name.startsWith("Worker-" + id + "-"); })) { logger.info("Reading worker file " + workerFile.getName()); try (FileInputStream 
fis = new FileInputStream(workerFile)) { workerList.add(mapper.readValue(fis, MantisWorkerMetadataWritable.class)); } catch (IOException e) { e.printStackTrace(); //To change body of catch statement use File | Settings | File Templates. } } return workerList; } private void archiveWorkers(String jobId) throws IOException { File spoolDir = new File(SPOOL_DIR); for (File wFile : spoolDir.listFiles((dir, name) -> { return name.startsWith("Worker-" + jobId + "-"); })) { wFile.renameTo(new File(ARCHIVE_DIR + File.separator + wFile.getName())); } } @Override public void archiveWorker(MantisWorkerMetadataWritable mwmd) throws IOException { File wFile = new File(getWorkerFilename(SPOOL_DIR, mwmd.getJobId(), mwmd.getWorkerIndex(), mwmd.getWorkerNumber())); if (wFile.exists()) wFile.renameTo(new File(getWorkerFilename(ARCHIVE_DIR, mwmd.getJobId(), mwmd.getWorkerIndex(), mwmd.getWorkerNumber()))); } public List<MantisWorkerMetadataWritable> getArchivedWorkers(final String jobid) throws IOException { List<MantisWorkerMetadataWritable> workerList = new ArrayList<>(); File archiveDir = new File(ARCHIVE_DIR); for (File workerFile : archiveDir.listFiles(new FilenameFilter() { @Override public boolean accept(File dir, String name) { return name.startsWith("Worker-" + jobid + "-"); } })) { try (FileInputStream fis = new FileInputStream(workerFile)) { workerList.add(mapper.readValue(fis, MantisWorkerMetadataWritable.class)); } } return workerList; } private String getNamedJobFileName(String name) { return NAMED_JOBS_DIR + "/" + name + ".job"; } @Override public void storeNewNamedJob(NamedJob namedJob) throws JobNameAlreadyExistsException, IOException { File tmpFile = new File(NAMED_JOBS_DIR + "/" + namedJob.getName()); logger.info("Storing job cluster " + namedJob.getName() + " to file " + tmpFile.getAbsolutePath()); if (!tmpFile.createNewFile()) throw new JobNameAlreadyExistsException(namedJob.getName()); try (PrintWriter pwrtr = new PrintWriter(tmpFile)) { mapper.writeValue(pwrtr, 
namedJob); } } @Override public void updateNamedJob(NamedJob namedJob) throws InvalidNamedJobException, IOException { File jobFile = new File(NAMED_JOBS_DIR + "/" + namedJob.getName()); if (!jobFile.exists()) throw new InvalidNamedJobException(namedJob.getName() + " doesn't exist"); jobFile.delete(); jobFile.createNewFile(); try (PrintWriter pwrtr = new PrintWriter(jobFile)) { mapper.writeValue(pwrtr, namedJob); } } @Override public boolean deleteNamedJob(String name) throws IOException { File jobFile = new File(NAMED_JOBS_DIR + File.separator + name); final boolean deleted = jobFile.delete(); File completedJobsFile = new File(NAMED_JOBS_DIR + File.separator + name + NAMED_JOBS_COMPLETED_JOBS_FILE_NAME_SUFFIX); completedJobsFile.delete(); return deleted; } @Override public void storeCompletedJobForNamedJob(String name, NamedJob.CompletedJob job) throws IOException { modifyCompletedJobsForNamedJob(name, list -> list.add(job)); } private void modifyCompletedJobsForNamedJob(String name, Action1<List<NamedJob.CompletedJob>> modifier) throws IOException { File completedJobsFile = new File(NAMED_JOBS_DIR + File.separator + name + NAMED_JOBS_COMPLETED_JOBS_FILE_NAME_SUFFIX); List<NamedJob.CompletedJob> completedJobs = new LinkedList<>(); if (completedJobsFile.exists()) { try (FileInputStream fis = new FileInputStream(completedJobsFile)) { completedJobs.addAll(mapper.readValue(fis, new TypeReference<List<NamedJob.CompletedJob>>() {})); } } modifier.call(completedJobs); completedJobsFile.delete(); completedJobsFile.createNewFile(); try (PrintWriter w = new PrintWriter(completedJobsFile)) { mapper.writeValue(w, completedJobs); } } @Override public void removeCompledtedJobForNamedJob(String name, String jobId) throws IOException { modifyCompletedJobsForNamedJob(name, list -> { if (list != null) { final Iterator<NamedJob.CompletedJob> iterator = list.iterator(); while (iterator.hasNext()) { final NamedJob.CompletedJob next = iterator.next(); if (next.getJobId().equals(jobId)) 
{ iterator.remove(); break; } } } }); } @Override public void setActiveVmAttributeValuesList(List<String> vmAttributesList) throws IOException { File activeSlavesFile = new File(SPOOL_DIR + File.separator + ACTIVE_VMS_FILENAME); logger.info("Storing file " + activeSlavesFile.getAbsolutePath()); if (activeSlavesFile.exists()) activeSlavesFile.delete(); activeSlavesFile.createNewFile(); try (PrintWriter wrtr = new PrintWriter(activeSlavesFile)) { mapper.writeValue(wrtr, vmAttributesList); } } @Override public List<String> initActiveVmAttributeValuesList() throws IOException { File activeSlavesFile = new File(SPOOL_DIR + File.separator + ACTIVE_VMS_FILENAME); if (!activeSlavesFile.exists()) return Collections.EMPTY_LIST; try (FileInputStream fis = new FileInputStream(activeSlavesFile)) { return mapper.readValue(fis, new TypeReference<List<String>>() {}); } } }
4,343
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/store/MantisStageMetadataWritable.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.store;

import java.util.Collection;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Mutable, JSON-serializable metadata for one stage of a Mantis job.
 *
 * <p>Worker bookkeeping is kept in two maps that are deliberately excluded from JSON:
 * {@code workerByIndexMetadataSet} holds the <em>current</em> worker for each worker index, while
 * {@code workerByNumberMetadataSet} holds every known worker keyed by its unique worker number
 * (including superseded workers until they are archived).
 *
 * <p>Thread-safety: the maps are {@link ConcurrentMap}s, but several operations (the
 * {@code unsafe*} methods and {@link #replaceWorkerIndex}) are documented to require external
 * locking on the enclosing job metadata object.
 */
public class MantisStageMetadataWritable implements MantisStageMetadata {

    private static final Logger logger = LoggerFactory.getLogger(MantisStageMetadataWritable.class);

    // Current worker per worker index (at most one per index).
    @JsonIgnore
    private final ConcurrentMap<Integer, MantisWorkerMetadata> workerByIndexMetadataSet;
    // All known workers keyed by unique worker number, including replaced ones until archived.
    @JsonIgnore
    private final ConcurrentMap<Integer, MantisWorkerMetadata> workerByNumberMetadataSet;
    private String jobId;
    private int stageNum;
    private int numStages;
    private MachineDefinition machineDefinition;
    private int numWorkers;
    private List<JobConstraints> hardConstraints;
    private List<JobConstraints> softConstraints;
    private StageScalingPolicy scalingPolicy;
    private boolean scalable;

    /**
     * Creates stage metadata; also the Jackson creator used when deserializing persisted state.
     *
     * @param jobId             ID of the owning job
     * @param stageNum          this stage's number within the job
     * @param numStages         total number of stages in the job
     * @param machineDefinition resources each worker of this stage requires
     * @param numWorkers        number of workers currently configured for this stage
     * @param hardConstraints   scheduling constraints that must be satisfied
     * @param softConstraints   scheduling constraints that are best-effort
     * @param scalingPolicy     autoscaling policy for this stage, may be null
     * @param scalable          whether this stage may be scaled
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public MantisStageMetadataWritable(@JsonProperty("jobId") String jobId,
                                       @JsonProperty("stageNum") int stageNum,
                                       @JsonProperty("numStages") int numStages,
                                       @JsonProperty("machineDefinition") MachineDefinition machineDefinition,
                                       @JsonProperty("numWorkers") int numWorkers,
                                       @JsonProperty("hardConstraints") List<JobConstraints> hardConstraints,
                                       @JsonProperty("softConstraints") List<JobConstraints> softConstraints,
                                       @JsonProperty("scalingPolicy") StageScalingPolicy scalingPolicy,
                                       @JsonProperty("scalable") boolean scalable) {
        this.jobId = jobId;
        this.stageNum = stageNum;
        this.numStages = numStages;
        this.machineDefinition = machineDefinition;
        this.numWorkers = numWorkers;
        this.hardConstraints = hardConstraints;
        this.softConstraints = softConstraints;
        this.scalingPolicy = scalingPolicy;
        this.scalable = scalable;
        workerByIndexMetadataSet = new ConcurrentHashMap<>();
        workerByNumberMetadataSet = new ConcurrentHashMap<>();
    }

    @Override
    public String getJobId() {
        return jobId;
    }

    @Override
    public int getStageNum() {
        return stageNum;
    }

    @Override
    public int getNumStages() {
        return numStages;
    }

    @Override
    public int getNumWorkers() {
        return numWorkers;
    }

    /**
     * Counts current (per-index) workers that are not in a terminal state.
     *
     * @return number of non-terminal current workers of this stage
     */
    @JsonIgnore
    public int getNumActiveWorkers() {
        // we traverse the current worker for each index
        int active = 0;
        for (MantisWorkerMetadata w : workerByIndexMetadataSet.values()) {
            if (!MantisJobState.isTerminalState(w.getState()))
                active++;
        }
        return active;
    }

    // This call is unsafe to be called by itself. Typically this is called from within a block that
    // locks the corresponding job metadata object and also does the right things for reflecting upon
    // the change. E.g., if increasing the number, then create the new workers. When decrementing,
    // call the unsafeRemoveWorker() to remove the additional workers.
    public void unsafeSetNumWorkers(int numWorkers) {
        this.numWorkers = numWorkers;
    }

    /**
     * Removes the worker with the given index and number from both maps. Unsafe: caller must hold
     * the job-level lock (see {@link #unsafeSetNumWorkers}).
     *
     * @return true only if both maps contained the worker and index/number matched each other
     */
    public boolean unsafeRemoveWorker(int index, int number) {
        final MantisWorkerMetadata removedIdx = workerByIndexMetadataSet.remove(index);
        final MantisWorkerMetadata removedNum = workerByNumberMetadataSet.remove(number);
        return removedIdx != null && removedNum != null &&
                removedIdx.getWorkerNumber() == number && removedNum.getWorkerIndex() == index;
    }

    @Override
    public List<JobConstraints> getHardConstraints() {
        return hardConstraints;
    }

    @Override
    public List<JobConstraints> getSoftConstraints() {
        return softConstraints;
    }

    @Override
    public StageScalingPolicy getScalingPolicy() {
        return scalingPolicy;
    }

    public void setScalingPolicy(StageScalingPolicy scalingPolicy) {
        this.scalingPolicy = scalingPolicy;
    }

    @Override
    public boolean getScalable() {
        return scalable;
    }

    public void setScalable(boolean scalable) {
        this.scalable = scalable;
    }

    @Override
    public MachineDefinition getMachineDefinition() {
        return machineDefinition;
    }

    /** @return the current worker for each index (live view of the map's values). */
    @JsonIgnore
    @Override
    public Collection<MantisWorkerMetadata> getWorkerByIndexMetadataSet() {
        return workerByIndexMetadataSet.values();
    }

    /** @return every known worker keyed by worker number (live view of the map's values). */
    @JsonIgnore
    @Override
    public Collection<MantisWorkerMetadata> getAllWorkers() {
        return workerByNumberMetadataSet.values();
    }

    /**
     * Looks up the current worker at the given worker index.
     *
     * @throws InvalidJobException if no current worker exists at that index
     */
    @JsonIgnore
    @Override
    public MantisWorkerMetadata getWorkerByIndex(int workerId) throws InvalidJobException {
        // NOTE: the parameter is a worker *index*, despite its name.
        MantisWorkerMetadata mwmd = workerByIndexMetadataSet.get(workerId);
        if (mwmd == null)
            throw new InvalidJobException(jobId, -1, workerId);
        return mwmd;
    }

    /**
     * Looks up a worker by its unique worker number.
     *
     * @throws InvalidJobException if no worker with that number is known
     */
    @JsonIgnore
    @Override
    public MantisWorkerMetadata getWorkerByWorkerNumber(int workerNumber) throws InvalidJobException {
        MantisWorkerMetadata mwmd = workerByNumberMetadataSet.get(workerNumber);
        if (mwmd == null)
            throw new InvalidJobException(jobId, -1, workerNumber);
        return mwmd;
    }

    /**
     * Removes and returns the worker with the given number, but only if it is in an error state;
     * returns null otherwise.
     */
    MantisWorkerMetadataWritable removeWorkerInErrorState(int workerNumber) {
        MantisWorkerMetadataWritable mwmd = (MantisWorkerMetadataWritable) workerByNumberMetadataSet.get(workerNumber);
        if (mwmd != null && MantisJobState.isErrorState(mwmd.getState())) {
            workerByNumberMetadataSet.remove(workerNumber);
            return mwmd;
        }
        return null;
    }

    /**
     * Removes from the by-number map every worker that is no longer the current worker for its
     * index (i.e. it has been superseded) and returns the removed workers so they can be archived.
     */
    Collection<MantisWorkerMetadataWritable> removeArchiveableWorkers() {
        Collection<MantisWorkerMetadataWritable> removedWorkers = new LinkedList<>();
        // Snapshot the key set so removal during iteration is safe.
        Set<Integer> workerNumbers = new HashSet<>(workerByNumberMetadataSet.keySet());
        for (Integer w : workerNumbers) {
            MantisWorkerMetadata mwmd = workerByNumberMetadataSet.get(w);
            final MantisWorkerMetadata wi = workerByIndexMetadataSet.get(mwmd.getWorkerIndex());
            if (wi == null || wi.getWorkerNumber() != mwmd.getWorkerNumber()) {
                workerByNumberMetadataSet.remove(w);
                removedWorkers.add((MantisWorkerMetadataWritable) mwmd);
            }
        }
        return removedWorkers;
    }

    /**
     * Installs {@code newWorker} as the current worker for its index, optionally replacing
     * {@code oldWorker}.
     *
     * <p>When {@code oldWorker} is null the new worker is only installed if the index is vacant.
     * When {@code oldWorker} is non-null, the replacement fails (and is rolled back) if a
     * <em>different</em> worker currently holds the index.
     *
     * @param newWorker worker to install
     * @param oldWorker worker expected to currently hold the index, or null
     * @return true if the new worker was installed (it is then also recorded by worker number)
     * @throws InvalidJobException if {@code oldWorker}'s index differs from {@code newWorker}'s
     */
    public boolean replaceWorkerIndex(MantisWorkerMetadata newWorker, MantisWorkerMetadata oldWorker)
            throws InvalidJobException {
        int index = newWorker.getWorkerIndex();
        boolean result = true;
        if (!MantisJobState.isErrorState(newWorker.getState())) {
            if (oldWorker == null) {
                if (workerByIndexMetadataSet.putIfAbsent(index, newWorker) != null)
                    result = false;
            } else {
                if (oldWorker.getWorkerIndex() != index) {
                    throw new InvalidJobException(newWorker.getJobId(), stageNum, oldWorker.getWorkerIndex());
                }
                MantisWorkerMetadata mwmd = workerByIndexMetadataSet.put(index, newWorker);
                // Bug fix: put() returns null when the index had no current worker (e.g. the old
                // worker was already removed). The previous code dereferenced the result
                // unconditionally and would NPE; a null previous mapping means there is nothing to
                // restore, so the replacement simply stands.
                if (mwmd != null && mwmd.getWorkerNumber() != oldWorker.getWorkerNumber()) {
                    // A different worker than expected holds this index: undo and report failure.
                    workerByIndexMetadataSet.put(index, mwmd);
                    result = false;
                    logger.info("Did not replace worker {} with {} for index {} of job {}, different worker {} exists already",
                            oldWorker.getWorkerNumber(), newWorker.getWorkerNumber(),
                            newWorker.getWorkerIndex(), jobId, mwmd.getWorkerNumber());
                }
            }
        } else if (oldWorker != null)
            result = false;
        if (result)
            workerByNumberMetadataSet.put(newWorker.getWorkerNumber(), newWorker);
        return result;
    }
}
4,344
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/store/MantisStorageProvider.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.store;

import java.io.IOException;
import java.util.List;

import rx.Observable;

/**
 * Persistence abstraction for Mantis master state: jobs, stages, workers, named jobs, and the
 * active-VM attribute list. Implementations back this with a concrete store; a no-op implementation
 * exists for testing.
 */
public interface MantisStorageProvider {

    /**
     * Store to persistence newly created job with given metadata object. This is expected to fail if job with the same
     * jobId as given in the <code>jobMetadata</code> object already exists in persistence store.
     *
     * @param jobMetadata
     *
     * @throws JobAlreadyExistsException If a job with same id as in the given metadata object already exists
     * @throws IOException
     */
    void storeNewJob(MantisJobMetadataWritable jobMetadata) throws JobAlreadyExistsException, IOException;

    /**
     * Overwrite the persisted metadata of an existing job.
     *
     * @param jobMetadata metadata to persist
     * @throws InvalidJobException if no such job exists
     * @throws IOException upon errors with storage invocation
     */
    void updateJob(MantisJobMetadataWritable jobMetadata) throws InvalidJobException, IOException;

    /**
     * Mark the job as not active and move it to an inactive archived collection of jobs.
     *
     * @param jobId The Job Id of the job to archive
     *
     * @throws IOException upon errors with storage invocation
     */
    void archiveJob(String jobId) throws IOException;

    /**
     * Delete the job metadata permanently.
     *
     * @param jobId The Job Id of the job to delete
     *
     * @throws InvalidJobException If there is no such job to delete
     * @throws IOException Upon errors with storage invocation
     */
    void deleteJob(String jobId) throws InvalidJobException, IOException;

    /** Persist a newly created stage of a job. */
    void storeMantisStage(MantisStageMetadataWritable msmd) throws IOException;

    /** Overwrite the persisted metadata of an existing stage. */
    void updateMantisStage(MantisStageMetadataWritable msmd) throws IOException;

    /**
     * Store a new worker for the given job and stage number. This will be called only once for a given
     * worker. However, it is possible that concurrent calls can be made on a <code>jobId</code>, each with a
     * different worker.
     *
     * @param workerMetadata The worker metadata to store.
     *
     * @throws IOException
     */
    void storeWorker(MantisWorkerMetadataWritable workerMetadata) throws IOException;

    /**
     * Store multiple new workers for the give job. This is called only once for a given worker. This method enables
     * optimization by calling storage once for multiple workers.
     *
     * @param jobId The Job ID.
     * @param workers The list of workers to store.
     *
     * @throws IOException if there were errors storing the workers.
     */
    void storeWorkers(String jobId, List<MantisWorkerMetadataWritable> workers) throws IOException;

    /**
     * Store a new worker and update existing worker of a job atomically. Either both are stored or none is.
     *
     * @param worker1 Existing worker to update.
     * @param worker2 New worker to store.
     *
     * @throws IOException
     * @throws InvalidJobException If workers don't have the same JobId.
     */
    void storeAndUpdateWorkers(MantisWorkerMetadataWritable worker1, MantisWorkerMetadataWritable worker2)
            throws InvalidJobException, IOException;

    /**
     * Update (overwrite) existing worker metadata with the given metadata.
     *
     * @param mwmd Worker metadata to update
     *
     * @throws IOException
     */
    void updateWorker(MantisWorkerMetadataWritable mwmd) throws IOException;

    /**
     * Initialize and return all existing jobs from persistence, including all corresponding job stages and workers.
     *
     * @return List of job metadata objects
     *
     * @throws IOException
     */
    List<MantisJobMetadataWritable> initJobs() throws IOException;

    /**
     * Initialize and return all archived (completed/inactive) jobs from persistence.
     *
     * @return An Observable emitting each archived job's metadata.
     */
    Observable<MantisJobMetadata> initArchivedJobs();

    /**
     * Initialize and return all existing NamedJobs from persistence.
     *
     * @return List of {@link NamedJob} objects.
     *
     * @throws IOException Upon error connecting to or reading from persistence.
     */
    List<NamedJob> initNamedJobs() throws IOException;

    /**
     * Initialize and return completed jobs of all NamedJobs in the system.
     *
     * @return An Observable of all completed jobs for all NamedJobs.
     *
     * @throws IOException Upon error connecting to or reading from persistence.
     */
    Observable<NamedJob.CompletedJob> initNamedJobCompletedJobs() throws IOException;

    /**
     * Archives worker. This is usually called when a worker enters error state. It is expected that archived workers
     * are moved out from regular store elsewhere so when jobs are loaded they do not contain archived workers.
     *
     * @param mwmd Worker metadata to archive
     *
     * @throws IOException
     */
    void archiveWorker(MantisWorkerMetadataWritable mwmd) throws IOException;

    /**
     * Return all archived workers of the given job.
     *
     * @param jobid The Job Id whose archived workers to load.
     * @return List of archived worker metadata objects.
     * @throws IOException Upon errors with storage invocation.
     */
    List<MantisWorkerMetadataWritable> getArchivedWorkers(String jobid) throws IOException;

    /**
     * Persist a newly created NamedJob.
     *
     * @throws JobNameAlreadyExistsException if a NamedJob with the same name already exists.
     */
    void storeNewNamedJob(NamedJob namedJob) throws JobNameAlreadyExistsException, IOException;

    /**
     * Overwrite the persisted state of an existing NamedJob.
     *
     * @throws InvalidNamedJobException if no such NamedJob exists.
     */
    void updateNamedJob(NamedJob namedJob) throws InvalidNamedJobException, IOException;

    /**
     * Delete the NamedJob with the given name.
     *
     * @return true if deleted, false otherwise.
     */
    boolean deleteNamedJob(String name) throws IOException;

    /** Record a completed job under the NamedJob with the given name. */
    void storeCompletedJobForNamedJob(String name, NamedJob.CompletedJob job) throws IOException;

    // NOTE(review): method name contains a typo ("Compledted"); renaming would break existing
    // implementations and callers, so it is kept as-is. Consider a deprecation/rename pass.
    /** Remove the record of a completed job (by job id) from the NamedJob with the given name. */
    void removeCompledtedJobForNamedJob(String name, String jobId) throws IOException;

    /**
     * Load a single archived job's metadata.
     *
     * @return the archived job metadata, or null if not found (implementation-dependent).
     */
    MantisJobMetadataWritable loadArchivedJob(String jobId) throws IOException;

    /** Release any resources held by this provider; called on master shutdown. */
    void shutdown();

    /** Load the persisted list of active VM attribute values, used for cluster slave filtering. */
    List<String> initActiveVmAttributeValuesList() throws IOException;

    /** Persist the list of active VM attribute values. */
    void setActiveVmAttributeValuesList(List<String> vmAttributesList) throws IOException;
}
4,345
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/store/NoopStorageProvider.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.store;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import rx.Observable;

/**
 * A {@link MantisStorageProvider} that persists nothing. Every write is silently discarded and
 * every read reports an empty store. Useful for tests and for running the master without a
 * backing store.
 */
public class NoopStorageProvider implements MantisStorageProvider {

    @Override
    public void storeNewJob(MantisJobMetadataWritable jobMetadata) throws JobAlreadyExistsException, IOException {
        // intentional no-op
    }

    @Override
    public void updateJob(MantisJobMetadataWritable jobMetadata) throws InvalidJobException, IOException {
        // intentional no-op
    }

    @Override
    public void deleteJob(String jobId) throws InvalidJobException, IOException {
        // intentional no-op
    }

    @Override
    public void archiveJob(String jobId) throws IOException {
        // intentional no-op
    }

    @Override
    public void storeMantisStage(MantisStageMetadataWritable msmd) throws IOException {
        // intentional no-op
    }

    @Override
    public void updateMantisStage(MantisStageMetadataWritable msmd) throws IOException {
        // intentional no-op
    }

    @Override
    public void storeWorker(MantisWorkerMetadataWritable workerMetadata) throws IOException {
        // intentional no-op
    }

    @Override
    public void storeWorkers(String jobId, List<MantisWorkerMetadataWritable> workers) throws IOException {
        // intentional no-op
    }

    @Override
    public void storeAndUpdateWorkers(MantisWorkerMetadataWritable worker1, MantisWorkerMetadataWritable worker2)
            throws InvalidJobException, IOException {
        // intentional no-op
    }

    @Override
    public void updateWorker(MantisWorkerMetadataWritable mwmd) throws IOException {
        // intentional no-op
    }

    @Override
    public List<MantisJobMetadataWritable> initJobs() throws IOException {
        // fresh mutable list each call, matching the contract of "no stored jobs"
        return new ArrayList<>();
    }

    @Override
    public Observable<MantisJobMetadata> initArchivedJobs() {
        return Observable.empty();
    }

    @Override
    public List<NamedJob> initNamedJobs() throws IOException {
        return new ArrayList<>();
    }

    @Override
    public Observable<NamedJob.CompletedJob> initNamedJobCompletedJobs() throws IOException {
        return Observable.empty();
    }

    @Override
    public void archiveWorker(MantisWorkerMetadataWritable mwmd) throws IOException {
        // intentional no-op
    }

    @Override
    public List<MantisWorkerMetadataWritable> getArchivedWorkers(String jobid) throws IOException {
        return new ArrayList<>();
    }

    @Override
    public void storeNewNamedJob(NamedJob namedJob) throws JobNameAlreadyExistsException, IOException {
        // intentional no-op
    }

    @Override
    public void updateNamedJob(NamedJob namedJob) throws InvalidNamedJobException, IOException {
        // intentional no-op
    }

    @Override
    public boolean deleteNamedJob(String name) throws IOException {
        // report success: there is never anything to delete
        return true;
    }

    @Override
    public void storeCompletedJobForNamedJob(String name, NamedJob.CompletedJob job) {
        // intentional no-op
    }

    @Override
    public MantisJobMetadataWritable loadArchivedJob(String jobId) throws IOException {
        // no archived jobs exist in a no-op store
        return null;
    }

    @Override
    public void removeCompledtedJobForNamedJob(String name, String jobId) throws IOException {
        // intentional no-op
    }

    @Override
    public void setActiveVmAttributeValuesList(List<String> vmAttributesList) throws IOException {
        // intentional no-op
    }

    @Override
    public List<String> initActiveVmAttributeValuesList() {
        // NOTE(review): returns null rather than an empty list, unlike the other init* methods —
        // presumably callers treat null as "not configured"; verify before changing.
        return null;
    }

    @Override
    public void shutdown() {
        // intentional no-op
    }
}
4,346
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/store/InvalidJobException.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.store; public class InvalidJobException extends Exception { public InvalidJobException(String id) { super(id); } public InvalidJobException(String id, Throwable cause) { super(id, cause); } public InvalidJobException(String jobId, int stageNum, int workerId) { super(jobId + ((stageNum >= 0) ? "-stage-" + stageNum : "") + ((workerId >= 0) ? "-worker-" + workerId : "")); } public InvalidJobException(String jobId, int stageNum, int workerId, Throwable cause) { super(jobId + ((stageNum >= 0) ? "-stage-" + stageNum : "") + ((workerId >= 0) ? "-worker-" + workerId : ""), cause); } }
4,347
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/store/MantisJobStore.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.store; public final class MantisJobStore { } // //import com.google.common.cache.Cache; //import com.google.common.cache.CacheBuilder; //import io.mantisrx.common.metrics.Gauge; //import io.mantisrx.common.metrics.Metrics; //import io.mantisrx.common.metrics.MetricsRegistry; //import io.mantisrx.runtime.MantisJobDefinition; //import io.mantisrx.runtime.MantisJobState; //import io.mantisrx.runtime.descriptor.SchedulingInfo; //import io.mantisrx.runtime.descriptor.StageSchedulingInfo; //import io.mantisrx.server.core.JobCompletedReason; //import io.mantisrx.server.master.*; //import io.mantisrx.server.master.config.ConfigurationProvider; //import io.mantisrx.server.master.domain.WorkerRequest; //import org.slf4j.Logger; //import org.slf4j.LoggerFactory; //import rx.functions.Func2; // //import java.io.IOException; //import java.util.*; //import java.util.concurrent.*; //import java.util.concurrent.atomic.AtomicInteger; //import java.util.concurrent.atomic.AtomicReference; // // //public final class MantisJobStore { // // private class ArchivedWorkersCache { // private final Cache<String, ConcurrentMap<Integer, MantisWorkerMetadata>> cache; // ArchivedWorkersCache(int cacheSize) { // cache = CacheBuilder // .newBuilder() // .maximumSize(cacheSize) // .build(); // } // ConcurrentMap<Integer, MantisWorkerMetadata> getArchivedWorkerMap(final String jobId) throws 
ExecutionException { // return cache.get(jobId, () -> { // List<MantisWorkerMetadataWritable> workers = storageProvider.getArchivedWorkers(jobId); // ConcurrentMap<Integer, MantisWorkerMetadata> theMap = new ConcurrentHashMap<>(); // if(workers != null) { // for(MantisWorkerMetadata mwmd: workers) { // theMap.putIfAbsent(mwmd.getWorkerNumber(), mwmd); // } // } // return theMap; // }); // } // // void remove(String jobId) { // cache.invalidate(jobId); // } // } // // private class ArchivedJobsMetadataCache { // private final Cache<String, MantisJobMetadata> cache; // // ArchivedJobsMetadataCache(int cacheSize) { // cache = CacheBuilder // .newBuilder() // .maximumSize(cacheSize) // .build(); // } // // MantisJobMetadata getJob(String jobId) { // try { // return cache.get(jobId, () -> loadArchivedJob(jobId)); // } catch (ExecutionException e) { // return null; // } // } // // private MantisJobMetadata loadArchivedJob(String jobId) throws IOException, InvalidJobException, ExecutionException { // final MantisJobMetadataWritable jobMetadata = storageProvider.loadArchivedJob(jobId); // if (jobMetadata == null) // throw new ExecutionException(new InvalidJobException(jobId)); // return jobMetadata; // } // // void add(MantisJobMetadataWritable job) { // cache.put(job.getJobId(), job); // } // // void remove(String jobId) { // cache.invalidate(jobId); // } // } // // private static class TerminatedJob implements Comparable<TerminatedJob> { // private final String jobId; // private final long terminatedTime; // private TerminatedJob(String jobId, long terminatedTime) { // this.jobId = jobId; // this.terminatedTime = terminatedTime; // } // @Override // public int compareTo(TerminatedJob o) { // return Long.compare(terminatedTime, o.terminatedTime); // } // } // public static final String JOB_DELETE = "MantisJobDelete"; // private static final Logger logger = LoggerFactory.getLogger(MantisJobStore.class); // private final Func2<MantisJobStore, Map<String, 
MantisJobDefinition>, Collection<NamedJob>> jobsInitializer; // private final MantisStorageProvider storageProvider; // private final ConcurrentMap<String, MantisJobMetadataWritable> activeJobsMap; // private final ConcurrentMap<String, String> archivedJobIds; // private final ArchivedJobsMetadataCache archivedJobsMetadataCache; // private final ArchivedWorkersCache archivedWorkersCache; // private static final long DELETE_TERMINATED_JOBS_DELAY_SECS = 60; // private static final long maxInitTimeSecs = ConfigurationProvider.getConfig().getMasterInitTimeoutSecs(); // private final PriorityBlockingQueue<TerminatedJob> terminatedJobsToDelete; // private final MantisJobOperations jobOps; // private static final String initMillisGaugeName="JobStoreInitMillis"; // private static final String postInitMillisGaugeName="JobStorePostInitMillis"; // private final Gauge initMillis; // private final Gauge postInitMillis; // // public MantisJobStore(final MantisStorageProvider storageProvider, // final Func2<MantisJobStore, Map<String, MantisJobDefinition>, Collection<NamedJob>> jobsInitializer, // final MantisJobOperations jobOps) { // this.storageProvider = storageProvider; // this.jobsInitializer = jobsInitializer; // this.jobOps = jobOps; // activeJobsMap = new ConcurrentHashMap<>(); // archivedJobIds = new ConcurrentHashMap<>(); // archivedWorkersCache = new ArchivedWorkersCache(ConfigurationProvider.getConfig().getMaxArchivedJobsToCache()); // archivedJobsMetadataCache = new ArchivedJobsMetadataCache(ConfigurationProvider.getConfig().getMaxArchivedJobsToCache()); // terminatedJobsToDelete = new PriorityBlockingQueue<>(); // Metrics m = new Metrics.Builder() // .name(MantisJobStore.class.getCanonicalName()) // .addGauge(initMillisGaugeName) // .addGauge(postInitMillisGaugeName) // .build(); // m = MetricsRegistry.getInstance().registerAndGet(m); // initMillis = m.getGauge(initMillisGaugeName); // postInitMillis = m.getGauge(postInitMillisGaugeName); // (new 
ScheduledThreadPoolExecutor(1)).scheduleWithFixedDelay(this::deleteOldTerminatedJobs, // DELETE_TERMINATED_JOBS_DELAY_SECS, DELETE_TERMINATED_JOBS_DELAY_SECS, TimeUnit.SECONDS); // } // // private void deleteOldTerminatedJobs() { // final long tooOldCutOff = System.currentTimeMillis() - (getTerminatedJobToDeleteDelayHours()*3600000L); // while(true) { // try { // TerminatedJob jobToDelete = terminatedJobsToDelete.poll(); // if (jobToDelete == null) // return; // logger.info("terminateTime=" + jobToDelete.terminatedTime + ", cutOff=" + tooOldCutOff); // if (jobToDelete.terminatedTime > (tooOldCutOff)) { // // job not ready to be deleted yet, add it back // terminatedJobsToDelete.add(jobToDelete); // return; // } // logger.info("Deleting old job " + jobToDelete.jobId); // try { // // this will in turn call deleteJob(jobId) which will delete it from various maps that contain the job // jobOps.deleteJob(jobToDelete.jobId); // } catch (IOException e) { // logger.warn("Error deleting job \" + jobToDelete.jobId + \", will try again later:" + e.getMessage(), e); // terminatedJobsToDelete.add(jobToDelete); // add it back // } // } // catch (Exception e) { // logger.error("Unexpected error deleting jobs: " + e.getMessage(), e); // } // } // } // // private long getTerminatedJobToDeleteDelayHours() { // return ConfigurationProvider.getConfig().getTerminatedJobToDeleteDelayHours(); // } // // public MantisJobMetadata storeNewJob(WorkerJobDetails jobDetails) throws IOException, JobAlreadyExistsException { // logger.info("Storing job " + jobDetails.getJobId()); // MantisJobMetadataWritable jobMetadata = new MantisJobMetadataWritable(jobDetails.getJobId(), jobDetails.getJobName(), // jobDetails.getUser(), System.currentTimeMillis(), jobDetails.getJobJarUrl(), // jobDetails.getRequest().getJobDefinition().getSchedulingInfo().getStages().size(), // jobDetails.getRequest().getJobDefinition().getJobSla(), MantisJobState.Accepted, // 
jobDetails.getRequest().getJobDefinition().getSubscriptionTimeoutSecs(), // jobDetails.getRequest().getJobDefinition().getParameters(), 1, // jobDetails.getRequest().getJobDefinition().getMigrationConfig(), // jobDetails.getRequest().getJobDefinition().getLabels()); // storageProvider.storeNewJob(jobMetadata); // activeJobsMap.put(jobMetadata.getJobId(), jobMetadata); // return jobMetadata; // } // // public MantisStorageProvider getStorageProvider() { // return storageProvider; // } // // public void storeJobState(String jobId, MantisJobState state) // throws InvalidJobException, IOException, InvalidJobStateChangeException { // MantisJobMetadataWritable job = activeJobsMap.get(jobId); // if(job == null) { // throw new InvalidJobException(jobId); // } // job.setJobState(state); // storageProvider.updateJob(job); // if(MantisJobState.isTerminalState(state)) { // terminatedJobsToDelete.add(new TerminatedJob(jobId, System.currentTimeMillis())); // archiveJob(job); // archivedJobsMetadataCache.add(job); // activeJobsMap.remove(jobId); // archivedJobIds.put(jobId, jobId); // jobOps.terminateJob(jobId); // } // } // // private void archiveJob(MantisJobMetadataWritable job) throws IOException { // storageProvider.archiveJob(job.getJobId()); // } // // public void storeJobNextWorkerNumber(String jobId, int n) // throws InvalidJobException, IOException { // MantisJobMetadataWritable job = activeJobsMap.get(jobId); // if(job == null) { // throw new InvalidJobException(jobId); // } // job.setNextWorkerNumberToUse(n); // storageProvider.updateJob(job); // } // // public void deleteJob(String jobId) throws IOException, InvalidJobException { // storageProvider.deleteJob(jobId); // activeJobsMap.remove(jobId); // archivedJobIds.remove(jobId); // archivedJobsMetadataCache.remove(jobId); // archivedWorkersCache.remove(jobId); // MantisAuditLogWriter.getInstance() // .getObserver().onNext(new MantisAuditLogEvent(MantisAuditLogEvent.Type.JOB_DELETE, jobId, "")); // } // // public 
Collection<String> getTerminatedJobIds() { // return new LinkedList<>(archivedJobIds.keySet()); // } // // public List<? extends MantisWorkerMetadata> storeNewWorkers(List<WorkerRequest> workerRequests) // throws IOException, InvalidJobException { // if (workerRequests == null || workerRequests.isEmpty()) // return null; // String jobId = workerRequests.get(0).getJobId(); // logger.info("Adding " + workerRequests.size() + " workers for job " + jobId); // MantisJobMetadataWritable job = activeJobsMap.get(jobId); // if (job == null) // throw new InvalidJobException(jobId, -1, -1); // List<MantisWorkerMetadataWritable> addedWorkers = new ArrayList<>(); // for (WorkerRequest workerRequest : workerRequests) { // if (job.getStageMetadata(workerRequest.getWorkerStage()) == null) { // MantisStageMetadataWritable msmd = new MantisStageMetadataWritable(workerRequest.getJobId(), // workerRequest.getWorkerStage(), workerRequest.getTotalStages(), workerRequest.getDefinition(), // workerRequest.getNumInstancesAtStage(), workerRequest.getHardConstraints(), // workerRequest.getSoftConstraints(), // workerRequest.getSchedulingInfo().forStage(workerRequest.getWorkerStage()).getScalingPolicy(), // workerRequest.getSchedulingInfo().forStage(workerRequest.getWorkerStage()).getScalable()); // boolean added = job.addJobStageIfAbsent(msmd); // if (added) // storageProvider.storeMantisStage(msmd); // store the new // } // MantisWorkerMetadataWritable mwmd = new MantisWorkerMetadataWritable(workerRequest.getWorkerIndex(), // workerRequest.getWorkerNumber(), workerRequest.getJobId(), // workerRequest.getWorkerStage(), workerRequest.getNumPortsPerInstance()); // if (!job.addWorkerMedata(workerRequest.getWorkerStage(), mwmd, null)) { // MantisWorkerMetadata tmp = job.getWorkerByIndex(workerRequest.getWorkerStage(), workerRequest.getWorkerIndex()); // throw new InvalidJobException(job.getJobId(), workerRequest.getWorkerStage(), workerRequest.getWorkerIndex(), // new Exception("Couldn't add 
worker " + workerRequest.getWorkerNumber() + " as index " + // workerRequest.getWorkerIndex() + ", that index already has worker " + // tmp.getWorkerNumber())); // } // addedWorkers.add(mwmd); // } // storageProvider.storeWorkers(jobId, addedWorkers); // return addedWorkers; // } // // public MantisWorkerMetadata storeNewWorker(WorkerRequest workerRequest) // throws IOException, InvalidJobException { // logger.info("Adding worker index=" + workerRequest.getWorkerIndex()); // MantisJobMetadataWritable job = activeJobsMap.get(workerRequest.getJobId()); // if(job == null) // throw new InvalidJobException(workerRequest.getJobId(), workerRequest.getWorkerStage(), workerRequest.getWorkerIndex()); // if(job.getStageMetadata(workerRequest.getWorkerStage()) == null) { // MantisStageMetadataWritable msmd = new MantisStageMetadataWritable(workerRequest.getJobId(), // workerRequest.getWorkerStage(), workerRequest.getTotalStages(), workerRequest.getDefinition(), // workerRequest.getNumInstancesAtStage(), workerRequest.getHardConstraints(), workerRequest.getSoftConstraints(), // workerRequest.getSchedulingInfo().forStage(workerRequest.getWorkerStage()).getScalingPolicy(), // workerRequest.getSchedulingInfo().forStage(workerRequest.getWorkerStage()).getScalable()); // boolean added = job.addJobStageIfAbsent(msmd); // if(added) // storageProvider.storeMantisStage(msmd); // store the new // } // MantisWorkerMetadataWritable mwmd = new MantisWorkerMetadataWritable(workerRequest.getWorkerIndex(), // workerRequest.getWorkerNumber(), workerRequest.getJobId(), // workerRequest.getWorkerStage(), workerRequest.getNumPortsPerInstance()); // if(!job.addWorkerMedata(workerRequest.getWorkerStage(), mwmd, null)) { // MantisWorkerMetadata tmp = job.getWorkerByIndex(workerRequest.getWorkerStage(), workerRequest.getWorkerIndex()); // throw new InvalidJobException(job.getJobId(), workerRequest.getWorkerStage(), workerRequest.getWorkerIndex(), // new Exception("Couldn't add worker " + 
workerRequest.getWorkerNumber() + " as index " + // workerRequest.getWorkerIndex() + ", that index already has worker " + // tmp.getWorkerNumber())); // } // storageProvider.storeWorker(mwmd); // return mwmd; // } // // public void updateStage(MantisStageMetadata msmd) // throws IOException, InvalidJobException { // MantisJobMetadataWritable job = activeJobsMap.get(msmd.getJobId()); // if(job == null) // throw new InvalidJobException(msmd.getJobId(), msmd.getStageNum(), -1); // storageProvider.updateMantisStage((MantisStageMetadataWritable)msmd); // } // // /** // * Atomically replace worker with new one created from the given worker request. // * @param workerRequest // * @param replacedWorker // * @return The newly created worker. // * @throws IOException Upon error from storage provider. // * @throws InvalidJobException If there is no such job or stage referred to in the worker metadata. // * @throws InvalidJobStateChangeException If the replaced worker's state cannot be changed. In which case no new // * worker is created. 
// */ // public MantisWorkerMetadata replaceTerminatedWorker(WorkerRequest workerRequest, MantisWorkerMetadata replacedWorker) // throws IOException, InvalidJobException, InvalidJobStateChangeException { // logger.info("Replacing worker index=" + workerRequest.getWorkerIndex() + " number=" + replacedWorker.getWorkerNumber() + // " with number=" + workerRequest.getWorkerNumber()); // MantisJobMetadataWritable job = activeJobsMap.get(workerRequest.getJobId()); // if(job == null) // throw new InvalidJobException(workerRequest.getJobId(), workerRequest.getWorkerStage(), workerRequest.getWorkerIndex()); // if(job.getStageMetadata(workerRequest.getWorkerStage()) == null) { // throw new InvalidJobException(workerRequest.getJobId(), workerRequest.getWorkerStage(), replacedWorker.getWorkerIndex()); // } // if(!MantisJobState.isTerminalState(replacedWorker.getState())) // throw new InvalidJobStateChangeException(replacedWorker.getJobId(), replacedWorker.getState()); // MantisWorkerMetadataWritable mwmd = new MantisWorkerMetadataWritable(workerRequest.getWorkerIndex(), // workerRequest.getWorkerNumber(), workerRequest.getJobId(), // workerRequest.getWorkerStage(), workerRequest.getNumPortsPerInstance()); // mwmd.setResubmitInfo(replacedWorker.getWorkerNumber(), replacedWorker.getTotalResubmitCount() + 1); // if(!job.addWorkerMedata(replacedWorker.getStageNum(), mwmd, replacedWorker)) // throw new InvalidJobStateChangeException(replacedWorker.getJobId(), replacedWorker.getState(), MantisJobState.Failed); // storageProvider.storeAndUpdateWorkers(mwmd, (MantisWorkerMetadataWritable) replacedWorker); // MantisStageMetadataWritable msmd = (MantisStageMetadataWritable) job.getStageMetadata(replacedWorker.getStageNum()); // if(msmd.removeWorkerInErrorState(replacedWorker.getWorkerNumber()) != null) // archiveWorker((MantisWorkerMetadataWritable)replacedWorker); // return mwmd; // } // // public void storeWorkerState(String jobId, int workerNumber, MantisJobState state) // throws 
InvalidJobException, InvalidJobStateChangeException, IOException { // this.storeWorkerState(jobId, workerNumber, state, null); // } // // public void storeWorkerState(String jobId, int workerNumber, MantisJobState state, JobCompletedReason reason) // throws InvalidJobException, InvalidJobStateChangeException, IOException { // storeWorkerState(jobId, workerNumber, state, reason, true); // } // // public void storeWorkerState(String jobId, int workerNumber, MantisJobState state, JobCompletedReason reason, boolean archiveIfError) // throws InvalidJobException, InvalidJobStateChangeException, IOException { // MantisWorkerMetadataWritable mwmd = (MantisWorkerMetadataWritable)getWorkerByNumber(jobId, workerNumber); // mwmd.setState(state, System.currentTimeMillis(), reason); // storageProvider.updateWorker(mwmd); // if(archiveIfError && MantisJobState.isErrorState(state)) { // final MantisJobMetadata activeJob = getActiveJob(jobId); // if (activeJob != null) { // MantisStageMetadataWritable msmd = (MantisStageMetadataWritable) activeJob.getStageMetadata(mwmd.getStageNum()); // if(msmd.removeWorkerInErrorState(workerNumber) != null) // archiveWorker(mwmd); // } // } // } // // public void archiveWorker(MantisWorkerMetadataWritable mwmd) throws IOException { // storageProvider.archiveWorker(mwmd); // ConcurrentMap<Integer, MantisWorkerMetadata> workersMap = null; // try { // workersMap = archivedWorkersCache.getArchivedWorkerMap(mwmd.getJobId()); // } catch (ExecutionException e) { // throw new IOException(e); // } // workersMap.putIfAbsent(mwmd.getWorkerNumber(), mwmd); // } // // public List<? 
extends MantisWorkerMetadata> getArchivedWorkers(String jobId) throws IOException { // try { // return new ArrayList<>(archivedWorkersCache.getArchivedWorkerMap(jobId).values()); // } catch (ExecutionException e) { // throw new IOException(e); // } // } // // public MantisWorkerMetadata getArchivedWorker(String jobId, int workerNumber) throws IOException { // try { // return archivedWorkersCache.getArchivedWorkerMap(jobId).get(workerNumber); // } catch (ExecutionException e) { // throw new IOException(e); // } // } // // private MantisWorkerMetadata getWorkerByNumber(String jobId, int workerNumber) throws InvalidJobException { // MantisJobMetadata mjmd = activeJobsMap.get(jobId); // if(mjmd == null) // throw new InvalidJobException(jobId); // return mjmd.getWorkerByNumber(workerNumber); // } // // /** // * Get the job metadata object for the given Id. // * @param jobId Job ID. // * @return Job object if it exists, null otherwise. // */ // public MantisJobMetadata getActiveJob(final String jobId) { // final MantisJobMetadataWritable mjmd = activeJobsMap.get(jobId); // if (mjmd == null) { // logger.info("activeJobsMap found no job for job ID {}", jobId); // } // return mjmd; // } // // public MantisJobMetadata getCompletedJob(final String jobId) throws IOException { // final MantisJobMetadata job = archivedJobsMetadataCache.getJob(jobId); // if (job == null) { // logger.info("archivedJobsMetadataCache found no job for job ID {}", jobId); // } // return job; // } // // /** // * Get all workers of a stage for a given job. Generally, this is expected to return quickly, for // * example, by looking up a cache if possible. 
// * @param jobId // * @param stageNum // * @return // * @throws InvalidJobException // * @throws IOException // */ // final public Collection<MantisWorkerMetadata> getWorkers(String jobId, int stageNum) // throws InvalidJobException, IOException { // return activeJobsMap.get(jobId).getStageMetadata(stageNum).getWorkerByIndexMetadataSet(); // } // // private void archiveWorkers(MantisJobMetadataWritable mjmd) throws IOException { // for(MantisStageMetadata msmd: mjmd.getStageMetadata()) { // for(MantisWorkerMetadataWritable removedWorker: // ((MantisStageMetadataWritable)msmd).removeArchiveableWorkers()) { // archiveWorker(removedWorker); // } // } // } // // static SchedulingInfo getSchedulingInfo(MantisJobMetadata mjmd) { // int numStages = mjmd.getNumStages(); // logger.info("numStages=" + numStages); // Map<Integer, StageSchedulingInfo> stagesMap = new HashMap<>(); // for (MantisStageMetadata stageMetadata: mjmd.getStageMetadata()) { // stagesMap.put(stageMetadata.getStageNum(), new StageSchedulingInfo(stageMetadata.getNumWorkers(), // stageMetadata.getMachineDefinition(), stageMetadata.getHardConstraints(), // stageMetadata.getSoftConstraints(), stageMetadata.getScalingPolicy(), // stageMetadata.getScalable())); // } // return new SchedulingInfo(stagesMap); // } // // final public void start() { // logger.info("Mantis store starting now"); // final CountDownLatch latch = new CountDownLatch(1); // final List<MantisJobMetadataWritable> jobsToArchive = new LinkedList<>(); // final AtomicReference<Collection<NamedJob>> ref = new AtomicReference<>(); // new Thread() { // @Override // public void run() { // long st = System.currentTimeMillis(); // try { // for(MantisJobMetadataWritable mjmd: storageProvider.initJobs()) { // archiveWorkers(mjmd); // if(MantisJobState.isTerminalState(mjmd.getState())) { // terminatedJobsToDelete.add(new TerminatedJob(mjmd.getJobId(), getTerminatedAt(mjmd))); // jobsToArchive.add(mjmd); // } // else // 
activeJobsMap.put(mjmd.getJobId(), mjmd); // } // logger.info("Read " + activeJobsMap.size() + " job records from persistence in " + (System.currentTimeMillis() - st) + " ms"); // if(jobsInitializer != null) { // Map<String, MantisJobDefinition> jobDefsMap = new HashMap<>(); // for(MantisJobMetadata mjmd: activeJobsMap.values()) { // // we derive the value for being ready for job master by looking to see if stage 0 was // // created when this job was created. // final boolean isReadyForJobMaster = mjmd.getStageMetadata(0) != null; // jobDefsMap.put(mjmd.getJobId(), new MantisJobDefinition(mjmd.getName(), mjmd.getUser(), mjmd.getJarUrl(), "", // mjmd.getParameters(), mjmd.getSla(), mjmd.getSubscriptionTimeoutSecs(), getSchedulingInfo(mjmd), // 0, 0, null, null, isReadyForJobMaster, mjmd.getMigrationConfig(),mjmd.getLabels())); // min/max and cron spec don't matter for job instance // } // ref.set(jobsInitializer.call(MantisJobStore.this, jobDefsMap)); // } // } catch (IOException e) { // logger.error( // String.format("Exiting due to storage init failure: %s: ", e.getMessage()), // e // ); // System.exit(1); // can't deal with storage error // } // initMillis.set(System.currentTimeMillis() - st); // latch.countDown(); // } // } // .start(); // long until = System.currentTimeMillis() + maxInitTimeSecs*1000L; // while(System.currentTimeMillis() < until) { // try { // if(!latch.await(until - System.currentTimeMillis(), TimeUnit.MILLISECONDS)) { // logger.error("Timed out waiting for initialization after " + maxInitTimeSecs + " secs, committing suicide"); // System.exit(3); // } // break; // } catch (InterruptedException e) { // logger.warn("Interrupted waiting for initialization"); // } // } // // asynchronously start post init // new Thread() { // @Override // public void run() { // doPostInit(jobsToArchive, ref.get()); // } // }.start(); // } // // private long getTerminatedAt(MantisJobMetadata mjmd) { // long terminatedAt = mjmd.getSubmittedAt(); // 
for(MantisStageMetadata msmd: mjmd.getStageMetadata()) { // for(MantisWorkerMetadata mwmd: msmd.getAllWorkers()) { // terminatedAt = Math.max(terminatedAt, mwmd.getCompletedAt()); // } // } // return terminatedAt; // } // // private void doPostInit(List<MantisJobMetadataWritable> jobsToArchive, Collection<NamedJob> namedJobs) { // long start1 = System.currentTimeMillis(); // final AtomicInteger count = new AtomicInteger(); // storageProvider.initArchivedJobs() // .doOnNext(job -> { // archivedJobsMetadataCache.add((MantisJobMetadataWritable) job); // archivedJobIds.put(job.getJobId(), job.getJobId()); // terminatedJobsToDelete.add(new TerminatedJob(job.getJobId(), getTerminatedAt(job))); // count.incrementAndGet(); // }) // .toBlocking() // .lastOrDefault(null); // logger.info("Read " + count.get() + " archived job records from persistence in " + (System.currentTimeMillis() - start1) + " ms"); // if (!jobsToArchive.isEmpty()) { // long start2 = System.currentTimeMillis(); // Map<String, NamedJob> namedJobMap = new HashMap<>(); // if (!namedJobs.isEmpty()) { // for (NamedJob nj: namedJobs) // namedJobMap.put(nj.getName(), nj); // } // for (MantisJobMetadataWritable job: jobsToArchive) { // try { // archiveJob(job); // final NamedJob namedJob = namedJobMap.get(job.getName()); // if (namedJob != null) // namedJob.initCompletedJob(new NamedJob.CompletedJob(job.getName(), job.getJobId(), // null, job.getState(), job.getSubmittedAt(), getTerminatedAt(job), job.getUser(),namedJob.getLabels())); // } catch (IOException e) { // logger.error("Error archiving job " + job.getJobId() + ": " + e.getMessage(), e); // } // } // logger.info("Moved " + jobsToArchive.size() + " completed jobs to archived storage in " + (System.currentTimeMillis() - start2) + " ms"); // } // postInitMillis.set(System.currentTimeMillis() - start1); // } // // public final void shutdown() { // activeJobsMap.clear(); // storageProvider.shutdown(); // } //}
4,348
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/store/InvalidJobStateChangeException.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.store;

import io.mantisrx.runtime.MantisJobState;

/**
 * Thrown when a job is found in an unexpected {@link MantisJobState}, or when a
 * requested transition between two job states is not a legal state change.
 */
public class InvalidJobStateChangeException extends Exception {

    /**
     * Creates an exception reporting that the job was in an unexpected state.
     *
     * @param jobId the id of the job concerned
     * @param state the unexpected state the job was found in
     */
    public InvalidJobStateChangeException(String jobId, MantisJobState state) {
        super(unexpectedStateMessage(jobId, state));
    }

    /**
     * Creates an exception reporting an unexpected job state, with an underlying cause.
     *
     * @param jobId the id of the job concerned
     * @param state the unexpected state the job was found in
     * @param t     the underlying cause
     */
    public InvalidJobStateChangeException(String jobId, MantisJobState state, Throwable t) {
        super(unexpectedStateMessage(jobId, state), t);
    }

    /**
     * Creates an exception reporting an illegal state transition.
     *
     * @param jobId     the id of the job concerned
     * @param fromState the state the job was transitioning from
     * @param toState   the disallowed target state
     */
    public InvalidJobStateChangeException(String jobId, MantisJobState fromState, MantisJobState toState) {
        super(invalidTransitionMessage(jobId, fromState, toState));
    }

    /**
     * Creates an exception reporting an illegal state transition, with an underlying cause.
     *
     * @param jobId     the id of the job concerned
     * @param fromState the state the job was transitioning from
     * @param toState   the disallowed target state
     * @param cause     the underlying cause
     */
    public InvalidJobStateChangeException(String jobId, MantisJobState fromState, MantisJobState toState, Throwable cause) {
        super(invalidTransitionMessage(jobId, fromState, toState), cause);
    }

    // Message builders; wording is kept exactly as produced by the original constructors.
    private static String unexpectedStateMessage(String jobId, MantisJobState state) {
        return "Unexpected state " + state + " for job " + jobId;
    }

    private static String invalidTransitionMessage(String jobId, MantisJobState fromState, MantisJobState toState) {
        return "Invalid state transition of job " + jobId + " from state " + fromState + " to " + toState;
    }
}
4,349
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/store/NamedJob.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.store; import java.net.URL; import java.util.ArrayList; import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.Optional; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty; import com.netflix.fenzo.triggers.CronTrigger; import com.netflix.fenzo.triggers.TriggerOperator; import com.netflix.fenzo.triggers.exceptions.SchedulerException; import io.mantisrx.common.Label; import io.mantisrx.runtime.JobOwner; import io.mantisrx.runtime.MantisJobState; import io.mantisrx.runtime.NamedJobDefinition; import io.mantisrx.runtime.WorkerMigrationConfig; import io.mantisrx.runtime.descriptor.SchedulingInfo; import io.mantisrx.runtime.parameter.Parameter; import io.mantisrx.server.master.MantisJobOperations; import io.mantisrx.server.master.config.ConfigurationProvider; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.functions.Action1; //import com.google.common.collect.Lists; public class NamedJob { private static final Logger logger = LoggerFactory.getLogger(NamedJob.class); private static final int MaxValueForSlaMin = 5; private static final int MaxValueForSlaMax = 100; 
private final String name; private final List<Jar> jars = new ArrayList<>(); private JobOwner owner; private volatile SLA sla; private List<Parameter> parameters; private boolean isReadyForJobMaster = false; private WorkerMigrationConfig migrationConfig; private volatile long lastJobCount = 0; private volatile boolean disabled = false; private volatile boolean cronActive = false; // @JsonIgnore // private Map<String, CompletedJob> completedJobs = new HashMap<>(); // @JsonIgnore // private final BehaviorSubject<String> jobIds; // @JsonIgnore // private final SortedSet<MantisJobMgr> sortedJobMgrs; // @JsonIgnore // private final SortedSet<MantisJobMgr> sortedRegisteredJobMgrs; // @JsonIgnore // private final ReentrantLock lock = new ReentrantLock(); // @JsonIgnore private volatile boolean isActive = true; // @JsonIgnore private MantisJobOperations jobOps; // @JsonIgnore // private final static String JobSubmissionsCounterName = "JobSlaNumSubmissions"; // @JsonIgnore // private final Counter jobSubmissionsCounter; // @JsonIgnore // private final static String JobTerminationsCounterName = "JobSlaNumTerminations"; // @JsonIgnore // private final Counter jobTerminationsCounter; // @JsonIgnore // private final static String SLA_FAILED_JOB_CLUSTERS = "slaFailedJobClusters"; // @JsonIgnore // private final Counter slaFailedJobClustersCounter; // @JsonIgnore // private final Comparator<MantisJobMgr> comparator = (o1, o2) -> { // if (o2 == null) // return -1; // if (o1 == null) // return 1; // return Long.compare(getJobIdNumber(o1.getJobId()), getJobIdNumber(o2.getJobId())); // }; // @JsonIgnore // private MantisStorageProvider storageProvider = null; // @JsonIgnore // AtomicBoolean isEnforcingSla = new AtomicBoolean(false); // private List<Label> labels; @JsonCreator @JsonIgnoreProperties(ignoreUnknown = true) public NamedJob(@JsonProperty("jobOps") MantisJobOperations jobOps, @JsonProperty("name") String name, @JsonProperty("jars") List<Jar> jars, @JsonProperty("sla") SLA 
sla, @JsonProperty("parameters") List<Parameter> parameters, @JsonProperty("owner") JobOwner owner, @JsonProperty("lastJobCount") long lastJobCount, @JsonProperty("disabled") boolean disabled, @JsonProperty("isReadyForJobMaster") boolean isReadyForJobMaster, @JsonProperty("migrationConfig") WorkerMigrationConfig migrationConfig, @JsonProperty("labels") List<Label> labels) { this.jobOps = jobOps; this.name = name; if (sla == null) sla = new SLA(0, 0, null, null); this.disabled = disabled; this.isReadyForJobMaster = isReadyForJobMaster; this.migrationConfig = Optional.ofNullable(migrationConfig).orElse(WorkerMigrationConfig.DEFAULT); this.sla = sla; try { this.sla.validate(); } catch (InvalidNamedJobException e) { logger.warn(name + ": disabling due to unexpected error validating sla: " + e.getMessage()); this.disabled = true; } if (labels != null) { this.labels = labels; } else { this.labels = new LinkedList<>(); } this.parameters = parameters; this.owner = owner; this.lastJobCount = lastJobCount; if (jars != null) this.jars.addAll(jars); //// jobIds = BehaviorSubject.create(); //// sortedJobMgrs = new TreeSet<>(comparator); //// sortedRegisteredJobMgrs = new TreeSet<>(comparator); //// Metrics m = new Metrics.Builder() //// .id(NamedJob.class.getCanonicalName(), new BasicTag("jobcluster", name)) //// .addCounter(JobSubmissionsCounterName) //// .addCounter(JobTerminationsCounterName) //// .addCounter(SLA_FAILED_JOB_CLUSTERS) //// .build(); //// m = MetricsRegistry.getInstance().registerAndGet(m); //// jobSubmissionsCounter = m.getCounter(JobSubmissionsCounterName); //// jobTerminationsCounter = m.getCounter(JobTerminationsCounterName); //// slaFailedJobClustersCounter = m.getCounter(SLA_FAILED_JOB_CLUSTERS); //// try { // setupCron(); // } catch (SchedulerException e) { // logger.error(name + ": error setting up cron: " + e.getMessage()); // } } public static String getJobId(String name, long number) { return name + "-" + number; } static String getJobName(String 
jobId) { return jobId.substring(0, jobId.lastIndexOf('-')); } private static long getJobIdNumber(String jobId) { return Long.parseLong(jobId.substring(jobId.lastIndexOf('-') + 1)); } @Override public String toString() { return "NamedJob [name=" + name + ", jars=" + jars + ", owner=" + owner + ", sla=" + sla + ", parameters=" + parameters + ", isReadyForJobMaster=" + isReadyForJobMaster + ", migrationConfig=" + migrationConfig + ", lastJobCount=" + lastJobCount + ", disabled=" + disabled + ", isActive=" + isActive + ", labels=" + labels + "]"; } private int getMaxNumberOfJars() { return ConfigurationProvider.getConfig().getMaximumNumberOfJarsPerJobName(); } // @JsonIgnore // void setStorageProvider(MantisStorageProvider storageProvider) { // this.storageProvider = storageProvider; // } // // private void setupCron() throws SchedulerException { // try { // if (!disabled) { // sla.initCron(this); // cronActive = sla.hasCronSpec; // } // } catch (SchedulerException e) { // cronActive = false; // disabled = true; // throw e; // } // } /* package */ void setJobOps(MantisJobOperations jobOps) { this.jobOps = jobOps; } // private void trim() { // int maxNumberOfJars = getMaxNumberOfJars(); // if (jars.size() > maxNumberOfJars) { // final Iterator<Jar> iterator = jars.iterator(); // int toRemove = jars.size() - maxNumberOfJars; // while (iterator.hasNext() && toRemove-- > 0) { // final Jar next = iterator.next(); // if (notReferencedByJobs(next)) // iterator.remove(); // } // } // } // private boolean notReferencedByJobs(Jar next) { // for (MantisJobMgr jobMgr : sortedJobMgrs) { // if (jobMgr.isActive() && jobMgr.getJobMetadata().getJarUrl().toString().equals(next.getUrl().toString())) // return false; // } // return true; // } public String getName() { return name; } // void addJar(Jar jar) throws InvalidNamedJobException { // // add only if version is unique // for (Jar j : jars) // if (j.version.equals(jar.version)) // throw new InvalidNamedJobException("Jar version " + 
jar.version + " already used, must be unique"); // jars.add(jar); // trim(); // } public List<Jar> getJars() { return Collections.unmodifiableList(jars); } public SLA getSla() { return sla; } public List<Parameter> getParameters() { return parameters; } void setParameters(List<Parameter> parameters) { this.parameters = parameters; } public List<Label> getLabels() { return this.labels; } void setLabels(List<Label> labels) { this.labels = labels; } public JobOwner getOwner() { return owner; } void setOwner(JobOwner owner) { this.owner = owner; } public long getLastJobCount() { return lastJobCount; } @JsonIgnore public long getNextJobNumber() { return ++lastJobCount; } public boolean getDisabled() { return disabled; } public void setDisabled(boolean disabled) { this.disabled = disabled; // enforceSla(Optional.empty()); } public boolean getIsReadyForJobMaster() { return isReadyForJobMaster; } public void setIsReadyForJobMaster(boolean b) { isReadyForJobMaster = b; } public WorkerMigrationConfig getMigrationConfig() { return migrationConfig; } public void setMigrationConfig(final WorkerMigrationConfig migrationConfig) { this.migrationConfig = migrationConfig; } @JsonIgnore public boolean getIsActive() { return isActive; } // @JsonIgnore // public Map<String, CompletedJob> getCompletedJobs() { // return completedJobs; // } @JsonIgnore public void setInactive() throws NamedJobDeleteException { // setDisabled(true); // // delete all completed jobs // // In order to avoid concurrent modification, copy the completed jobs from the map's values since the map will // // get modified from within the call of operations triggered to deleted each job. 
// List<CompletedJob> cjobs = new LinkedList<>(completedJobs.values()); // for (CompletedJob cj : cjobs) { // try { // jobOps.deleteJob(cj.getJobId()); // } catch (IOException e) { // throw new NamedJobDeleteException("Error deleting job " + cj.jobId, e); // } // } // completedJobs.clear(); isActive = false; } public boolean getCronActive() { return cronActive; } /** * Get the Jar for the job that matches the <code>version</code> argument. * * @param version The version to match. * * @return Latest jar uploaded if <code>version</code> is <code>null</code> or empty, or jar whose version * matches the argument, or null if no such version exists. */ @JsonIgnore Jar getJar(String version) { if (version == null || version.isEmpty()) return jars.get(jars.size() - 1); for (Jar j : jars) if (version.equals(j.version)) return j; return null; } // void setSla(SLA sla) throws InvalidNamedJobException { // try (AutoCloseable l = obtainLock()) { // sla.validate(); // this.sla.destroyCron(); // this.sla = sla; // try { // setupCron(); // } catch (SchedulerException e) { // throw new InvalidNamedJobException(e.getMessage(), e); // } // enforceSla(Optional.empty()); // } catch (Exception e) { // shouldn't happen, this is only to make obtainlock() happy // logger.warn("Unexpected exception setting sla: " + e.getMessage()); // throw new InvalidNamedJobException("Unexpected error: " + e.getMessage(), e); // } // } public static class CompletedJob { private final String name; private final String jobId; private final String version; private final MantisJobState state; private final long submittedAt; private final long terminatedAt; private final String user; private final List<Label> labels; @JsonCreator @JsonIgnoreProperties(ignoreUnknown = true) public CompletedJob( @JsonProperty("name") String name, @JsonProperty("jobId") String jobId, @JsonProperty("version") String version, @JsonProperty("state") MantisJobState state, @JsonProperty("submittedAt") long submittedAt, 
@JsonProperty("terminatedAt") long terminatedAt, @JsonProperty("user") String user, @JsonProperty("labels") List<Label> labels ) { this.name = name; this.jobId = jobId; this.version = version; this.state = state; this.submittedAt = submittedAt; this.terminatedAt = terminatedAt; this.user = user; if (labels != null) { this.labels = labels; } else { this.labels = new ArrayList<>(); } } public String getName() { return name; } public String getJobId() { return jobId; } public String getVersion() { return version; } public MantisJobState getState() { return state; } public long getSubmittedAt() { return submittedAt; } public long getTerminatedAt() { return terminatedAt; } public String getUser() { return user; } public List<Label> getLabels() { return labels; } @Override public String toString() { return "CompletedJob{" + "name='" + name + '\'' + ", jobId='" + jobId + '\'' + ", version='" + version + '\'' + ", state=" + state + ", submittedAt=" + submittedAt + ", terminatedAt=" + terminatedAt + ", user='" + user + '\'' + ", labels=" + labels + '}'; } } public static class Jar { private final URL url; private final String version; private final long uploadedAt; private final SchedulingInfo schedulingInfo; @JsonCreator @JsonIgnoreProperties(ignoreUnknown = true) public Jar(@JsonProperty("url") URL url, @JsonProperty("uploadedAt") long uploadedAt, @JsonProperty("version") String version, @JsonProperty("schedulingInfo") SchedulingInfo schedulingInfo) { this.url = url; this.uploadedAt = uploadedAt; this.version = (version == null || version.isEmpty()) ? 
"" + System.currentTimeMillis() : version; this.schedulingInfo = schedulingInfo; } public URL getUrl() { return url; } public long getUploadedAt() { return uploadedAt; } public String getVersion() { return version; } public SchedulingInfo getSchedulingInfo() { return schedulingInfo; } } public static class SLA { @JsonIgnore private static final TriggerOperator triggerOperator; static { triggerOperator = new TriggerOperator(1); try { triggerOperator.initialize(); } catch (SchedulerException e) { logger.error("Unexpected: " + e.getMessage(), e); throw new RuntimeException(e); } } private final int min; private final int max; private final String cronSpec; private final NamedJobDefinition.CronPolicy cronPolicy; @JsonIgnore private final boolean hasCronSpec; @JsonIgnore private final NamedJobDefinition.CronPolicy defaultPolicy = NamedJobDefinition.CronPolicy.KEEP_EXISTING; @JsonIgnore private CronTrigger<NamedJob> scheduledTrigger; @JsonIgnore private String triggerGroup = null; @JsonIgnore private String triggerId = null; @JsonCreator @JsonIgnoreProperties(ignoreUnknown = true) public SLA( @JsonProperty("min") int min, @JsonProperty("max") int max, @JsonProperty("cronSpec") String cronSpec, @JsonProperty("cronPolicy") NamedJobDefinition.CronPolicy cronPolicy ) { if (cronSpec != null && !cronSpec.isEmpty()) { this.cronSpec = cronSpec; hasCronSpec = true; this.max = 1; this.min = 0; this.cronPolicy = cronPolicy == null ? 
defaultPolicy : cronPolicy; } else { hasCronSpec = false; this.min = min; this.max = max; this.cronSpec = null; this.cronPolicy = null; } } public int getMin() { return min; } public int getMax() { return max; } public String getCronSpec() { return cronSpec; } public NamedJobDefinition.CronPolicy getCronPolicy() { return cronPolicy; } private void validate() throws InvalidNamedJobException { if (max < min) throw new InvalidNamedJobException("Cannot have max=" + max + " < min=" + min); if (min > MaxValueForSlaMin) throw new InvalidNamedJobException("Specified min sla value " + min + " cannot be >" + MaxValueForSlaMin); if (max > MaxValueForSlaMax) throw new InvalidNamedJobException("Max sla value " + max + " cannot be >" + MaxValueForSlaMax); } // caller must lock to avoid concurrent access with destroyCron() private void initCron(NamedJob job) throws SchedulerException { // DISABLED AS Master V2 does not use this class for cron // if(!hasCronSpec || triggerId != null) // return; // logger.info("Init'ing cron for " + job.getName()); // triggerGroup = job.getName() + "-" + this; // try { // scheduledTrigger = new CronTrigger<>(cronSpec, job.getName(), job, NamedJob.class, CronTriggerAction.class); // triggerId = triggerOperator.registerTrigger(triggerGroup, scheduledTrigger); // } catch (IllegalArgumentException e) { // throw new SchedulerException(e.getMessage(), e); // } } // caller must lock to avoid concurrent access with initCron() private void destroyCron() { // DISABLED AS Master V2 does not use this class for cron // try { // if (triggerId != null) { // logger.info("Destroying cron " + triggerId); // triggerOperator.deleteTrigger(triggerGroup, triggerId); // triggerId = null; // } // } catch (TriggerNotFoundException | SchedulerException e) { // logger.warn("Couldn't delete trigger group " + triggerGroup + ", id " + triggerId); // } } } // MantisJobMgr getJobWithUniqueTag(String unique) { // try (AutoCloseable l = obtainLock()) { // MantisJobMgr mgr = 
getActiveJobMgrWithUniqueTag(unique); // return mgr == null ? // getRegisteredJobMgrWithUniqueTag(unique) : // mgr; // } catch (Exception e) { // logger.warn("Unexpected exception: " + e.getMessage()); // return null; // } // } // // private MantisJobMgr getRegisteredJobMgrWithUniqueTag(String unique) { // for (MantisJobMgr jobMgr : sortedRegisteredJobMgrs) { // final String uniqueTag = NamedJobs.getUniqueTag(jobMgr.getJobMetadata().getSla().getUserProvidedType()); // if (jobMgr.isActive() && uniqueTag != null && !uniqueTag.isEmpty() && uniqueTag.equals(unique)) // return jobMgr; // } // return null; // } // // private MantisJobMgr getActiveJobMgrWithUniqueTag(String unique) { // for (MantisJobMgr jobMgr : sortedJobMgrs) { // final String uniqueTag = NamedJobs.getUniqueTag(jobMgr.getJobMetadata().getSla().getUserProvidedType()); // if (jobMgr.isActive() && uniqueTag != null && !uniqueTag.isEmpty() && uniqueTag.equals(unique)) // return jobMgr; // } // return null; // } // Keep this public since Quartz needs to call it when triggering cron. 
public static class CronTriggerAction implements Action1<NamedJob> { @Override public void call(NamedJob job) { logger.info("Cron fired for " + job.getName()); // try (AutoCloseable l = job.obtainLock()) { // if (job.sla.cronPolicy != null) { // MantisJobMgr jobMgr = null; // if (!job.sortedJobMgrs.isEmpty()) // jobMgr = job.sortedJobMgrs.last(); // else if (!job.sortedRegisteredJobMgrs.isEmpty()) // jobMgr = job.sortedRegisteredJobMgrs.last(); // if (job.sla.cronPolicy == NamedJobDefinition.CronPolicy.KEEP_NEW || // jobMgr == null || MantisJobState.isTerminalState(jobMgr.getJobMetadata().getState())) // job.quickSubmitWithDefaults("Cron"); // else // logger.info(job.getName() + ": Skipping submitting new job upon cron trigger, one exists already"); // } // } catch (Exception e) { // // logger.warn(job.getName() + ": Unexpected error in cron trigger execution: " + e.getMessage(), e); // } } } // @JsonIgnore // public Observable<String> getJobIds() { // return jobIds; // } // public void init(Collection<MantisJobMgr> jobMgrs) { // logger.info("Init'ing Job Cluster " + name + " with " + (jobMgrs == null ? 
0 : jobMgrs.size()) + " jobs"); // if (jobMgrs == null || jobMgrs.isEmpty()) // return; // for (MantisJobMgr m : jobMgrs) { // if (m.getJobMetadata().getState() == MantisJobState.Accepted) // sortedRegisteredJobMgrs.add(m); // else if (m.getJobMetadata().getState() == MantisJobState.Launched) // sortedJobMgrs.add(m); // // else, ignore other states // } // if (!sortedJobMgrs.isEmpty()) // jobIds.onNext(sortedJobMgrs.last().getJobId()); // } // // public void registerJobMgr(MantisJobMgr m) { // try (AutoCloseable l = obtainLock()) { // sortedRegisteredJobMgrs.add(m); // } catch (Exception e) { // logger.warn("Unexpected error: " + e.getMessage()); // } // } // // public void addJobMgr(MantisJobMgr m) { // try (AutoCloseable l = obtainLock()) { // sortedRegisteredJobMgrs.remove(m); // if (!sortedJobMgrs.add(m)) // return; // already present in our set // } catch (Exception e) { // logger.error("Unexpected error adding jobMgr for " + m.getJobId() + ": " + e.getMessage(), e); // } // jobIds.onNext(m.getJobId()); // enforceSla(Optional.empty()); // } // // public void jobComplete(final MantisJobMgr m, final MantisJobState state, final long submittedAt, final String user) // throws IOException { // try (AutoCloseable l = obtainLock()) { // sortedJobMgrs.remove(m); // sortedRegisteredJobMgrs.remove(m); // } catch (Exception e) { // logger.error("Unexpected error removing complete job: " + e.getMessage(), e); // } // final CompletedJob completedJob = new CompletedJob(name, m.getJobId(), null, state, // submittedAt, System.currentTimeMillis(), user, new ArrayList<>()); // completedJobs.put(m.getJobId(), completedJob); // storageProvider.storeCompletedJobForNamedJob(name, completedJob); // if (m.getJobMetadata() != null) { // enforceSla(Optional.of(m.getJobMetadata())); // } else { // enforceSla(m.getCompletedJobMetadata()); // } // } // // void initCompletedJob(CompletedJob c) { // if (c != null) { // completedJobs.put(c.getJobId(), c); // } // } // // @JsonIgnore // public 
Collection<MantisJobMgr> getAllJobMgrs() { // List<MantisJobMgr> result = new ArrayList<>(sortedJobMgrs); // result.addAll(sortedRegisteredJobMgrs); // return result; // } // // /* package */ void enforceSla(final Optional<MantisJobMetadata> lastRemovedMantisJobMetadata) { // if (!isEnforcingSla.compareAndSet(false, true)) // return; // already running // List<MantisJobMgr> toKill = new ArrayList<>(); // try (AutoCloseable l = obtainLock()) { // if (disabled) { // List<MantisJobMgr> jobsToKill = new ArrayList<>(); // jobsToKill.addAll(sortedRegisteredJobMgrs); // jobsToKill.addAll(sortedJobMgrs); // if (!jobsToKill.isEmpty()) { // // ensure no job is running // jobsToKill.stream().filter(MantisJobMgr::isActive).forEach(jobMgr -> { // jobOps.killJob("MantisMaster", jobMgr.getJobId(), "job " + getName() + " is disabled"); // jobTerminationsCounter.increment(); // }); // } // sla.destroyCron(); // return; // } // if (sla == null || (sla.min == 0 && sla.max == 0)) // return; // try { // setupCron(); // } catch (SchedulerException e) { // // this is unexpected since sla would have been validated before it was set // logger.error(name + ": Unexpected to fail initializing cron: " + e.getMessage()); // } // List<MantisJobMgr> activeJobMgrs = // sortedJobMgrs.stream().filter(MantisJobMgr::isActive).collect(Collectors.toList()); // List<MantisJobMgr> activeRgstrdJobMgrs = // sortedRegisteredJobMgrs.stream().filter(MantisJobMgr::isActive).collect(Collectors.toList()); // // there could be some jobs running and some registered but not running yet. Eagerly enforcing the sla.max // // could result in killing the running job in favor of the new job, which may not start successfully. Instead, // // we take the following approach: // // Manage min by combining the total of both running and registered jobs. This ensures we don't start // // too many new jobs if previously started ones stay in registered for too long for not successfully starting. 
// if (sla != null && (activeJobMgrs.size() + activeRgstrdJobMgrs.size()) < sla.min) { // logger.info("Submitting " + (sla.min - activeJobMgrs.size()) + " jobs per sla min of " + sla.min + // " for job name " + name); // for (int i = 0; i < sla.min - activeJobMgrs.size(); i++) { // MantisJobMetadata last = null; // if (lastRemovedMantisJobMetadata.isPresent()) { // logger.info("got last removed job {}", lastRemovedMantisJobMetadata.get().getJobId()); // last = lastRemovedMantisJobMetadata.get(); // } // if (!sortedJobMgrs.isEmpty()) { // final MantisJobMetadata lastSorted = sortedJobMgrs.last().getJobMetadata(); // if (last == null || (getJobIdNumber(lastSorted.getJobId()) > getJobIdNumber(last.getJobId()))) { // logger.info("last removed job from sortedJobMgrs {}", lastSorted.getJobId()); // last = lastSorted; // } // } // if (last == null) { // // get it from archived jobs // if (!completedJobs.isEmpty()) { // long latestCompletedAt = 0L; // CompletedJob latest = null; // for (CompletedJob j : completedJobs.values()) { // if (latest == null || latestCompletedAt < j.getTerminatedAt()) { // latest = j; // latestCompletedAt = j.getTerminatedAt(); // } // } // if (latest != null) { // last = storageProvider.loadArchivedJob(latest.getJobId()); // logger.info("last job from completedJobs {}", last.getJobId()); // } // } // } // if (last == null) { // logger.warn("Can't submit new job to maintain sla for job cluster " + name + ": no previous job to clone"); // slaFailedJobClustersCounter.increment(); // } else { // logger.info("submitting new job using job metadata from last job {}", last.getJobId()); // if (submitNewJob(last) != null) { // jobSubmissionsCounter.increment(); // } // } // } // } // // Manage max by killing any excess running jobs. Also, kill any registered jobs older than remaining // // running jobs, or in excess of sla.max. // // For this we sort running and registered JobMgrs and walk the list in descending order to apply this logic. 
// SortedSet<MantisJobMgr> allSortedJobMgrs = new TreeSet<>(comparator); // allSortedJobMgrs.addAll(activeJobMgrs); // allSortedJobMgrs.addAll(activeRgstrdJobMgrs); // final MantisJobMgr[] mantisJobMgrs = allSortedJobMgrs.toArray(new MantisJobMgr[allSortedJobMgrs.size()]); // if (mantisJobMgrs.length > 0) { // boolean slaSatisfied = false; // int activeCount = 0; // int registeredCount = 0; // for (int i = mantisJobMgrs.length - 1; i >= 0; i--) { // MantisJobMgr m = mantisJobMgrs[i]; // boolean isActive = m.getJobMetadata() != null && // m.getJobMetadata().getState() == MantisJobState.Launched; // if (!isActive) // registeredCount++; // if (!isActive && !slaSatisfied && (registeredCount + activeCount) <= sla.max) { // continue; // } // if (slaSatisfied || (!isActive && (registeredCount + activeCount) > sla.max)) { // toKill.add(m); // carry out the kills after unlocking this object // } else if (isActive) // activeCount++; // if (activeCount >= sla.max) // slaSatisfied = true; // } // } // } catch (Exception e) { // logger.error("Unknown error enforcing SLA for " + name + ": " + e.getMessage(), e); // } // shouldn't happen // finally { // try { // if (!toKill.isEmpty()) { // for (MantisJobMgr m : toKill) { // slaKill(m); // } // logger.info(name + ": killed " + toKill.size() + " jobs per sla max of " + sla.max); // } // removeExpiredCompletedJobs(); // } finally { // isEnforcingSla.set(false); // mark exit of enforceSla // } // } // } // private void removeExpiredCompletedJobs() { // if (!completedJobs.isEmpty()) { // final long cutOff = System.currentTimeMillis() - (ConfigurationProvider.getConfig().getTerminatedJobToDeleteDelayHours() * 3600000L); // new LinkedList<>(completedJobs.values()).stream().filter(j -> j.getTerminatedAt() < cutOff).forEach(j -> { // try { // storageProvider.removeCompledtedJobForNamedJob(name, j.getJobId()); // completedJobs.remove(j.getJobId()); // } catch (IOException e) { // logger.warn("Error removing completed job " + j.getJobId() + 
": " + e.getMessage(), e); // } // }); // } // } // // private void slaKill(MantisJobMgr jobMgr) { // jobOps.killJob("MantisMaster", jobMgr.getJobId(), "#jobs exceeded for SLA max of " + sla.max); // jobTerminationsCounter.increment(); // } // // private String quickSubmitWithDefaults(String user) throws InvalidJobException { // try (AutoCloseable l = obtainLock()) { // final MantisJobMgr jobMgr = sortedJobMgrs.isEmpty() ? null : sortedJobMgrs.last(); // if (jobMgr == null) { // CompletedJob lastJob = getLastCompletedJob(); // if (lastJob == null) { // // create a default job with info we have // final MantisJobStatus status = jobOps.submit( // new MantisJobDefinition( // name, user, null, null, // parameters, new JobSla(0, 0, JobSla.StreamSLAType.Lossy, MantisJobDurationType.Perpetual, ""), // 0, jars.get(jars.size() - 1).schedulingInfo, sla.min, sla.max, sla.cronSpec, sla.cronPolicy, // isReadyForJobMaster, migrationConfig, labels) // ); // if (status.getFatalError() != null) { // throw new InvalidJobException(name + ": Couldn't submit job with defaults: " + status.getFatalError()); // } else // return status.getJobId(); // } else { // try { // return submitFromCompletedJob(lastJob.getJobId(), user); // } catch (IOException e) { // throw new InvalidJobException(lastJob.getJobId(), e); // } // } // } else // return submitNewJob(jobMgr.getJobMetadata(), user); // } catch (Exception e) { // logger.warn("Unexpected error submitting job with defaults: " + e.getMessage(), e); // return null; // } // } // // String quickSubmit(String user) throws InvalidJobException { // final MantisJobMgr jobMgr = sortedJobMgrs.isEmpty() ? 
null : sortedJobMgrs.last(); // if (jobMgr == null) { // CompletedJob lastJob = getLastCompletedJob(); // if (lastJob == null) // throw new InvalidJobException("No previous job to copy parameters or scheduling info for quick submit"); // try { // return submitFromCompletedJob(lastJob.getJobId(), user); // } catch (IOException e) { // throw new InvalidJobException(lastJob.getJobId(), e); // } // } else // return submitNewJob(jobMgr.getJobMetadata(), user); // } // // private String submitFromCompletedJob(String jobId, String user) throws IOException, InvalidJobException { // final MantisJobMetadataWritable jobMetadata = storageProvider.loadArchivedJob(jobId); // if (jobMetadata == null) { // throw new InvalidJobException(jobId, new Exception("Can't load completed job from archive")); // } // return submitNewJob(new MantisJobDefinition( // name, user, null, null, // jobMetadata.getParameters(), jobMetadata.getSla(), jobMetadata.getSubscriptionTimeoutSecs(), // MantisJobStore.getSchedulingInfo(jobMetadata), // sla.min, sla.max, sla.cronSpec, sla.cronPolicy, isReadyForJobMaster, migrationConfig, jobMetadata.getLabels() // )); // } // // private CompletedJob getLastCompletedJob() { // CompletedJob last = null; // if (!completedJobs.isEmpty()) { // for (CompletedJob c : completedJobs.values()) { // if (last == null || last.getTerminatedAt() < c.getTerminatedAt()) { // last = c; // } // } // } // return last; // } // // private String submitNewJob(MantisJobMetadata jobMetadata) { // return submitNewJob(jobMetadata, jobMetadata.getUser()); // } // // private String submitNewJob(MantisJobMetadata jobMetadata, String user) { // return submitNewJob(new MantisJobDefinition(name, user, // null, // don't specify jar, let it pick latest // null, // don't specify jar version, let it pick latest // jobMetadata.getParameters(), // jobMetadata.getSla(), // jobMetadata.getSubscriptionTimeoutSecs(), // MantisJobStore.getSchedulingInfo(jobMetadata), sla.min, sla.max, sla.cronSpec, 
sla.cronPolicy, // jobMetadata.getStageMetadata(0) != null, migrationConfig, jobMetadata.getLabels())); // } // // private String submitNewJob(MantisJobDefinition jobDefinition) { // try (AutoCloseable l = obtainLock()) { // final MantisJobStatus status = jobOps.submit(jobDefinition); // if (status.getFatalError() != null) { // logger.error("Couldn't submit replacement job for " + name + " - " + status.getFatalError()); // return null; // } else // return status.getJobId(); // } catch (Exception e) { // logger.error("Unexpected error obtaining lock: " + e.getMessage(), e); // return null; // } // } // // void removeJobMgr(final Optional<MantisJobMgr> jobMgrO, String jobId) { // if (jobMgrO.isPresent()) { // final MantisJobMgr jobMgr = jobMgrO.get(); // logger.info("Removing job " + jobMgr.getJobId()); // try (AutoCloseable l = obtainLock()) { // sortedRegisteredJobMgrs.remove(jobMgr); // if (sortedJobMgrs.remove(jobMgr)) { // if (jobMgr.getJobMetadata() != null) { // enforceSla(Optional.of(jobMgr.getJobMetadata())); // } else { // enforceSla(jobMgr.getCompletedJobMetadata()); // } // } else { // enforceSla(Optional.empty()); // } // } catch (Exception e) { // logger.error("Unexpected error locking: " + e.getMessage(), e); // } // } // completedJobs.remove(jobId); // } // // public String submitWithLatestJar(String user) throws InvalidJobException { // try (AutoCloseable l = obtainLock()) { // if (sortedJobMgrs.isEmpty()) { // final CompletedJob lastCompletedJob = getLastCompletedJob(); // if (lastCompletedJob == null) // return null; // try { // String jobId = submitFromCompletedJob(lastCompletedJob.getJobId(), user); // if (jobId != null) // jobSubmissionsCounter.increment(); // return jobId; // } catch (IOException e) { // throw new InvalidJobException(lastCompletedJob.getJobId(), e); // } // } else { // final String jobId = submitNewJob(sortedJobMgrs.last().getJobMetadata(), user); // if (jobId != null) { // jobSubmissionsCounter.increment(); // } // return 
jobId; // } // } catch (InvalidJobException e) { // throw e; // } catch (Exception e) { // logger.error("Unexpected error submitting with latest jar: " + e.getMessage(), e); // return null; // } // } // // /** // * Obtain a lock on this object, All operations on this object work without checking for concurrent updates. Callers // * are expected to call this method to lock this object for safe modifications and unlock after use. The return object // * can be used in try with resources for reliable unlocking. // * // * @return {@link AutoCloseable} lock object. // */ // public AutoCloseable obtainLock() { // lock.lock(); // return lock::unlock; // } // // static boolean isValidJobName(String name) { // return Pattern.matches("^[A-Za-z]+[A-Za-z0-9+-_=:;]*", name); // } }
4,350
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/store/JobAlreadyExistsException.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.store; public class JobAlreadyExistsException extends Exception { public JobAlreadyExistsException(String jobId) { super(jobId); } public JobAlreadyExistsException(String jobId, Throwable cause) { super(jobId, cause); } }
4,351
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/store/MantisStageMetadata.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.server.master.store;

import java.util.Collection;
import java.util.List;

import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;

/**
 * Read-only view of the metadata for one stage of a Mantis job: its position
 * within the job, resource definition, worker set, and scaling configuration.
 */
public interface MantisStageMetadata {

    /** Returns the ID of the job this stage belongs to. */
    String getJobId();

    /** Returns this stage's number within the job. */
    int getStageNum();

    /** Returns the total number of stages in the enclosing job. */
    int getNumStages();

    /** Returns the machine (resource) definition used for this stage's workers. */
    MachineDefinition getMachineDefinition();

    /** Returns the number of workers configured for this stage. */
    int getNumWorkers();

    /** Returns the hard placement constraints for this stage's workers. */
    List<JobConstraints> getHardConstraints();

    /** Returns the soft (best-effort) placement constraints for this stage's workers. */
    List<JobConstraints> getSoftConstraints();

    /** Returns the autoscaling policy for this stage. */
    StageScalingPolicy getScalingPolicy();

    /** Returns whether this stage is scalable. */
    boolean getScalable();

    // NOTE(review): the distinction between the next two accessors is not
    // visible from this interface alone — presumably the "by index" set holds
    // one current worker per worker index, while getAllWorkers() may also
    // include replaced/terminated workers; confirm against the implementation.

    /** Returns the set of workers keyed by worker index. */
    Collection<MantisWorkerMetadata> getWorkerByIndexMetadataSet();

    /** Returns all workers known for this stage. */
    Collection<MantisWorkerMetadata> getAllWorkers();

    /**
     * Returns the worker at the given worker index.
     *
     * @throws InvalidJobException if no worker exists at {@code workerIndex}
     */
    MantisWorkerMetadata getWorkerByIndex(int workerIndex) throws InvalidJobException;

    /**
     * Returns the worker with the given worker number.
     *
     * @throws InvalidJobException if no worker exists with {@code workerNumber}
     */
    MantisWorkerMetadata getWorkerByWorkerNumber(int workerNumber) throws InvalidJobException;
}
4,352
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/store/NamedJobs.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.store; // //import com.fasterxml.jackson.core.JsonProcessingException; //import com.fasterxml.jackson.databind.DeserializationFeature; //import com.fasterxml.jackson.databind.ObjectMapper; //import com.fasterxml.jackson.datatype.jdk8.Jdk8Module; ////import com.google.common.base.Preconditions; // //import com.google.common.base.Preconditions; //import io.mantisrx.common.Label; //import io.mantisrx.runtime.JobOwner; //import io.mantisrx.runtime.JobSla; //import io.mantisrx.runtime.MantisJobDefinition; //import io.mantisrx.runtime.NamedJobDefinition; //import io.mantisrx.runtime.WorkerMigrationConfig; //import io.mantisrx.runtime.descriptor.SchedulingInfo; //import io.mantisrx.runtime.parameter.Parameter; //import io.mantisrx.server.master.MantisAuditLogEvent; //import io.mantisrx.server.master.MantisAuditLogWriter; //import io.mantisrx.server.master.MantisJobMgr; //import io.mantisrx.server.master.MantisJobOperations; //import io.mantisrx.server.master.jobmgmt.JobRegistry; //import org.json.JSONException; //import org.json.JSONObject; //import org.slf4j.Logger; //import org.slf4j.LoggerFactory; //import rx.functions.Action2; //import rx.functions.Func1; //import rx.schedulers.Schedulers; //import rx.subjects.ReplaySubject; ////import scala.util.parsing.json.JSONObject; // //import java.io.IOException; //import java.net.URL; //import java.util.*; //import 
java.util.concurrent.ConcurrentHashMap; //import java.util.concurrent.ConcurrentMap; //import java.util.concurrent.TimeUnit; //import java.util.concurrent.atomic.AtomicReference; //import java.util.concurrent.locks.ReentrantLock; // public class NamedJobs { // // static class NamedJobLock { // private final ConcurrentMap<String, ReentrantLock> locks = new ConcurrentHashMap<>(); // // synchronized AutoCloseable obtainLock(final String jobName) { // ReentrantLock newLock = new ReentrantLock(); // final ReentrantLock oldLock = locks.putIfAbsent(jobName, newLock); // final ReentrantLock lock = oldLock==null? newLock : oldLock; // lock.lock(); // return new AutoCloseable() { // @Override // public void close() throws Exception { // lock.unlock(); // if(!lock.isLocked()) // locks.remove(jobName); // } // }; // } // } // // public static class JobIdForSubmit { // private final String jobId; // private final boolean isNewJobId; // // public JobIdForSubmit(String jobId, boolean newJobId) { // this.jobId = jobId; // isNewJobId = newJobId; // } // public String getJobId() { // return jobId; // } // public boolean isNewJobId() { // return isNewJobId; // } // } // // private static final ObjectMapper mapper = new ObjectMapper(); // // private final JobRegistry jobRegistry; // private static final Logger logger = LoggerFactory.getLogger(NamedJobs.class); // private MantisStorageProvider storageProvider; // private final MantisJobOperations jobOps; // private final NamedJobLock namedJobLock = new NamedJobLock(); // // public NamedJobs(final JobRegistry jobRegistry, MantisJobOperations jobOps) { // this.jobRegistry = jobRegistry; // this.jobOps = jobOps; // mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); // mapper.registerModule(new Jdk8Module()); // } // // public void init(MantisStorageProvider storageProvider) { // this.storageProvider = storageProvider; // try { // for(NamedJob job: storageProvider.initNamedJobs()) { // job.setJobOps(jobOps); // 
if(this.jobRegistry.addJobClusterIfAbsent(job) != null) // throw new IllegalStateException("Unexpected to add duplicate namedJob entry for " + job.getName()); // job.setStorageProvider(storageProvider); // } // final AtomicReference<Throwable> errorRef = new AtomicReference<>(); // storageProvider.initNamedJobCompletedJobs() // .doOnNext(cj -> { // final Optional<NamedJob> namedJob = jobRegistry.getJobCluster(cj.getName()); // if (namedJob.isPresent()) // namedJob.get().initCompletedJob(cj); // }) // .doOnError(errorRef::set) // .toBlocking() // .lastOrDefault(null); // if(errorRef.get() != null) // throw new IOException(errorRef.get()); // } catch (IOException e) { // // can't handle this // throw new IllegalStateException(e.getMessage(), e); // } // Schedulers.computation().createWorker().schedulePeriodically(() -> { // for(NamedJob job: jobRegistry.getAllJobClusters()) { // job.enforceSla(Optional.empty()); // } // }, 60, 60, TimeUnit.SECONDS); // } // // public Optional<NamedJob> getJobByName(String name) { // return jobRegistry.getJobCluster(name); // } // // public Collection<NamedJob> getAllNamedJobs() { // return jobRegistry.getAllJobClusters().isEmpty() ? 
Collections.emptyList() : Collections.unmodifiableCollection(jobRegistry.getAllJobClusters()); // } // // public void deleteJob(String jobId) throws IOException { // final Optional<NamedJob> namedJob = jobRegistry.getJobCluster(NamedJob.getJobName(jobId)); // if (namedJob.isPresent()) { // final Optional<MantisJobMgr> jobMgrO = jobRegistry.getJobManager(jobId); // namedJob.get().removeJobMgr(jobMgrO, jobId); // storageProvider.removeCompledtedJobForNamedJob(namedJob.get().getName(), jobId); // } // } // // public NamedJob createNamedJob(NamedJobDefinition namedJobDefinition) throws InvalidNamedJobException { // final String name = namedJobDefinition.getJobDefinition().getName(); // final String user = namedJobDefinition.getJobDefinition().getUser(); // if(user==null || user.isEmpty()) // throw new InvalidNamedJobException("Must set user in request"); // final JobOwner owner = namedJobDefinition.getOwner(); // if(!NamedJob.isValidJobName(name)) // throw new InvalidNamedJobException("Invalid name for Job Cluster: " + name); // if(jobRegistry.addJobClusterIfAbsent( // new NamedJob(jobOps, // name, // null, // null, // namedJobDefinition.getJobDefinition().getParameters(), // owner, // 0, // false, // namedJobDefinition.getJobDefinition().getIsReadyForJobMaster(), // namedJobDefinition.getJobDefinition().getMigrationConfig(), // namedJobDefinition.getJobDefinition().getLabels() // ) // ) != null // ) { // throw new InvalidNamedJobException(name+" Job Cluster already exists"); // } // final NamedJob job = jobRegistry.getJobCluster(name).get(); // logger.info("Creating NamedJob->:" + job); // job.setStorageProvider(storageProvider); // boolean success=false; // try(AutoCloseable lock = job.obtainLock()) { // job.addJar(new NamedJob.Jar(namedJobDefinition.getJobDefinition().getJobJarFileLocation(), // System.currentTimeMillis(), namedJobDefinition.getJobDefinition().getVersion(), // namedJobDefinition.getJobDefinition().getSchedulingInfo())); // job.setSla(new 
NamedJob.SLA(namedJobDefinition.getJobDefinition().getSlaMin(), // namedJobDefinition.getJobDefinition().getSlaMax(), // namedJobDefinition.getJobDefinition().getCronSpec(), // namedJobDefinition.getJobDefinition().getCronPolicy())); // storageProvider.storeNewNamedJob(job); // success=true; // MantisAuditLogWriter.getInstance() // .getObserver().onNext(new MantisAuditLogEvent(MantisAuditLogEvent.Type.NAMED_JOB_CREATE, name, // "user=" + user)); // } catch(IOException e) { // throw new InvalidNamedJobException(e.getMessage(), e); // } catch (Exception e) { // logLockError(e); // throw new InvalidNamedJobException(name+": "+e.getMessage(), e); // } // finally { // if(!success) // jobRegistry.removeJobCluster(name); // } // return job; // } // // public Action2<String, Collection<MantisJobMgr>> getNamedJobIdInitializer() { // return new Action2<String, Collection<MantisJobMgr>>() { // @Override // public void call(String name, Collection<MantisJobMgr> jobMgrs) { // Optional<NamedJob> jobCluster = jobRegistry.getJobCluster(name); // if(!jobCluster.isPresent()) // logger.error("Can't find Job Cluster for name=" + name); // else // jobCluster.get().init(jobMgrs); // } // }; // } // // public void deleteNamedJob(String name, String user, Func1<String, Boolean> jobIdDeleter) throws NamedJobDeleteException { // Optional<NamedJob> jobClusterO = jobRegistry.getJobCluster(name); // if(!jobClusterO.isPresent()) // return; // final NamedJob jobCluster = jobClusterO.get(); // try (AutoCloseable lock = jobCluster.obtainLock()) { // if(!jobCluster.getIsActive()) // return; // String activeJobId = deleteAllJobs(name, jobIdDeleter); // if(activeJobId != null) { // logger.warn("Active job " + activeJobId + " exists, not deleting job cluster " + name); // throw new NamedJobDeleteException("Active job exists - " + activeJobId); // } // boolean deleted = storageProvider.deleteNamedJob(name); // jobCluster.setInactive(); // if(deleted) { // jobRegistry.removeJobCluster(name); // 
MantisAuditLogWriter.getInstance() // .getObserver().onNext(new MantisAuditLogEvent(MantisAuditLogEvent.Type.NAMED_JOB_DELETE, name, // "user=" + user)); // } // } catch(NamedJobDeleteException njde) { // throw njde; // } catch(Exception e) { // logger.error("Error deleting job cluster " + name + ": " + e.getMessage(), e); // throw new NamedJobDeleteException("Unknown error deleting Job Cluster " + name, e); // } // } // // public void setDisabled(String name, String user, boolean status) throws InvalidNamedJobException { // final Optional<NamedJob> jobClusterO = jobRegistry.getJobCluster(name); // if (!jobClusterO.isPresent()) // return; // final NamedJob jobCluster = jobClusterO.get(); // try (AutoCloseable lock = jobCluster.obtainLock()) { // if(!jobCluster.getIsActive()) // throw new InvalidNamedJobException("Job Cluster " + name + " not active"); // if(jobCluster.getDisabled() == status) // return; // no-op // jobCluster.setDisabled(status); // storageProvider.updateNamedJob(jobCluster); // MantisAuditLogWriter.getInstance() // .getObserver().onNext(new MantisAuditLogEvent( // (status ? 
MantisAuditLogEvent.Type.NAMED_JOB_DISABLED : MantisAuditLogEvent.Type.NAMED_JOB_ENABLED), // name, "user=" + user)); // } catch (InvalidNamedJobException e) { // throw e; // } catch (IOException e) { // throw new InvalidNamedJobException(e.getMessage(), e); // } catch (Exception e) { // logger.warn("Unexpected error locking job " + name + ": " + e.getMessage(), e); // throw new InvalidNamedJobException("Internal error disabling job " + name, e); // } // } // // private String deleteAllJobs(String name, Func1<String, Boolean> jobIdDeleter) { // // a simple linear search of all entries should suffice, this isn't expected to be called frequently // boolean foundActive=false; // List<MantisJobMgr> jobMgrs = new ArrayList<>(); // for(MantisJobMgr jobMgr: jobRegistry.getAllJobManagers()) { // MantisJobMetadata jobMetadata = jobMgr.getJobMetadata(); // if(jobMetadata.getName().equals(name)) { // jobMgrs.add(jobMgr); // if(jobMgr.isActive()) // return jobMetadata.getJobId(); // } // } // for(MantisJobMgr jobMgr: jobMgrs) { // if(!jobIdDeleter.call(jobMgr.getJobId())) // return jobMgr.getJobId(); // } // return null; // } // // public NamedJob updateNamedJob(NamedJobDefinition namedJobDefinition, boolean createIfNeeded) throws InvalidNamedJobException { // final String name = namedJobDefinition.getJobDefinition().getName(); // final String user = namedJobDefinition.getJobDefinition().getUser(); // if(user==null || user.isEmpty()) // throw new InvalidNamedJobException("Must set user in request"); // Optional<NamedJob> jobO = jobRegistry.getJobCluster(name); // final int slaMin = namedJobDefinition.getJobDefinition().getSlaMin(); // final int slaMax = namedJobDefinition.getJobDefinition().getSlaMax(); // final String cronSpec = namedJobDefinition.getJobDefinition().getCronSpec(); // final NamedJobDefinition.CronPolicy cronPolicy = namedJobDefinition.getJobDefinition().getCronPolicy(); // if(!jobO.isPresent() && createIfNeeded) { // try { // jobO = 
Optional.ofNullable(createNamedJob(namedJobDefinition)); // } catch (InvalidNamedJobException e) { // jobO = getJobByName(name); // } // } // if(!jobO.isPresent()) // throw new InvalidNamedJobException(name+" job cluster doesn't exist"); // NamedJob jobCluster = jobO.get(); // String version = namedJobDefinition.getJobDefinition().getVersion(); // if(version==null || version.isEmpty()) // version = "" + System.currentTimeMillis(); // SchedulingInfo schedulingInfo = namedJobDefinition.getJobDefinition().getSchedulingInfo(); // if(schedulingInfo==null) { // final List<NamedJob.Jar> jars = jobCluster.getJars(); // schedulingInfo = jars.get(jars.size()-1).getSchedulingInfo(); // } // JobOwner owner = namedJobDefinition.getOwner(); // if(owner == null) // owner = jobCluster.getOwner(); // List<Parameter> parameters = namedJobDefinition.getJobDefinition().getParameters(); // if(parameters==null || parameters.isEmpty()) // parameters = jobCluster.getParameters(); // // List<Label> labels = namedJobDefinition.getJobDefinition().getLabels(); // if(labels == null || labels.isEmpty()) { // labels = jobCluster.getLabels(); // } // final NamedJob.Jar jar = new NamedJob.Jar(namedJobDefinition.getJobDefinition().getJobJarFileLocation(), System.currentTimeMillis(), // version, schedulingInfo); // try (AutoCloseable lock = jobCluster.obtainLock()) { // if(!jobCluster.getIsActive()) // throw new InvalidNamedJobException("Job cluster" + name + " not active"); // jobCluster.setSla(new NamedJob.SLA(slaMin, slaMax, // namedJobDefinition.getJobDefinition().getCronSpec(), // cronPolicy)); // jobCluster.addJar(jar); // jobCluster.setOwner(owner); // jobCluster.setParameters(parameters); // jobCluster.setLabels(labels); // jobCluster.setIsReadyForJobMaster(namedJobDefinition.getJobDefinition().getIsReadyForJobMaster()); // jobCluster.setMigrationConfig(namedJobDefinition.getJobDefinition().getMigrationConfig()); // storageProvider.updateNamedJob(jobCluster); // } catch 
(InvalidNamedJobException inje) { // throw inje; // } catch (IOException e) { // throw new InvalidNamedJobException(e.getMessage(), e); // } catch (Exception e) { // logLockError(e); // throw new InvalidNamedJobException(name+": "+e.getMessage(), e); // } // try { // MantisAuditLogWriter.getInstance() // .getObserver().onNext(new MantisAuditLogEvent(MantisAuditLogEvent.Type.NAMED_JOB_UPDATE, name, // "user: " + user + ", sla: min=" + slaMin + ", max=" + // slaMax + "; jar=" + mapper.writeValueAsString(jar) + "; cronSpec=" + cronSpec + // ", cronPolicy=" + cronPolicy)); // } catch (JsonProcessingException e) { // logger.warn("Error writing jar object value as json: " + e.getMessage()); // } // return jobCluster; // } // // public NamedJob quickUpdateNamedJob(String user, String name, URL jobJar, String version) throws InvalidNamedJobException { // final Optional<NamedJob> jobO = getJobByName(name); // if (!jobO.isPresent()) // throw new InvalidNamedJobException(name+" job cluster doesn't exist"); // final NamedJob job = jobO.get(); // final NamedJob.Jar latestJar = job.getJar(null); // if(version==null || version.isEmpty()) // version = ""+System.currentTimeMillis(); // NamedJobDefinition namedJobDefinition = new NamedJobDefinition( // new MantisJobDefinition( // name, user, jobJar, version, job.getParameters(), null, 0, // latestJar.getSchedulingInfo(), job.getSla().getMin(), job.getSla().getMax(), // job.getSla().getCronSpec(), job.getSla().getCronPolicy(), job.getIsReadyForJobMaster(), job.getMigrationConfig(),job.getLabels() // ), // job.getOwner()); // return updateNamedJob(namedJobDefinition, false); // } // // public void updateSla(String user, String name, NamedJob.SLA sla, boolean forceEnable) throws InvalidNamedJobException { // if(sla == null) // throw new InvalidNamedJobException("Invalid null SLA"); // final Optional<NamedJob> jobO = getJobByName(name); // if (!jobO.isPresent()) // throw new InvalidNamedJobException(name+" job cluster doesn't exist"); 
// // final NamedJob job = jobO.get(); // if(forceEnable) // job.setDisabled(false); // job.setSla(sla); // try { // storageProvider.updateNamedJob(job); // } catch (IOException e) { // throw new InvalidNamedJobException(e.getMessage(), e); // } // MantisAuditLogWriter.getInstance() // .getObserver().onNext(new MantisAuditLogEvent(MantisAuditLogEvent.Type.NAMED_JOB_UPDATE, name, // "user: " + user + ", sla: min=" + sla.getMin() + ", max=" + // sla.getMax() + "; cronSpec=" + sla.getCronSpec() + ", cronPolicy=" + sla.getCronPolicy())); // } // // public void quickUpdateLabels(String user, String name, List<Label> newLabels) throws InvalidNamedJobException { // Preconditions.checkNotNull(newLabels); // Preconditions.checkNotNull(name); // final Optional<NamedJob> jobO = getJobByName(name); // if (!jobO.isPresent()) { // throw new InvalidNamedJobException(name+" job cluster doesn't exist"); // } // final NamedJob job = jobO.get(); // job.setLabels(newLabels); // try { // storageProvider.updateNamedJob(job); // } catch (IOException e) { // throw new InvalidNamedJobException(e.getMessage(), e); // } // MantisAuditLogWriter.getInstance().getObserver().onNext( // new MantisAuditLogEvent(MantisAuditLogEvent.Type.NAMED_JOB_UPDATE, name,"user: " + user + ", labels: " + newLabels)); // } // // public void updateMigrateStrategy(String user, String name, WorkerMigrationConfig migrationConfig) throws InvalidNamedJobException { // if(migrationConfig == null) // throw new InvalidNamedJobException("Invalid null migrationConfig"); // final Optional<NamedJob> jobO = getJobByName(name); // if (!jobO.isPresent()) // throw new InvalidNamedJobException(name+" job cluster doesn't exist"); // final NamedJob job = jobO.get(); // job.setMigrationConfig(migrationConfig); // try { // storageProvider.updateNamedJob(job); // } catch (IOException e) { // throw new InvalidNamedJobException(e.getMessage(), e); // } // MantisAuditLogWriter.getInstance() // .getObserver().onNext(new 
MantisAuditLogEvent(MantisAuditLogEvent.Type.NAMED_JOB_UPDATE, name, // "user: " + user + " migrationConfig: " + migrationConfig.toString())); // } // // public String quickSubmit(String jobName, String user) throws InvalidNamedJobException, InvalidJobException { // final Optional<NamedJob> jobO = getJobByName(jobName); // if (!jobO.isPresent()) // throw new InvalidNamedJobException(jobName + " job cluster doesn't exist"); // return jobO.get().quickSubmit(user); // } // // // Resolve fields of the job definition: // // - if job parameters not specified, inherit from existing ones in name job // // - if new jar given, expect scheduling info as well, and use them, else inherit from existing name job // // - always use worker migration config from NamedJob // // Return new object containing the above resolved fields. // public MantisJobDefinition getResolvedJobDefinition(final MantisJobDefinition jobDefinition) throws InvalidNamedJobException { // String version = jobDefinition.getVersion(); // Optional<NamedJob> namedJobO = getJobByName(jobDefinition.getName()); // if (!namedJobO.isPresent()) // throw new InvalidNamedJobException(jobDefinition.getName()+" job cluster doesn't exist"); // final NamedJob namedJob = namedJobO.get(); // List<Parameter> parameters = jobDefinition.getParameters(); // if(parameters==null || parameters.isEmpty()) // parameters = namedJob.getParameters(); // // List<Label> labels = jobDefinition.getLabels(); // if(labels == null || labels.isEmpty()) { // labels = namedJob.getLabels(); // } // NamedJob.Jar jar=null; // SchedulingInfo schedulingInfo=null; // if(jobDefinition.getJobJarFileLocation()!=null) { // if(jobDefinition.getSchedulingInfo()==null) // throw new InvalidNamedJobException("Scheduling info must be provided along with new job Jar"); // schedulingInfo = jobDefinition.getSchedulingInfo(); // if(version==null || version.isEmpty()) // version = ""+System.currentTimeMillis(); // jar = new 
NamedJob.Jar(jobDefinition.getJobJarFileLocation(), System.currentTimeMillis(), version, // jobDefinition.getSchedulingInfo()); // updateNamedJob(new NamedJobDefinition( // new MantisJobDefinition( // namedJob.getName(), jobDefinition.getUser(), jar.getUrl(), version, parameters, null, 0, // schedulingInfo, namedJob.getSla().getMin(), namedJob.getSla().getMax(), // namedJob.getSla().getCronSpec(), namedJob.getSla().getCronPolicy(), // jobDefinition.getIsReadyForJobMaster(), namedJob.getMigrationConfig(),labels // ), // namedJob.getOwner() // ), // false); // } // if(jar == null) { // jar = namedJob.getJar(version); // schedulingInfo = jobDefinition.getSchedulingInfo()==null? // cloneSchedulingInfo(jar.getSchedulingInfo()) : // getVerifiedSchedulingInfo(namedJob, jar, jobDefinition.getSchedulingInfo(), version); // } // if(jar == null) // throw new InvalidNamedJobException(version+": no such versioned jar found for job cluster " + jobDefinition.getName()); // return new MantisJobDefinition( // jobDefinition.getName(), jobDefinition.getUser(), jar.getUrl(), jar.getVersion(), // parameters, jobDefinition.getJobSla(), jobDefinition.getSubscriptionTimeoutSecs(), schedulingInfo, // namedJob.getSla().getMin(), namedJob.getSla().getMax(), // namedJob.getSla().getCronSpec(), namedJob.getSla().getCronPolicy(), jobDefinition.getIsReadyForJobMaster(), // namedJob.getMigrationConfig(),labels // ); // } // // private SchedulingInfo cloneSchedulingInfo(SchedulingInfo schedulingInfo) { // // we basically need a "deep copy" of the scheduling info, trying to get it done easily via ObjectMapper since // // we already have a reference to it. 
// try { // return mapper.readValue(mapper.writeValueAsString(schedulingInfo), SchedulingInfo.class); // } catch (IOException e) { // logger.warn("Unexpected: " + e.getMessage(), e); // return null; // } // } // // private SchedulingInfo getVerifiedSchedulingInfo(NamedJob namedJob, NamedJob.Jar jar, SchedulingInfo schedulingInfo, String version) throws InvalidNamedJobException { // int givenNumStages = schedulingInfo.getStages().size(); // int existingNumStages = jar.getSchedulingInfo().getStages().size(); // if (namedJob.getIsReadyForJobMaster()) { // if (schedulingInfo.forStage(0) != null) // givenNumStages--; // decrement to get net numStages without job master // if (jar.getSchedulingInfo().forStage(0) != null) // existingNumStages--; // } // if(givenNumStages != existingNumStages) // throw new InvalidNamedJobException("Mismatched scheduling info: expecting #stages=" + // existingNumStages + " for given jar version [" + version + // "], where as, given scheduling info has #stages=" + givenNumStages); // return schedulingInfo; // } // // public JobIdForSubmit getJobIdForSubmit(String name, MantisJobDefinition jobDefinition) throws InvalidNamedJobException { // final Optional<NamedJob> jobO = getJobByName(name); // if (!jobO.isPresent()) { // throw new InvalidNamedJobException(name+" job cluster doesn't exist"); // } // final NamedJob job = jobO.get(); // try (AutoCloseable lock = job.obtainLock()) { // if(job.getDisabled()) // throw new InvalidNamedJobException("Job " + name + " is disabled, submit disallowed"); // final String uniqueTag = getUniqueTag(jobDefinition.getJobSla().getUserProvidedType()); // final MantisJobMgr jobWithUniqueTag = job.getJobWithUniqueTag(uniqueTag); // if(jobWithUniqueTag!=null && jobWithUniqueTag.markNewSubscriber()) { // return new JobIdForSubmit(jobWithUniqueTag.getJobId(), false); // } // String jobId = NamedJob.getJobId(name, job.getNextJobNumber()); // storageProvider.updateNamedJob(job); // return new JobIdForSubmit(jobId, 
true); // } // catch (IOException e) { // throw new InvalidNamedJobException(e.getMessage(), e); // } // catch (InvalidNamedJobException e) { // throw e; // } // catch (Exception e) { // logLockError(e); // throw new InvalidNamedJobException("Unexpected to not get next id for job cluster " + name); // } // } // // static String getUniqueTag(String userProvidedType) { // if(userProvidedType==null || userProvidedType.isEmpty()) // return null; // try { // JSONObject jsonObject = new JSONObject(userProvidedType); // return jsonObject.optString(JobSla.uniqueTagName); // } // catch (Exception e) { // return null; // } // } // // public String getLatestJobId(String name) throws InvalidNamedJobException { // final Optional<NamedJob> jobO = getJobByName(name); // if (!jobO.isPresent()) { // throw new InvalidNamedJobException(name+" job cluster doesn't exist"); // } // final NamedJob job = jobO.get(); // if(!job.getIsActive()) // throw new InvalidNamedJobException(name+" job cluster not active"); // return NamedJob.getJobId(name, job.getLastJobCount()); // } // // private void logLockError(Exception e) { // logger.warn("Unexpected to not get a lock: " + e.getMessage(), e); // } // // public AutoCloseable lockJobName(String jobName) { // return namedJobLock.obtainLock(jobName); // } // // }
4,353
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/server/master/store/MantisWorkerMetadata.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.server.master.store;

import java.util.List;
import java.util.Optional;

import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.core.domain.WorkerId;

/**
 * Metadata object for a Mantis worker. Modification operations do not perform locking. Instead, a lock can be
 * obtained via the <code>obtainLock()</code> method which is an instance of {@link AutoCloseable}.
 */
public interface MantisWorkerMetadata {

    /** Returns the worker's index within its stage. */
    int getWorkerIndex();

    /** Returns the worker number (distinct from the index; see {@link #getResubmitOf()}). */
    int getWorkerNumber();

    /** Returns the composite {@link WorkerId} identifying this worker. */
    WorkerId getWorkerId();

    /** Returns the id of the job this worker belongs to. */
    String getJobId();

    /** Returns the stage number this worker runs in. */
    int getStageNum();

    /** Returns the port used for metrics. */
    int getMetricsPort();

    /** Returns the debug port. */
    int getDebugPort();

    /** Returns the console port. */
    int getConsolePort();

    /** Returns the custom port. */
    int getCustomPort();

    /** Returns the cluster on which the worker was launched, if known. */
    Optional<String> getCluster();

    /**
     * Get number of ports for this worker, including the metrics port.
     *
     * @return The number of ports
     */
    int getNumberOfPorts();

    /** Returns the list of ports assigned to this worker. */
    List<Integer> getPorts();

    /**
     * Appends the given ports to this worker's port list.
     * NOTE(review): this is a mutator; callers presumably hold the lock mentioned in the
     * interface doc — confirm at call sites.
     */
    void addPorts(List<Integer> ports);

    /** Returns the total number of times this worker has been resubmitted. */
    int getTotalResubmitCount();

    /**
     * Get the worker number (not index) of which this is a resubmission of.
     *
     * @return the original worker number this worker replaces
     */
    int getResubmitOf();

    /** Returns the worker's current {@link MantisJobState}. */
    MantisJobState getState();

    /** Returns the slave (host) the worker runs on. */
    String getSlave();

    /** Returns the slave's identifier. */
    String getSlaveID();

    // Lifecycle timestamps below: units are not stated here; presumably epoch millis — confirm
    // against the implementing class.

    /** Returns the time at which the worker was accepted. */
    long getAcceptedAt();

    /** Returns the time at which the worker was launched. */
    long getLaunchedAt();

    /** Returns the time at which the worker entered the starting state. */
    long getStartingAt();

    /** Returns the time at which the worker started. */
    long getStartedAt();

    /** Returns the time at which the worker completed. */
    long getCompletedAt();

    /** Returns the reason the worker completed. */
    JobCompletedReason getReason();
}
4,354
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/StringConstants.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master; public class StringConstants { // User for actions performed by Mantis master public static final String MANTIS_MASTER_USER = "MantisMaster"; }
4,355
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/JobClustersManagerService.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.master;

import akka.actor.ActorRef;
import akka.util.Timeout;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.server.core.BaseService;
import io.mantisrx.server.master.config.ConfigurationProvider;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import static akka.pattern.PatternsCS.ask;

/**
 * Service wrapper that drives initialization of the JobClustersManager actor when this
 * master instance becomes active.
 *
 * <p>Initialization is treated as fatal-on-failure: any failure path calls
 * {@code System.exit(3)} rather than attempting recovery.
 */
public class JobClustersManagerService extends BaseService {
    private static final Logger logger = LoggerFactory.getLogger(JobClustersManagerService.class);
    // Actor that owns all job cluster state; initialized via an ask in start().
    private final ActorRef jobClustersManagerActor;
    // Scheduler handed to the actor as part of its initialize message.
    private final MantisScheduler scheduler;
    // Whether the actor should reload existing jobs from persistent storage on init.
    private final boolean loadJobsFromStore;

    /**
     * @param jobClustersManagerActor actor to initialize on start
     * @param scheduler scheduler passed along in the initialize message
     * @param loadJobsFromStore true to have the actor reload jobs from the store
     */
    public JobClustersManagerService(final ActorRef jobClustersManagerActor,
                                     final MantisScheduler scheduler,
                                     final boolean loadJobsFromStore) {
        // NOTE(review): super(true) — semantics of the flag are defined in BaseService; confirm there.
        super(true);
        this.jobClustersManagerActor = jobClustersManagerActor;
        this.scheduler = scheduler;
        this.loadJobsFromStore = loadJobsFromStore;
    }

    /**
     * Initializes the JobClustersManager actor and blocks until initialization completes.
     *
     * <p>The runnable passed to {@code awaitActiveModeAndStart} presumably runs once this
     * master becomes the active/leader instance (confirm in BaseService). It asks the actor
     * to initialize, then blocks on a latch that is only counted down by a successful
     * response; every failure path (ask failure, non-SUCCESS response, exception, or
     * interruption) terminates the JVM with exit code 3.
     */
    @Override
    public void start() {
        super.awaitActiveModeAndStart(() -> {
            // initialize job clusters manager
            final CountDownLatch latch = new CountDownLatch(1);
            final long startTime = System.currentTimeMillis();
            try {
                long masterInitTimeoutSecs = ConfigurationProvider.getConfig().getMasterInitTimeoutSecs();
                // Ask the actor to initialize, bounded by the configured master init timeout.
                CompletionStage<JobClusterManagerProto.JobClustersManagerInitializeResponse> initResponse =
                        ask(jobClustersManagerActor,
                                new JobClusterManagerProto.JobClustersManagerInitialize(scheduler, loadJobsFromStore),
                                Timeout.apply(masterInitTimeoutSecs, TimeUnit.SECONDS))
                                .thenApply(JobClusterManagerProto.JobClustersManagerInitializeResponse.class::cast);
                initResponse.whenComplete((resp, t) -> {
                    logger.info("JobClustersManagerActor init response {}", resp);
                    // If t != null the short-circuit prevents dereferencing the (null) resp.
                    if (t != null || !resp.responseCode.equals(BaseResponse.ResponseCode.SUCCESS)) {
                        logger.error("failed to initialize JobClustersManagerActor, committing suicide...", t);
                        // Exit before counting down: the await below never returns on failure;
                        // the process dies instead.
                        System.exit(3);
                    }
                    latch.countDown();
                });
            } catch (Exception e) {
                logger.error("caught exception when initializing JobClustersManagerService, committing suicide...", e);
                System.exit(3);
            }
            try {
                // Block the starting thread until the actor reports successful init.
                latch.await();
            } catch (InterruptedException e) {
                logger.error("interrupted waiting for latch countdown during JobClustersManagerInitialize, committing suicide..", e);
                System.exit(3);
            }
            logger.info("JobClustersManager initialize took {} sec",
                    TimeUnit.SECONDS.convert(System.currentTimeMillis() - startTime, TimeUnit.MILLISECONDS));
        });
    }
}
4,356
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/JobListHelperActor.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.master;

import akka.actor.AbstractActor;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.util.Timeout;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import io.mantisrx.master.api.akka.route.proto.JobClusterProtoAdapter;
import io.mantisrx.master.jobcluster.MantisJobClusterMetadataView;
import io.mantisrx.master.jobcluster.job.MantisJobMetadataView;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.server.master.domain.JobId;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.schedulers.Schedulers;
import scala.concurrent.duration.Duration;

import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;

import static akka.pattern.PatternsCS.ask;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SERVER_ERROR;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS;

/**
 * Helper Actor used by JobClustersManager for listing jobs and clusters.
 * By offloading the scatter-gather to a separate Actor the JCM is free to move on to processing other messages.
 *
 * This Actor is stateless and can be part of a pool of actors if performance becomes a bottleneck.
 *
 * Each handler fans an ask out to every (matching) job cluster actor with a 500ms timeout,
 * accumulates the results, and replies to the original requester recorded in the request wrapper.
 */
public class JobListHelperActor extends AbstractActor {
    private final Logger logger = LoggerFactory.getLogger(JobListHelperActor.class);

    /** Props factory for creating this actor. */
    public static Props props() {
        return Props.create(JobListHelperActor.class);
    }

    public JobListHelperActor() {
    }

    /**
     * Routes the three wrapper message types to their handlers; anything else is logged and dropped.
     */
    @Override
    public Receive createReceive() {
        return receiveBuilder()
                .match(ListJobClusterRequestWrapper.class, this::onJobClustersList)
                .match(ListJobRequestWrapper.class, this::onJobList)
                .match(ListJobIdRequestWrapper.class, this::onJobIdList)
                .matchAny(x -> logger.warn("Unexpected message {}", x))
                .build();
    }

    /**
     * Scatter-gathers ListJobsRequest across job cluster actors matching the request's regex
     * criteria, sorts all gathered jobs by submission time, and replies to request.sender.
     * Per-cluster ask failures are logged and skipped (onErrorResumeNext -> empty).
     */
    private void onJobList(ListJobRequestWrapper request) {
        ActorRef sender = getSender();
        // Per-cluster ask timeout of 500 ms.
        Timeout t = new Timeout(Duration.create(500, TimeUnit.MILLISECONDS));
        // Mutable accumulator filled by the subscribe callbacks below.
        List<MantisJobMetadataView> resultList = Lists.newArrayList();
        getJobClustersMatchingRegex(request.jobClusterInfoMap.values(), request.listJobsRequest.getCriteria())
                .flatMap((jobClusterInfo) -> {
                    CompletionStage<JobClusterManagerProto.ListJobsResponse> respCS =
                            ask(jobClusterInfo.jobClusterActor, request.listJobsRequest, t)
                                    .thenApply(JobClusterManagerProto.ListJobsResponse.class::cast);
                    return Observable.from(respCS.toCompletableFuture(), Schedulers.io())
                            .onErrorResumeNext(ex -> {
                                // A failing cluster is skipped rather than failing the whole listing.
                                logger.warn("caught exception {}", ex.getMessage(), ex);
                                return Observable.empty();
                            });
                })
                .filter(Objects::nonNull)
                .flatMapIterable((listJobsResp) -> listJobsResp.getJobList())
                .toSortedList((o1, o2) -> Long.compare(o1.getJobMetadata().getSubmittedAt(),
                        o2.getJobMetadata().getSubmittedAt()))
                .subscribeOn(Schedulers.computation())
                .subscribe(
                        resultList::addAll,
                        (e) -> {
                            // On pipeline error reply SERVER_ERROR with whatever was gathered so far.
                            request.sender.tell(new JobClusterManagerProto.ListJobsResponse(
                                    request.listJobsRequest.requestId, SERVER_ERROR, e.getMessage(), resultList), sender);
                        }, () -> {
                            // todo limit is applied at cluster level as well
                            // if(request.listJobsRequest.getCriteria().getLimit().isPresent()) {
                            //     int limit = request.listJobsRequest.getCriteria().getLimit().get();
                            //     request.sender.tell(new JobClusterManagerProto.ListJobsResponse(request.listJobsRequest.requestId, SUCCESS, "", resultList.subList(0, Math.min(resultList.size(), limit))), sender);
                            // }
                            request.sender.tell(new JobClusterManagerProto.ListJobsResponse(
                                    request.listJobsRequest.requestId, SUCCESS, "", resultList), sender);
                        })
        ;
    }

    /**
     * Scatter-gathers ListJobIdsRequest across matching job cluster actors and replies to
     * request.sender with the combined (unsorted) id list. Failing clusters are skipped.
     */
    private void onJobIdList(ListJobIdRequestWrapper request) {
        if (logger.isTraceEnabled()) {
            logger.trace("In onJobIdList {}", request);
        }
        ActorRef sender = getSender();
        // Per-cluster ask timeout of 500 ms.
        Timeout t = new Timeout(Duration.create(500, TimeUnit.MILLISECONDS));
        List<JobClusterProtoAdapter.JobIdInfo> resultList = Lists.newArrayList();
        getJobClustersMatchingRegex(request.jobClusterInfoMap.values(), request.listJobIdsRequest.getCriteria())
                .flatMap((jobClusterInfo) -> {
                    CompletionStage<JobClusterManagerProto.ListJobIdsResponse> respCS =
                            ask(jobClusterInfo.jobClusterActor, request.listJobIdsRequest, t)
                                    .thenApply(JobClusterManagerProto.ListJobIdsResponse.class::cast);
                    return Observable.from(respCS.toCompletableFuture(), Schedulers.io())
                            .onErrorResumeNext(ex -> {
                                logger.warn("caught exception {}", ex.getMessage(), ex);
                                return Observable.empty();
                            });
                })
                .filter(Objects::nonNull)
                .map(JobClusterManagerProto.ListJobIdsResponse::getJobIds)
                .subscribeOn(Schedulers.computation())
                .subscribe(
                        resultList::addAll
                        , (error) -> {
                            logger.warn("Exception in JobListHelperActor:onJobIdList", error);
                            request.sender.tell(new JobClusterManagerProto.ListJobIdsResponse(
                                    request.listJobIdsRequest.requestId, SERVER_ERROR, error.getMessage(), resultList), sender);
                        }, () -> {
                            if (logger.isTraceEnabled()) {
                                logger.trace("Exit onJobIdList {}", resultList);
                            }
                            // if(request.listJobIdsRequest.getCriteria().getLimit().isPresent()) {
                            //     int limit = request.listJobIdsRequest.getCriteria().getLimit().get();
                            //     request.sender.tell(new JobClusterManagerProto.ListJobIdsResponse(request.listJobIdsRequest.requestId, SUCCESS, "", resultList.subList(0, Math.min(resultList.size(), limit))), sender);
                            // }
                            // NOTE(review): the trace below duplicates the one above; retained as-is.
                            if (logger.isTraceEnabled()) {
                                logger.trace("Exit onJobIdList {}", resultList);
                            }
                            request.sender.tell(new JobClusterManagerProto.ListJobIdsResponse(
                                    request.listJobIdsRequest.requestId, SUCCESS, "", resultList), sender);
                        });
    }

    /**
     * Asks every known job cluster actor for its metadata (no regex filter here, unlike the
     * job/job-id listings) and replies to request.sender with the combined cluster list.
     */
    private void onJobClustersList(ListJobClusterRequestWrapper request) {
        if (logger.isTraceEnabled()) {
            logger.trace("In onJobClustersListRequest {}", request);
        }
        ActorRef callerActor = getSender();
        // Per-cluster ask timeout of 500 ms.
        Timeout timeout = new Timeout(Duration.create(500, TimeUnit.MILLISECONDS));
        List<MantisJobClusterMetadataView> clusterList = Lists.newArrayList();
        Observable.from(request.jobClusterInfoMap.values())
                .flatMap((jInfo) -> {
                    CompletionStage<JobClusterManagerProto.GetJobClusterResponse> respCS =
                            ask(jInfo.jobClusterActor,
                                    new JobClusterManagerProto.GetJobClusterRequest(jInfo.clusterName), timeout)
                                    .thenApply(JobClusterManagerProto.GetJobClusterResponse.class::cast);
                    return Observable.from(respCS.toCompletableFuture(), Schedulers.io())
                            .onErrorResumeNext(ex -> {
                                logger.warn("caught exception {}", ex.getMessage(), ex);
                                return Observable.empty();
                            });
                })
                // Keep only responses that actually carry a cluster.
                .filter((resp) -> resp != null && resp.getJobCluster().isPresent())
                .map((resp) -> resp.getJobCluster().get())
                //.collect((Func0<ArrayList<MantisJobClusterMetadataView>>) ArrayList::new,ArrayList::add)
                .doOnError(this::logError)
                .subscribeOn(Schedulers.computation())
                //.toBlocking()
                .subscribe(
                        clusterList::add
                        , (err) -> {
                            logger.warn("Exception in onJobClusterList ", err);
                            if (logger.isTraceEnabled()) {
                                logger.trace("Exit onJobClustersListRequest {}", err);
                            }
                            request.sender.tell(new JobClusterManagerProto.ListJobClustersResponse(
                                    request.listJobClustersRequest.requestId, SERVER_ERROR, err.getMessage(), clusterList), callerActor);
                        }, () -> {
                            if (logger.isTraceEnabled()) {
                                logger.trace("Exit onJobClustersListRequest {}", clusterList);
                            }
                            request.sender.tell(new JobClusterManagerProto.ListJobClustersResponse(
                                    request.listJobClustersRequest.requestId, SUCCESS, "", clusterList), callerActor);
                        })
        ;
    }

    /** Logs pipeline errors from the cluster listing (message only, no stack trace). */
    private void logError(Throwable e) {
        logger.error("Exception occurred retrieving job cluster list {}", e.getMessage());
    }

    /**
     * Filters the given clusters by the criteria's optional case-insensitive regex
     * (Matcher.find, i.e. substring match). An invalid regex is logged and treated as
     * match-all; no regex also means match-all.
     */
    private Observable<JobClustersManagerActor.JobClusterInfo> getJobClustersMatchingRegex(
            Collection<JobClustersManagerActor.JobClusterInfo> jobClusterList,
            JobClusterManagerProto.ListJobCriteria criteria) {
        return Observable.from(jobClusterList)
                .filter((jcInfo) -> {
                    if (criteria.getMatchingRegex().isPresent()) {
                        try {
                            // NOTE(review): pattern is recompiled per cluster; acceptable at
                            // current scale but a candidate for hoisting.
                            return Pattern.compile(criteria.getMatchingRegex().get(), Pattern.CASE_INSENSITIVE)
                                    .matcher(jcInfo.clusterName).find();
                        } catch (Exception e) {
                            logger.warn("Invalid regex {}", e.getMessage());
                            return true;
                        }
                    } else {
                        return true;
                    }
                });
    }

    /** Immutable carrier pairing a ListJobClustersRequest with its original sender and the cluster map. */
    static class ListJobClusterRequestWrapper {
        private final JobClusterManagerProto.ListJobClustersRequest listJobClustersRequest;
        // Original requester; replies are sent here, not to the forwarding actor.
        private final ActorRef sender;
        private final Map<String, JobClustersManagerActor.JobClusterInfo> jobClusterInfoMap;

        public ListJobClusterRequestWrapper(final JobClusterManagerProto.ListJobClustersRequest request,
                                            final ActorRef sender,
                                            final Map<String, JobClustersManagerActor.JobClusterInfo> jobClusterInfoMap) {
            this.jobClusterInfoMap = jobClusterInfoMap;
            this.sender = sender;
            this.listJobClustersRequest = request;
        }

        public JobClusterManagerProto.ListJobClustersRequest getListJobClustersRequest() {
            return listJobClustersRequest;
        }

        public ActorRef getSender() {
            return sender;
        }

        public Map<String, JobClustersManagerActor.JobClusterInfo> getJobClusterInfoMap() {
            return jobClusterInfoMap;
        }
    }

    /** Immutable carrier pairing a ListJobsRequest with its original sender and the cluster map. */
    static class ListJobRequestWrapper {
        private final JobClusterManagerProto.ListJobsRequest listJobsRequest;
        private final ActorRef sender;
        private final Map<String, JobClustersManagerActor.JobClusterInfo> jobClusterInfoMap;

        public ListJobRequestWrapper(JobClusterManagerProto.ListJobsRequest listJobsRequest,
                                     ActorRef sender,
                                     Map<String, JobClustersManagerActor.JobClusterInfo> jobClusterInfoMap) {
            this.listJobsRequest = listJobsRequest;
            this.sender = sender;
            this.jobClusterInfoMap = jobClusterInfoMap;
        }
    }

    /** Immutable carrier pairing a ListJobIdsRequest with its original sender and the cluster map. */
    static class ListJobIdRequestWrapper {
        private final JobClusterManagerProto.ListJobIdsRequest listJobIdsRequest;
        private final ActorRef sender;
        private final Map<String, JobClustersManagerActor.JobClusterInfo> jobClusterInfoMap;

        public ListJobIdRequestWrapper(JobClusterManagerProto.ListJobIdsRequest listJobIdsRequest,
                                       ActorRef sender,
                                       Map<String, JobClustersManagerActor.JobClusterInfo> jobClusterInfoMap) {
            this.listJobIdsRequest = listJobIdsRequest;
            this.sender = sender;
            this.jobClusterInfoMap = jobClusterInfoMap;
        }
    }
}
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/JobClustersManagerActor.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master; import akka.actor.AbstractActorWithTimers; import akka.actor.ActorPaths; import akka.actor.ActorRef; import akka.actor.Props; import akka.actor.SupervisorStrategy; import akka.actor.Terminated; import io.mantisrx.shaded.com.google.common.collect.Lists; import io.mantisrx.common.metrics.Counter; import io.mantisrx.common.metrics.Metrics; import io.mantisrx.common.metrics.MetricsRegistry; import io.mantisrx.common.metrics.spectator.GaugeCallback; import io.mantisrx.common.metrics.spectator.MetricGroupId; import io.mantisrx.master.akka.MantisActorSupervisorStrategy; import io.mantisrx.master.events.LifecycleEventPublisher; import io.mantisrx.master.jobcluster.IJobClusterMetadata; import io.mantisrx.master.jobcluster.JobClusterActor; import io.mantisrx.master.jobcluster.job.IMantisJobMetadata; import io.mantisrx.master.jobcluster.job.JobHelper; import io.mantisrx.master.jobcluster.job.JobState; import io.mantisrx.master.jobcluster.proto.BaseResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterArtifactRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterLabelsRequest; import 
io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterSLARequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyRequest; import io.mantisrx.master.jobcluster.proto.JobClusterProto; import io.mantisrx.server.core.JobCompletedReason; import io.mantisrx.server.master.config.ConfigurationProvider; import io.mantisrx.server.master.domain.IJobClusterDefinition; import io.mantisrx.server.master.domain.JobClusterDefinitionImpl; import io.mantisrx.server.master.domain.JobClusterDefinitionImpl.CompletedJob; import io.mantisrx.server.master.domain.JobId; import io.mantisrx.server.master.persistence.MantisJobStore; //import io.mantisrx.server.master.scheduler.MantisScheduler; import io.mantisrx.server.master.scheduler.MantisScheduler; import io.mantisrx.server.master.scheduler.WorkerEvent; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observable; import rx.schedulers.Schedulers; import java.time.Duration; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletionStage; import java.util.stream.Collectors; import static akka.pattern.PatternsCS.ask; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR_CONFLICT; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR_NOT_FOUND; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SERVER_ERROR; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS_CREATED; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.CreateJobClusterRequest; import 
static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.CreateJobClusterResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DeleteJobClusterRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DeleteJobClusterResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DisableJobClusterRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DisableJobClusterResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.EnableJobClusterRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.EnableJobClusterResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobClusterRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobClusterResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobSchedInfoRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobSchedInfoResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLatestJobDiscoveryInfoRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLatestJobDiscoveryInfoResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLastSubmittedJobIdStreamResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.JobClustersManagerInitialize; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.JobClustersManagerInitializeResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.KillJobRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.KillJobResponse; 
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListCompletedJobsInClusterRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListCompletedJobsInClusterResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobClustersRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobClustersResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobIdsRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobIdsResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobsRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobsResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListWorkersRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListWorkersResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ReconcileJobCluster; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.SubmitJobRequest; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.SubmitJobResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterArtifactResponse; import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterLabelsResponse; import static 
io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterSLAResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyResponse;
import static java.util.Optional.empty;
import static java.util.Optional.ofNullable;

/*
 * Supervisor Actor responsible for creating/deletion/listing of all Job Clusters in the system.
 * Starts in the "initializing" behavior and switches to "initialized" once the
 * JobClustersManagerInitialize message has been processed (see initialize()).
 */
public class JobClustersManagerActor extends AbstractActorWithTimers implements IJobClustersManager {
    // Timer key for the periodic reconciliation tick handled by onReconcileJobClusters.
    private static final String CHECK_CLUSTERS_TIMER_KEY = "CHECK_CLUSTER_TIMER";
    // A cluster stuck in INITIALIZING/DELETING longer than this (millis) gets retried/deregistered.
    public static final int STATE_TRANSITION_TIMEOUT_MSECS = 5000;
    private final Logger logger = LoggerFactory.getLogger(JobClustersManagerActor.class);
    // Period (seconds) of the reconciliation timer started in initialize().
    private final long checkAgainInSecs = 30;
    // NOTE(review): registered here but never incremented anywhere in this chunk —
    // confirm the initialization error path is meant to bump it.
    private final Counter numJobClusterInitFailures;
    private final Counter numJobClusterInitSuccesses;
    private Receive initializedBehavior;

    /** Props factory; uses a metered mailbox so mailbox depth is visible in metrics. */
    public static Props props(final MantisJobStore jobStore, final LifecycleEventPublisher eventPublisher) {
        return Props.create(JobClustersManagerActor.class, jobStore, eventPublisher)
            .withMailbox("akka.actor.metered-mailbox");
    }

    private final MantisJobStore jobStore;
    private final LifecycleEventPublisher eventPublisher;
    // Assigned in initialize(); remains null until the JobClustersManagerInitialize message arrives.
    private MantisScheduler mantisScheduler = null;
    JobClusterInfoManager jobClusterInfoManager;
    private ActorRef jobListHelperActor;

    public JobClustersManagerActor(final MantisJobStore store, final LifecycleEventPublisher eventPublisher) {
        this.jobStore = store;
        this.eventPublisher = eventPublisher;
        MetricGroupId metricGroupId = getMetricGroupId();
        Metrics m = new Metrics.Builder()
            .id(metricGroupId)
            .addCounter("numJobClusterInitFailures")
            .addCounter("numJobClusterInitSuccesses")
            .build();
        // registerAndGet may return a previously registered instance; always read
        // counters from the returned object, not the local builder result.
        m = MetricsRegistry.getInstance().registerAndGet(m);
        this.numJobClusterInitFailures = m.getCounter("numJobClusterInitFailures");
        this.numJobClusterInitSuccesses = m.getCounter("numJobClusterInitSuccesses");
        initializedBehavior = getInitializedBehavior();
    }

    MetricGroupId getMetricGroupId() {
        return new MetricGroupId("JobClustersManagerActor");
    }

    /**
     * Behavior once initialized — routes ~27 message types:
     * cluster lifecycle (create/delete/update, SLA, artifact, labels, migration strategy,
     * enable/disable, get), listings (clusters/jobs/job ids/workers, completed jobs,
     * archived workers, last-submitted-job-id stream), and pass-through messages that are
     * forwarded to the owning JobClusterActor (submit/kill/get job, sched info, discovery
     * info, scale, resubmit worker, worker events).
     */
    private Receive getInitializedBehavior() {
        String state = "initialized";
        return receiveBuilder()
            .match(ReconcileJobCluster.class, this::onReconcileJobClusters)
            // Specific Job Cluster related messages
            .match(CreateJobClusterRequest.class, this::onJobClusterCreate)
            .match(JobClusterProto.InitializeJobClusterResponse.class, this::onJobClusterInitializeResponse)
            .match(DeleteJobClusterRequest.class, this::onJobClusterDelete)
            .match(JobClusterProto.DeleteJobClusterResponse.class, this::onJobClusterDeleteResponse)
            .match(UpdateJobClusterRequest.class, this::onJobClusterUpdate)
            .match(UpdateJobClusterSLARequest.class, this::onJobClusterUpdateSLA)
            .match(UpdateJobClusterArtifactRequest.class, this::onJobClusterUpdateArtifact)
            .match(UpdateJobClusterLabelsRequest.class, this::onJobClusterUpdateLabels)
            .match(UpdateJobClusterWorkerMigrationStrategyRequest.class, this::onJobClusterUpdateWorkerMigrationConfig)
            .match(EnableJobClusterRequest.class, this::onJobClusterEnable)
            .match(DisableJobClusterRequest.class, this::onJobClusterDisable)
            .match(GetJobClusterRequest.class, this::onJobClusterGet)
            .match(ListCompletedJobsInClusterRequest.class, this::onJobListCompleted)
            .match(GetLastSubmittedJobIdStreamRequest.class, this::onGetLastSubmittedJobIdSubject)
            .match(ListArchivedWorkersRequest.class, this::onListArchivedWorkers)
            // List Job Cluster related messages
            .match(ListJobClustersRequest.class, this::onJobClustersList)
            // List Jobs related messages
            .match(ListJobsRequest.class, this::onJobList)
            .match(ListJobIdsRequest.class, this::onJobIdList)
            .match(ListWorkersRequest.class, this::onListActiveWorkers)
            // delegate to job
            .match(SubmitJobRequest.class, this::onJobSubmit)
            .match(KillJobRequest.class, this::onJobKillRequest)
            // .match(JobClusterProto.KillJobResponse.class, this::onJobKillResponse)
            .match(GetJobDetailsRequest.class, this::onGetJobDetailsRequest)
            .match(GetJobSchedInfoRequest.class, this::onGetJobStatusSubject)
            .match(GetLatestJobDiscoveryInfoRequest.class, this::onGetLatestJobDiscoveryInfo)
            .match(ScaleStageRequest.class, this::onScaleStage)
            .match(ResubmitWorkerRequest.class, this::onResubmitWorker)
            // delegate to worker
            .match(WorkerEvent.class, this::onWorkerEvent)
            .match(Terminated.class, this::onTerminated)
            // Unexpected: a second init request while already initialized is a client error.
            .match(JobClustersManagerInitialize.class, (x) -> getSender().tell(
                new JobClustersManagerInitializeResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state) ), getSelf()))
            .matchAny(x -> logger.warn("unexpected message {} received by Job Cluster Manager actor. In initialized state ", x))
            .build();
    }

    /** Builds the standard "unexpected message in state X" error string. */
    private String genUnexpectedMsg(String event, String state) {
        return String.format("Unexpected message %s received by JobClustersManager actor in %s State", event, state);
    }

    /**
     * Behavior before initialization: only JobClustersManagerInitialize is accepted;
     * every other request is answered with a CLIENT_ERROR (or logged for fire-and-forget
     * messages) so callers are not left waiting.
     */
    private Receive getInitializingBehavior() {
        String state = "initializing";
        return receiveBuilder()
            // EXPECTED MESSAGES BEGIN
            .match(JobClustersManagerInitialize.class, this::initialize)
            // EXPECTED MESSAGES END
            // UNEXPECTED MESSAGES BEGIN
            .match(ReconcileJobCluster.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), state)))
            .match(CreateJobClusterRequest.class, (x) -> getSender().tell(
                new CreateJobClusterResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), x.getJobClusterDefinition().getName()), getSelf()))
            .match(JobClusterProto.InitializeJobClusterResponse.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), state)))
            .match(DeleteJobClusterRequest.class, (x) -> getSender().tell(
                new DeleteJobClusterResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state)), getSelf()))
            .match(JobClusterProto.DeleteJobClusterResponse.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), state)))
            .match(UpdateJobClusterRequest.class, (x) -> getSender().tell(
                new UpdateJobClusterResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state)), getSelf()))
            .match(UpdateJobClusterSLARequest.class, (x) -> getSender().tell(
                new UpdateJobClusterSLAResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state)), getSelf()))
            .match(UpdateJobClusterArtifactRequest.class, (x) -> getSender().tell(
                new UpdateJobClusterArtifactResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state)), getSelf()))
            .match(UpdateJobClusterLabelsRequest.class, (x) -> getSender().tell(
                new UpdateJobClusterLabelsResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state)), getSelf()))
            .match(UpdateJobClusterWorkerMigrationStrategyRequest.class, (x) -> getSender().tell(new
UpdateJobClusterWorkerMigrationStrategyResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state)), getSelf()))
            .match(EnableJobClusterRequest.class, (x) -> getSender().tell(
                new EnableJobClusterResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state)), getSelf()))
            .match(DisableJobClusterRequest.class, (x) -> getSender().tell(
                new DisableJobClusterResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state)), getSelf()))
            .match(GetJobClusterRequest.class, (x) -> getSender().tell(
                new GetJobClusterResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), empty()), getSelf()))
            .match(ListCompletedJobsInClusterRequest.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), state)))
            .match(GetLastSubmittedJobIdStreamRequest.class, (x) -> getSender().tell(
                new GetLastSubmittedJobIdStreamResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), empty()), getSelf()))
            .match(ListArchivedWorkersRequest.class, (x) -> getSender().tell(
                new ListArchivedWorkersResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), Lists.newArrayList()), getSelf()))
            .match(ListJobClustersRequest.class, (x) -> getSender().tell(
                new ListJobClustersResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), Lists.newArrayList()), getSelf()))
            .match(ListJobsRequest.class, (x) -> getSender().tell(
                new ListJobsResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), Lists.newArrayList()), getSelf()))
            .match(ListJobIdsRequest.class, (x) -> getSender().tell(
                new ListJobIdsResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), Lists.newArrayList()), getSelf()))
            .match(ListWorkersRequest.class, (x) -> getSender().tell(
                new ListWorkersResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), Lists.newArrayList()), getSelf()))
            .match(SubmitJobRequest.class, (x) -> getSender().tell(
                new SubmitJobResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), empty()), getSelf()))
            .match(KillJobRequest.class, (x) -> getSender().tell(
                new KillJobResponse(x.requestId, CLIENT_ERROR, JobState.Noop, genUnexpectedMsg(x.toString(), state), x.getJobId(), x.getUser()), getSelf()))
            .match(JobClusterProto.KillJobResponse.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), state)))
            .match(GetJobDetailsRequest.class, (x) -> getSender().tell(
                new GetJobDetailsResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), empty()), getSelf()))
            .match(GetJobSchedInfoRequest.class, (x) -> getSender().tell(
                new GetJobSchedInfoResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), empty()), getSelf()))
            .match(GetLatestJobDiscoveryInfoRequest.class, (x) -> getSender().tell(
                new GetLatestJobDiscoveryInfoResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), empty()), getSelf()))
            .match(ScaleStageRequest.class, (x) -> getSender().tell(
                new ScaleStageResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state), 0), getSelf()))
            .match(ResubmitWorkerRequest.class, (x) -> getSender().tell(
                new ResubmitWorkerResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), state)), getSelf()))
            .match(WorkerEvent.class, (x) -> logger.warn(genUnexpectedMsg(x.toString(), state)))
            // everything else
            .matchAny(x -> logger.warn("unexpected message {} received by Job Cluster Manager actor. It needs to be initialized first ", x))
            // UNEXPECTED MESSAGES END
            .build();
    }

    /**
     * One-time bootstrap: creates the helper actor and JobClusterInfoManager, optionally
     * loads all clusters/jobs from the store and (re)initializes one actor per cluster,
     * then switches this actor to the initialized behavior and replies to the sender.
     */
    private void initialize(JobClustersManagerInitialize initMsg) {
        ActorRef sender = getSender();
        try {
            logger.info("In JobClustersManagerActor:initialize");
            this.jobListHelperActor = getContext().actorOf(JobListHelperActor.props(), "JobListHelperActor");
            getContext().watch(jobListHelperActor);
            mantisScheduler = initMsg.getScheduler();
            Map<String, IJobClusterMetadata> jobClusterMap = new HashMap<>();
            this.jobClusterInfoManager = new JobClusterInfoManager(jobStore, mantisScheduler, eventPublisher);
            if (!initMsg.isLoadJobsFromStore()) {
                // Fresh start: nothing to recover, become initialized immediately.
                getContext().become(initializedBehavior);
                sender.tell(new JobClustersManagerInitializeResponse(initMsg.requestId, SUCCESS, "JobClustersManager successfully inited"), getSelf());
            } else {
                List<IJobClusterMetadata> jobClusters = jobStore.loadAllJobClusters();
                logger.info("Read {} job clusters from storage", jobClusters.size());
                List<IMantisJobMetadata> activeJobs = jobStore.loadAllActiveJobs();
                logger.info("Read {} jobs from storage", activeJobs.size());
                List<CompletedJob> completedJobs = jobStore.loadAllCompletedJobs();
                logger.info("Read {} completed jobs from storage", completedJobs.size());
                for (IJobClusterMetadata jobClusterMeta : jobClusters) {
                    String clusterName = jobClusterMeta.getJobClusterDefinition().getName();
                    jobClusterMap.put(clusterName, jobClusterMeta);
                }
                Map<String, List<IMantisJobMetadata>> clusterToJobMap = new HashMap<>();
                Map<String, List<CompletedJob>> clusterToCompletedJobMap = new HashMap<>();
                // group jobs by cluster
                for (IMantisJobMetadata jobMeta : activeJobs) {
                    String clusterName = jobMeta.getClusterName();
                    clusterToJobMap.computeIfAbsent(clusterName, k -> new ArrayList<>()).add(jobMeta);
                }
                for (CompletedJob jobMeta : completedJobs) {
                    String clusterName = jobMeta.getName();
                    clusterToCompletedJobMap.computeIfAbsent(clusterName, k -> new ArrayList<>()).add(jobMeta);
                }
                long masterInitTimeoutSecs =
ConfigurationProvider.getConfig().getMasterInitTimeoutSecs();
                // Leave ~60s headroom under the overall master init timeout; fall back to the
                // full timeout if it is itself smaller than 60s.
                long timeout = ((masterInitTimeoutSecs - 60)) > 0 ? (masterInitTimeoutSecs - 60) : masterInitTimeoutSecs;
                Observable.from(jobClusterMap.values())
                    .filter((jobClusterMeta) -> jobClusterMeta != null && jobClusterMeta.getJobClusterDefinition() != null)
                    .flatMap((jobClusterMeta) -> {
                        Duration t = Duration.ofSeconds(timeout);
                        Optional<JobClusterInfo> jobClusterInfoO = jobClusterInfoManager.createClusterActorAndRegister(jobClusterMeta.getJobClusterDefinition());
                        if (!jobClusterInfoO.isPresent()) {
                            // e.g. invalid cluster name — skip this cluster, keep bootstrapping the rest
                            logger.info("skipping job cluster {} on bootstrap as actor creating failed", jobClusterMeta.getJobClusterDefinition().getName());
                            return Observable.empty();
                        }
                        JobClusterInfo jobClusterInfo = jobClusterInfoO.get();
                        List<IMantisJobMetadata> jobList = Lists.newArrayList();
                        List<IMantisJobMetadata> jList = clusterToJobMap.get(jobClusterMeta.getJobClusterDefinition().getName());
                        if (jList != null) {
                            jobList.addAll(jList);
                        }
                        List<CompletedJob> completedJobsList = Lists.newArrayList();
                        List<CompletedJob> cList = clusterToCompletedJobMap.get(jobClusterMeta.getJobClusterDefinition().getName());
                        if (cList != null) {
                            completedJobsList.addAll(cList);
                        }
                        JobClusterProto.InitializeJobClusterRequest req = new JobClusterProto.InitializeJobClusterRequest((JobClusterDefinitionImpl) jobClusterMeta.getJobClusterDefinition(),
                            jobClusterMeta.isDisabled(), jobClusterMeta.getLastJobCount(), jobList, completedJobsList, "system", getSelf(), false);
                        return jobClusterInfoManager.initializeCluster(jobClusterInfo, req, t);
                    })
                    .filter(Objects::nonNull)
                    // Blocks the actor thread until all clusters have responded or errored.
                    .toBlocking()
                    .subscribe((clusterInit) -> {
                        logger.info("JobCluster {} inited with code {}", clusterInit.jobClusterName, clusterInit.responseCode);
                        numJobClusterInitSuccesses.increment();
                    }, (error) -> {
                        // NOTE(review): numJobClusterInitFailures is not incremented here — confirm intent.
                        logger.warn("Exception initializing clusters {}", error.getMessage(), error);
                        logger.error("JobClusterManagerActor had errors during initialization NOT transitioning to initialized behavior");
                        // getContext().become(initializedBehavior);
                        sender.tell(new JobClustersManagerInitializeResponse(initMsg.requestId, SERVER_ERROR, "JobClustersManager inited with errors"), getSelf());
                    }, () -> {
                        logger.info("JobClusterManagerActor transitioning to initialized behavior");
                        getContext().become(initializedBehavior);
                        sender.tell(new JobClustersManagerInitializeResponse(initMsg.requestId, SUCCESS, "JobClustersManager successfully inited"), getSelf());
                    });
                // Periodic reconciliation of clusters stuck in INITIALIZING/DELETING.
                getTimers().startPeriodicTimer(CHECK_CLUSTERS_TIMER_KEY, new ReconcileJobCluster(), Duration.ofSeconds(checkAgainInSecs));
                // kick off loading of archived jobs
                logger.info("Kicking off archived job load asynchronously");
                jobStore.loadAllArchivedJobsAsync();
            }
        } catch(Exception e) {
            logger.error("caught exception", e);
            sender.tell(new JobClustersManagerInitializeResponse(initMsg.requestId, SERVER_ERROR, e.getMessage()), getSelf());
        }
        logger.info("JobClustersManagerActor:initialize ends");
    }

    /**
     * Periodic timer handler: finds clusters stuck in INITIALIZING or DELETING beyond
     * STATE_TRANSITION_TIMEOUT_MSECS; retries initialization for the former and
     * deregisters (stops) the latter.
     */
    @Override
    public void onReconcileJobClusters(ReconcileJobCluster p) {
        Set<JobClusterInfo> jobClusterInfos = this.jobClusterInfoManager.getAllJobClusterInfo().values().stream()
            .filter((jci) -> ((jci.currentState == JobClusterInfo.JobClusterState.INITIALIZING
                || jci.currentState == JobClusterInfo.JobClusterState.DELETING)
                && (p.timeOfEnforcement.toEpochMilli() - jci.stateUpdateTime) > STATE_TRANSITION_TIMEOUT_MSECS))
            .collect(Collectors.toSet());
        if(jobClusterInfos.size() > 0) {
            logger.warn("{} JobClusters stuck in initializing/deleting state ", jobClusterInfos.size());
            jobClusterInfos.stream().forEach((jci) -> {
                if(jci.currentState.equals(JobClusterInfo.JobClusterState.INITIALIZING)) {
                    // retry init request
                    logger.warn("Retrying init on JobCluster {} stuck in {} state since {}", jci.clusterName, jci.currentState, jci.stateUpdateTime);
                    jci.stateUpdateTime = p.timeOfEnforcement.toEpochMilli();
                    jci.jobClusterActor.tell(jci.initRequest, getSelf());
                } else {
                    // in pending delete state
                    logger.warn("Deregistering JobCluster {} stuck in {} state since {}", jci.clusterName, jci.currentState, jci.stateUpdateTime);
                    jobClusterInfoManager.deregisterJobCluster(jci.clusterName);
                }
            });
        }
    }

    /**
     * Creates a new job cluster: spins up its actor, registers it, and initializes it
     * asynchronously. Replies CLIENT_ERROR for invalid names, CLIENT_ERROR_CONFLICT for
     * duplicates, SERVER_ERROR on unexpected failures.
     */
    @Override
    public void onJobClusterCreate(final CreateJobClusterRequest request) {
        final String name = request.getJobClusterDefinition().getName();
        if (!jobClusterInfoManager.isClusterExists(name)) {
            try {
                Optional<JobClusterInfo> jobClusterInfoO = jobClusterInfoManager.createClusterActorAndRegister(request.getJobClusterDefinition());
                if (jobClusterInfoO.isPresent()) {
                    jobClusterInfoManager.initializeClusterAsync(jobClusterInfoO.get(),
                        new JobClusterProto.InitializeJobClusterRequest(request.getJobClusterDefinition(), request.getUser(), getSender()));
                } else {
                    getSender().tell(new CreateJobClusterResponse(
                        request.requestId,
                        CLIENT_ERROR,
                        "Job Cluster " + request.getJobClusterDefinition().getName() + " could not be created due to invalid name",
                        request.getJobClusterDefinition().getName()), getSelf());
                }
            } catch (Exception e) {
                getSender().tell(new CreateJobClusterResponse(
                    request.requestId,
                    SERVER_ERROR,
                    "Job Cluster " + request.getJobClusterDefinition().getName() + " could not be created due to " + e.getMessage(),
                    request.getJobClusterDefinition().getName()), getSelf());
            }
        } else {
            getSender().tell(new CreateJobClusterResponse(
                request.requestId,
                CLIENT_ERROR_CONFLICT,
                "Job Cluster " + request.getJobClusterDefinition().getName() + " already exists",
                request.getJobClusterDefinition().getName()), getSelf());
        }
    }

    @Override
    public void onJobClusterInitializeResponse(final JobClusterProto.InitializeJobClusterResponse createResp) {
        logger.info("Got JobClusterInitializeResponse {}", createResp);
        jobClusterInfoManager.processInitializeResponse(createResp);
    }

    @Override
    public void onJobClusterDelete(final DeleteJobClusterRequest request) {
        jobClusterInfoManager.processDeleteRequest(request);
    }

    @Override
    public void onJobClusterDeleteResponse(final JobClusterProto.DeleteJobClusterResponse resp) {
jobClusterInfoManager.processDeleteResponse(resp); } @Override public void onJobClusterUpdate(final UpdateJobClusterRequest request) { Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getJobClusterDefinition().getName()); ActorRef sender = getSender(); if(jobClusterInfo.isPresent()) { jobClusterInfo.get().jobClusterActor.forward(request, getContext()); } else { sender.tell(new UpdateJobClusterResponse(request.requestId, CLIENT_ERROR_NOT_FOUND, "JobCluster " + request.getJobClusterDefinition().getName() + " doesn't exist"), getSelf()); } } @Override public void onJobClustersList(final ListJobClustersRequest request) { if(logger.isDebugEnabled()) { logger.info("In onJobClustersListRequest {}", request); } ActorRef sender = getSender(); Map<String, JobClusterInfo> jobClusterInfoMap = jobClusterInfoManager.getAllJobClusterInfo(); jobListHelperActor.tell(new JobListHelperActor.ListJobClusterRequestWrapper(request,sender,jobClusterInfoMap),getSelf()); } @Override public void onJobClusterGet(GetJobClusterRequest r) { Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(r.getJobClusterName()); ActorRef sender = getSender(); if(jobClusterInfo.isPresent()) { jobClusterInfo.get().jobClusterActor.forward(r, getContext()); } else { sender.tell(new GetJobClusterResponse(r.requestId, CLIENT_ERROR_NOT_FOUND, "No such Job cluster " + r.getJobClusterName(), empty()), getSelf()); } } @Override public void onGetLastSubmittedJobIdSubject(GetLastSubmittedJobIdStreamRequest r) { Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(r.getClusterName()); ActorRef sender = getSender(); if(jobClusterInfo.isPresent()) { jobClusterInfo.get().jobClusterActor.forward(r, getContext()); } else { sender.tell(new GetLastSubmittedJobIdStreamResponse(r.requestId, CLIENT_ERROR_NOT_FOUND, "No such Job cluster " + r.getClusterName(), empty()), getSelf()); } } @Override public void onWorkerEvent(WorkerEvent 
workerEvent) { if(logger.isDebugEnabled()) { logger.debug("Entering JobClusterManagerActor:onWorkerEvent {}", workerEvent); } String clusterName = workerEvent.getWorkerId().getJobCluster(); Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(clusterName); if(jobClusterInfo.isPresent()) { jobClusterInfo.get().jobClusterActor.forward(workerEvent, getContext()); } else { if(!JobHelper.isTerminalWorkerEvent(workerEvent)) { logger.warn("Event from Worker {} for a cluster {} that no longer exists. Terminate worker", workerEvent, workerEvent.getWorkerId().getJobCluster()); Optional<String> host = JobHelper.getWorkerHostFromWorkerEvent(workerEvent); mantisScheduler.unscheduleAndTerminateWorker(workerEvent.getWorkerId(), host); } else { logger.warn("Terminal Event from Worker {} for a cluster {} that no longer exists. Ignore worker", workerEvent, workerEvent.getWorkerId().getJobCluster()); } } } private void onTerminated(final Terminated terminated) { logger.warn("onTerminated {}", terminated.actor()); } //////////////////// JOB OPERATIONS //////////////////////////////////////////////// @Override public void onJobSubmit(final SubmitJobRequest request) { logger.info("Submitting job " + request); Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getClusterName()); ActorRef sender = getSender(); if(jobClusterInfo.isPresent()) { jobClusterInfo.get().jobClusterActor.forward(request, getContext()); } else { sender.tell(new SubmitJobResponse(request.requestId, CLIENT_ERROR_NOT_FOUND, "Job Cluster " + request.getClusterName() + " doesn't exist", empty()), getSelf()); } } @Override public void onJobKillRequest(final KillJobRequest request) { logger.info("Killing job " + request); ActorRef sender = getSender(); JobId jobIdToKill = request.getJobId(); Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(jobIdToKill.getCluster()); if(jobClusterInfo.isPresent()) { 
jobClusterInfo.get().jobClusterActor.tell( new JobClusterProto.KillJobRequest(request.getJobId(), request.getReason(), JobCompletedReason.Killed, request.getUser(), sender), getSelf()); } else { logger.info("Job cluster {} not found", jobIdToKill.getCluster()); sender.tell(new KillJobResponse(request.requestId, CLIENT_ERROR_NOT_FOUND, JobState.Noop, "Job cluster " + jobIdToKill.getCluster() + " doesn't exist", jobIdToKill, request.getUser()), getSelf()); } } ////////////////////// JOB OPERATIONS END ////////////////////////////////////////////// @Override public void preStart() throws Exception { logger.info("JobClusterManager Actor started"); super.preStart(); } @Override public void postStop() throws Exception { logger.info("JobClusterManager Actor stopped"); super.postStop(); } @Override public void preRestart(Throwable t, Optional<Object> m) throws Exception { logger.info("preRestart {} (exc: {})", m, t.getMessage()); // do not kill all children, which is the default here // super.preRestart(t, m); } @Override public void postRestart(Throwable reason) throws Exception { logger.info("postRestart (exc={})", reason.getMessage()); super.postRestart(reason); } @Override public SupervisorStrategy supervisorStrategy() { // custom supervisor strategy to resume the Actor on Exception instead of the default restart return MantisActorSupervisorStrategy.getInstance().create(); } @Override public Receive createReceive() { return getInitializingBehavior(); } private void logError(Throwable e) { logger.error("Exception occurred retrieving job cluster list {}", e.getMessage()); } @Override public void onJobClusterUpdateSLA(UpdateJobClusterSLARequest request) { Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getClusterName()); ActorRef sender = getSender(); if(jobClusterInfo.isPresent()) { jobClusterInfo.get().jobClusterActor.forward(request, getContext()); } else { sender.tell(new UpdateJobClusterSLAResponse(request.requestId, 
CLIENT_ERROR_NOT_FOUND,
                "JobCluster " + request.getClusterName() + " doesn't exist"), getSelf());
        }
    }

    @Override
    public void onJobClusterUpdateArtifact(UpdateJobClusterArtifactRequest request) {
        Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getClusterName());
        ActorRef sender = getSender();
        if(jobClusterInfo.isPresent()) {
            jobClusterInfo.get().jobClusterActor.forward(request, getContext());
        } else {
            sender.tell(new UpdateJobClusterArtifactResponse(request.requestId, CLIENT_ERROR_NOT_FOUND,
                "JobCluster " + request.getClusterName() + " doesn't exist"), getSelf());
        }
    }

    @Override
    public void onJobClusterUpdateLabels(UpdateJobClusterLabelsRequest request) {
        Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getClusterName());
        ActorRef sender = getSender();
        if(jobClusterInfo.isPresent()) {
            jobClusterInfo.get().jobClusterActor.forward(request, getContext());
        } else {
            sender.tell(new UpdateJobClusterLabelsResponse(request.requestId, CLIENT_ERROR_NOT_FOUND,
                "JobCluster " + request.getClusterName() + " doesn't exist"), getSelf());
        }
    }

    @Override
    public void onJobClusterUpdateWorkerMigrationConfig(UpdateJobClusterWorkerMigrationStrategyRequest request) {
        Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getClusterName());
        ActorRef sender = getSender();
        if(jobClusterInfo.isPresent()) {
            jobClusterInfo.get().jobClusterActor.forward(request, getContext());
        } else {
            sender.tell(new UpdateJobClusterWorkerMigrationStrategyResponse(request.requestId, CLIENT_ERROR_NOT_FOUND,
                "JobCluster " + request.getClusterName() + " doesn't exist"), getSelf());
        }
    }

    @Override
    public void onJobClusterEnable(EnableJobClusterRequest request) {
        Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getClusterName());
        ActorRef sender = getSender();
        if(jobClusterInfo.isPresent()) {
            jobClusterInfo.get().jobClusterActor.forward(request, getContext());
        } else {
            sender.tell(new EnableJobClusterResponse(request.requestId, CLIENT_ERROR_NOT_FOUND,
                "JobCluster " + request.getClusterName() + " doesn't exist"), getSelf());
        }
    }

    @Override
    public void onJobClusterDisable(DisableJobClusterRequest request) {
        Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getClusterName());
        ActorRef sender = getSender();
        if(jobClusterInfo.isPresent()) {
            jobClusterInfo.get().jobClusterActor.forward(request, getContext());
        } else {
            sender.tell(new DisableJobClusterResponse(request.requestId, CLIENT_ERROR_NOT_FOUND,
                "JobCluster " + request.getClusterName() + " doesn't exist"), getSelf());
        }
    }

    @Override
    public void onGetJobDetailsRequest(GetJobDetailsRequest request) {
        Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getJobId().getCluster());
        ActorRef sender = getSender();
        if(jobClusterInfo.isPresent()) {
            jobClusterInfo.get().jobClusterActor.forward(request, getContext());
        } else {
            sender.tell(new GetJobDetailsResponse(request.requestId, CLIENT_ERROR_NOT_FOUND,
                "Job " + request.getJobId().getId() + " doesn't exist", empty()), getSelf());
        }
    }

    @Override
    public void onGetJobStatusSubject(GetJobSchedInfoRequest request) {
        Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getJobId().getCluster());
        ActorRef sender = getSender();
        if(jobClusterInfo.isPresent()) {
            jobClusterInfo.get().jobClusterActor.forward(request, getContext());
        } else {
            sender.tell(new GetJobSchedInfoResponse(request.requestId, CLIENT_ERROR_NOT_FOUND,
                "JobCluster " + request.getJobId().getCluster() + " doesn't exist", Optional.empty()), getSelf());
        }
    }

    @Override
    public void onGetLatestJobDiscoveryInfo(GetLatestJobDiscoveryInfoRequest request) {
        Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getJobCluster());
        ActorRef sender = getSender();
        if(jobClusterInfo.isPresent()) {
            jobClusterInfo.get().jobClusterActor.forward(request, getContext());
        } else {
            sender.tell(new GetLatestJobDiscoveryInfoResponse(request.requestId, CLIENT_ERROR_NOT_FOUND,
                "JobCluster " + request.getJobCluster() + " doesn't exist", Optional.empty()), getSelf());
        }
    }

    @Override
    public void onJobListCompleted(ListCompletedJobsInClusterRequest request) {
        Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getClusterName());
        ActorRef sender = getSender();
        if(jobClusterInfo.isPresent()) {
            jobClusterInfo.get().jobClusterActor.forward(request, getContext());
        } else {
            sender.tell(new ListCompletedJobsInClusterResponse(request.requestId, CLIENT_ERROR_NOT_FOUND,
                "JobCluster " + request.getClusterName() + " doesn't exist", Lists.newArrayList()), getSelf());
        }
    }

    /** Delegates job-id listing across clusters to the JobListHelperActor. */
    @Override
    public void onJobIdList(ListJobIdsRequest request) {
        if(logger.isTraceEnabled()) { logger.trace("Enter onJobIdList"); }
        ActorRef sender = getSender();
        this.jobListHelperActor.tell(new JobListHelperActor.ListJobIdRequestWrapper(request, sender, jobClusterInfoManager.getAllJobClusterInfo()), getSelf());
        if(logger.isTraceEnabled()) { logger.trace("Exit onJobIdList"); }
    }

    @Override
    public void onListArchivedWorkers(ListArchivedWorkersRequest request) {
        Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getJobId().getCluster());
        // NOTE(review): 'sender' is captured but the error reply below uses getSender() — same actor here, but inconsistent.
        ActorRef sender = getSender();
        if(jobClusterInfo.isPresent()) {
            jobClusterInfo.get().jobClusterActor.forward(request, getContext());
        } else {
            getSender().tell(new ListArchivedWorkersResponse(request.requestId, CLIENT_ERROR,
                "Job Cluster " + request.getJobId().getCluster() + " Not found", Lists.newArrayList()), getSelf());
        }
    }

    public void onListActiveWorkers(ListWorkersRequest request) {
        Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(request.getJobId().getCluster());
        if(jobClusterInfo.isPresent()) {
            jobClusterInfo.get().jobClusterActor.forward(request, getContext());
        } else {
getSender().tell(new ListWorkersResponse(request.requestId, CLIENT_ERROR, "Job Cluster " + request.getJobId().getCluster() + " Not found", Lists.newArrayList()), getSelf()); } } @Override public void onJobList(ListJobsRequest request) { ActorRef sender = getSender(); this.jobListHelperActor.tell(new JobListHelperActor.ListJobRequestWrapper(request, sender, jobClusterInfoManager.getAllJobClusterInfo()),getSelf()); } @Override public void onScaleStage(ScaleStageRequest scaleStage) { Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(scaleStage.getJobId().getCluster()); ActorRef sender = getSender(); if(jobClusterInfo.isPresent()) { jobClusterInfo.get().jobClusterActor.forward(scaleStage, getContext()); } else { sender.tell(new ScaleStageResponse(scaleStage.requestId, CLIENT_ERROR_NOT_FOUND, "JobCluster " + scaleStage.getJobId().getCluster() + " doesn't exist",0), getSelf()); } } @Override public void onResubmitWorker(ResubmitWorkerRequest r) { Optional<JobClusterInfo> jobClusterInfo = jobClusterInfoManager.getJobClusterInfo(r.getJobId().getCluster()); ActorRef sender = getSender(); if(jobClusterInfo.isPresent()) { jobClusterInfo.get().jobClusterActor.forward(r, getContext()); } else { sender.tell(new ResubmitWorkerResponse(r.requestId, CLIENT_ERROR_NOT_FOUND, "JobCluster " + r.getJobId().getCluster() + " doesn't exist"), getSelf()); } } class JobClusterInfoManager { private final Map<String, JobClusterInfo> jobClusterNameToInfoMap = new HashMap<>(); private final LifecycleEventPublisher eventPublisher; private MantisScheduler mantisScheduler; private final MantisJobStore jobStore; private final Metrics metrics; JobClusterInfoManager(MantisJobStore jobStore, MantisScheduler mantisScheduler, LifecycleEventPublisher eventPublisher) { this.eventPublisher = eventPublisher; this.mantisScheduler = mantisScheduler; this.jobStore = jobStore; MetricGroupId metricGroupId = new MetricGroupId("JobClusterInfoManager"); Metrics m = new 
Metrics.Builder() .id(metricGroupId) .addGauge(new GaugeCallback(metricGroupId, "jobClustersGauge", () -> 1.0 * jobClusterNameToInfoMap.size())) .build(); this.metrics = MetricsRegistry.getInstance().registerAndGet(m); } /** * Creates the job cluster Actor * Watches it * Adds it to internal map and publishes Lifecycle event * Could throw an unchecked exception if actor creation fails * @param jobClusterDefn * @return jobClusterInfo if actor creation and registration succeeds, else empty */ Optional<JobClusterInfo> createClusterActorAndRegister(IJobClusterDefinition jobClusterDefn) { String clusterName = jobClusterDefn.getName(); if(!isClusterExists(clusterName)) { if (!ActorPaths.isValidPathElement(clusterName)) { logger.error("Cannot create actor for cluster with invalid name {}", clusterName); return empty(); } ActorRef jobClusterActor = getContext().actorOf(JobClusterActor.props(clusterName, this.jobStore, this.mantisScheduler, this.eventPublisher), "JobClusterActor-" + clusterName); getContext().watch(jobClusterActor); JobClusterInfo jobClusterInfo = new JobClusterInfo(clusterName, jobClusterDefn, jobClusterActor); jobClusterNameToInfoMap.put(clusterName, jobClusterInfo); return ofNullable(jobClusterInfo); } else { return ofNullable(jobClusterNameToInfoMap.get(clusterName)); } } void deregisterJobCluster(String jobClusterName) { Optional<JobClusterInfo> jobClusterInfo = getJobClusterInfo(jobClusterName); if(jobClusterInfo.isPresent()) { jobClusterInfo.get().markDeleted(System.currentTimeMillis()); // unwatch and stop actor ActorRef jobClusterActor = jobClusterInfo.get().jobClusterActor; getContext().unwatch(jobClusterActor); getContext().stop(jobClusterActor); jobClusterNameToInfoMap.remove(jobClusterName); } else { logger.warn("Job Cluster does not exist {}", jobClusterInfo); } } Observable<JobClusterProto.InitializeJobClusterResponse> initializeCluster(JobClusterInfo jobClusterInfo, JobClusterProto.InitializeJobClusterRequest req, Duration t) { 
jobClusterInfo.markInitializing(req, System.currentTimeMillis()); CompletionStage<JobClusterProto.InitializeJobClusterResponse> respCS = ask(jobClusterInfo.jobClusterActor, req, t) .thenApply(JobClusterProto.InitializeJobClusterResponse.class::cast); return Observable.from(respCS.toCompletableFuture(),Schedulers.io()) .map((resp)-> { logger.info("JobCluster {} inited with code {}", resp.jobClusterName, resp.responseCode); Optional<JobClusterInfo> jClusterInfo = jobClusterInfoManager.getJobClusterInfo(resp.jobClusterName); if(resp.responseCode == SUCCESS) { jClusterInfo.ifPresent((jci) -> jci.markInitialized(System.currentTimeMillis())); } return resp; }) .onErrorResumeNext(ex -> { logger.warn("caught exception {}", ex.getMessage(), ex); numJobClusterInitFailures.increment(); // initialization fails deregister cluster deregisterJobCluster(jobClusterInfo.clusterName); return Observable.just(new JobClusterProto.InitializeJobClusterResponse(req.requestId, BaseResponse.ResponseCode.SERVER_ERROR,ex.getMessage(), jobClusterInfo.clusterName, ActorRef.noSender())); }); } void initializeClusterAsync(JobClusterInfo jobClusterInfo, JobClusterProto.InitializeJobClusterRequest req) { jobClusterInfo.markInitializing(req,System.currentTimeMillis()); jobClusterInfo.jobClusterActor.tell(req, getSelf()); } Optional<JobClusterInfo> getJobClusterInfo(String jobClusterName) { return ofNullable(jobClusterNameToInfoMap.get(jobClusterName)); } Map<String, JobClusterInfo> getAllJobClusterInfo() { return Collections.unmodifiableMap(jobClusterNameToInfoMap); } boolean isClusterExists(String clusterName) { return jobClusterNameToInfoMap.containsKey(clusterName); } void processInitializeResponse(JobClusterProto.InitializeJobClusterResponse createResp) { Optional<JobClusterInfo> jClusterInfo = getJobClusterInfo(createResp.jobClusterName); if(jClusterInfo.isPresent()) { JobClusterInfo jobClusterInfo = jClusterInfo.get(); if(createResp.responseCode == SUCCESS) { 
jobClusterInfo.markInitialized(System.currentTimeMillis()); createResp.requestor.tell( new CreateJobClusterResponse(createResp.requestId, SUCCESS_CREATED, createResp.jobClusterName + " created", createResp.jobClusterName), getSelf()); } else if( createResp.responseCode == SERVER_ERROR){ deregisterJobCluster(createResp.jobClusterName); createResp.requestor.tell(new CreateJobClusterResponse(createResp.requestId, createResp.responseCode, createResp.message, createResp.jobClusterName), getSelf()); } } else { logger.warn("Received JobClusterInitializeResponse {} for unknown Job Cluster {}", createResp, createResp.jobClusterName); } } void processDeleteRequest(DeleteJobClusterRequest request) { Optional<JobClusterInfo> jobClusterInfoOp= getJobClusterInfo(request.getName()); ActorRef sender = getSender(); if (jobClusterInfoOp.isPresent()) { JobClusterInfo jobClusterInfo = jobClusterInfoOp.get(); jobClusterInfo.jobClusterActor.tell( new JobClusterProto.DeleteJobClusterRequest(request.getUser(), request.getName(), sender), getSelf()); jobClusterInfo.markDeleting(System.currentTimeMillis()); } else { sender.tell( new DeleteJobClusterResponse(request.requestId, CLIENT_ERROR_NOT_FOUND, "JobCluster " + request.getName() + " doesn't exist"), getSelf()); } } void processDeleteResponse(JobClusterProto.DeleteJobClusterResponse resp) { Optional<JobClusterInfo> jobClusterInfoOp= getJobClusterInfo(resp.clusterName); if(jobClusterInfoOp.isPresent()) { if(resp.responseCode == SUCCESS) { deregisterJobCluster(resp.clusterName); } } else { // No Such job cluster ignore logger.warn("Received delete job cluster response {} for unknown job cluster {}", resp, resp.clusterName); } // inform caller resp.requestingActor.tell( new DeleteJobClusterResponse(resp.requestId, resp.responseCode, resp.message) , getSelf()); } } static class JobClusterInfo { private static final Logger logger = LoggerFactory.getLogger(JobClusterInfo.class); public enum JobClusterState { UNINITIALIZED, INITIALIZING, 
INITIALIZED, DELETING, DELETED} private JobClusterProto.InitializeJobClusterRequest initRequest; final String clusterName; final ActorRef jobClusterActor; private volatile JobClusterState currentState; volatile long stateUpdateTime; final IJobClusterDefinition jobClusterDefinition; JobClusterInfo(final String clusterName, final IJobClusterDefinition clusterDefn, final ActorRef actor) { this.clusterName = clusterName; this.jobClusterActor = actor; this.jobClusterDefinition = clusterDefn; this.currentState = JobClusterState.UNINITIALIZED; this.stateUpdateTime = System.currentTimeMillis(); } public String getClusterName() { return clusterName; } public IJobClusterDefinition getJobClusterDefinition() { return jobClusterDefinition; } void markInitializing(JobClusterProto.InitializeJobClusterRequest req, long time) { if(currentState == JobClusterState.UNINITIALIZED) { this.stateUpdateTime = time; currentState = JobClusterState.INITIALIZING; initRequest = req; } else { logger.warn("Invalid state transition from {} to {} for job cluster {}", currentState, JobClusterState.INITIALIZING, clusterName); } } void markInitialized(long time) { if(currentState == JobClusterState.INITIALIZING ) { this.stateUpdateTime = time; this.currentState = JobClusterState.INITIALIZED; } else { logger.warn("Invalid state transition from {} to {} for job cluster {}", currentState, JobClusterState.INITIALIZED, clusterName); } } void markDeleting(long time) { this.currentState = JobClusterState.DELETING; this.stateUpdateTime = time; } void markDeleted(long time) { this.currentState = JobClusterState.DELETED; this.stateUpdateTime = time; } @Override public String toString() { return "JobClusterInfo{" + "clusterName='" + clusterName + '\'' + ", jobClusterActor=" + jobClusterActor + ", currentState=" + currentState + ", stateUpdateTime=" + stateUpdateTime + ", jobClusterDefinition=" + jobClusterDefinition + '}'; } } }
4,358
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/IJobClustersManager.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.master;

import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.CreateJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DeleteJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.KillJobRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobClustersRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobIdsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.DeleteJobClusterResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.InitializeJobClusterResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.KillJobResponse;
import io.mantisrx.server.master.scheduler.WorkerEvent;

/**
 * Message-handler contract for the actor that manages the collection of job clusters.
 * Each {@code onXxx} method corresponds to one request (or internal response) message type;
 * implementations are expected to be invoked from an actor's receive loop, one message at a
 * time. NOTE(review): threading/ordering guarantees come from the actor model, not from this
 * interface itself — confirm against the implementing actor.
 */
public interface IJobClustersManager {

    // cluster related messages

    /** Handles a request to create a new job cluster. */
    void onJobClusterCreate(CreateJobClusterRequest request);

    /** Handles the (internal) response produced when a newly created cluster finishes initializing. */
    void onJobClusterInitializeResponse(InitializeJobClusterResponse createResp);

    /** Handles a request to delete an existing job cluster. */
    void onJobClusterDelete(DeleteJobClusterRequest request);

    /** Handles the (internal) response produced when a cluster deletion completes. */
    void onJobClusterDeleteResponse(DeleteJobClusterResponse resp);

    /** Handles a request to update a job cluster's definition. */
    void onJobClusterUpdate(UpdateJobClusterRequest request);

    /** Handles a request to update a job cluster's SLA settings. */
    void onJobClusterUpdateSLA(JobClusterManagerProto.UpdateJobClusterSLARequest r);

    /** Handles a request to update the artifact associated with a job cluster. */
    void onJobClusterUpdateArtifact(JobClusterManagerProto.UpdateJobClusterArtifactRequest r);

    /** Handles a request to update a job cluster's labels. */
    void onJobClusterUpdateLabels(JobClusterManagerProto.UpdateJobClusterLabelsRequest r);

    /** Handles a request to update a job cluster's worker-migration strategy. */
    void onJobClusterUpdateWorkerMigrationConfig(JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyRequest r);

    /** Handles a request to list all job clusters. */
    void onJobClustersList(ListJobClustersRequest request);

    /** Handles a request to fetch a single job cluster's details. */
    void onJobClusterGet(JobClusterManagerProto.GetJobClusterRequest r);

    /** Handles a request to enable a job cluster. */
    void onJobClusterEnable(JobClusterManagerProto.EnableJobClusterRequest r);

    /** Handles a request to disable a job cluster. */
    void onJobClusterDisable(JobClusterManagerProto.DisableJobClusterRequest r);

    /** Handles a request for a job's scheduling-info subject/stream. */
    void onGetJobStatusSubject(JobClusterManagerProto.GetJobSchedInfoRequest request);

    /** Handles a request for the latest discovery info of a job cluster. */
    void onGetLatestJobDiscoveryInfo(JobClusterManagerProto.GetLatestJobDiscoveryInfoRequest request);

    /** Handles a request to list completed jobs within a cluster. */
    void onJobListCompleted(JobClusterManagerProto.ListCompletedJobsInClusterRequest r);

    /** Handles a request to list jobs. */
    void onJobList(ListJobsRequest request);

    /** Handles a request to list job IDs. */
    void onJobIdList(ListJobIdsRequest request);

    // worker related messages

    /** Handles a request for the stream of last-submitted job IDs for a cluster. */
    void onGetLastSubmittedJobIdSubject(JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest request);

    /** Handles a worker lifecycle event reported by the scheduler. */
    void onWorkerEvent(WorkerEvent r);

    // Job related messages

    /** Handles a request to submit a new job to a cluster. */
    void onJobSubmit(JobClusterManagerProto.SubmitJobRequest request);

    /** Handles a request to kill a running job. */
    void onJobKillRequest(KillJobRequest request);

    /** Handles a request for a single job's details. */
    void onGetJobDetailsRequest(JobClusterManagerProto.GetJobDetailsRequest request);

    /** Handles a request to scale a job stage's worker count. */
    void onScaleStage(ScaleStageRequest scaleStage);

    /** Handles a request to resubmit (replace) a worker of a job. */
    void onResubmitWorker(ResubmitWorkerRequest r);

    /** Handles a request to list a job's archived (no longer active) workers. */
    void onListArchivedWorkers(ListArchivedWorkersRequest request);

    /** Handles a request to list a job's active workers. */
    void onListActiveWorkers(JobClusterManagerProto.ListWorkersRequest request);

    /** Handles a periodic trigger to reconcile job cluster state. */
    void onReconcileJobClusters(JobClusterManagerProto.ReconcileJobCluster p);
}
4,359
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/DeadLetterActor.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master; import akka.actor.AbstractActor; import akka.actor.DeadLetter; import io.mantisrx.common.metrics.Counter; import io.mantisrx.common.metrics.Metrics; import io.mantisrx.common.metrics.MetricsRegistry; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class DeadLetterActor extends AbstractActor { private final Logger log = LoggerFactory.getLogger(DeadLetterActor.class); private final Metrics metrics; private final Counter numDeadLetterMsgs; public DeadLetterActor() { Metrics m = new Metrics.Builder() .id("DeadLetterActor") .addCounter("numDeadLetterMsgs") .build(); this.metrics = MetricsRegistry.getInstance().registerAndGet(m); this.numDeadLetterMsgs = metrics.getCounter("numDeadLetterMsgs"); } @Override public Receive createReceive() { return receiveBuilder() .match(DeadLetter.class, msg -> { this.numDeadLetterMsgs.increment(); String m = msg.message().toString(); log.info("Dead Letter from {} to {} msg:{}", msg.sender(), msg.recipient(), m.substring(0, Math.min(250, m.length() - 1))); }) .build(); } }
4,360
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/zk/LeaderElector.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.zk; import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper; import io.mantisrx.server.core.BaseService; import io.mantisrx.server.master.ILeadershipManager; import io.netty.util.concurrent.DefaultThreadFactory; import org.apache.curator.framework.CuratorFramework; import org.apache.curator.framework.recipes.leader.LeaderLatch; import org.apache.curator.framework.recipes.leader.LeaderLatchListener; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.data.Stat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.concurrent.Executors; import static org.apache.zookeeper.KeeperException.Code.*; public class LeaderElector extends BaseService { private static final Logger logger = LoggerFactory.getLogger(LeaderElector.class); private volatile boolean started = false; private final ObjectMapper jsonMapper; private final ILeadershipManager leadershipManager; private final LeaderLatch leaderLatch; private final CuratorFramework curator; private final String electionPath; // The path where a selected leader announces itself. 
private final String leaderPath; private LeaderElector(ObjectMapper jsonMapper, ILeadershipManager leadershipManager, CuratorFramework curator, String electionPath, String leaderPath) { super(false); this.jsonMapper = jsonMapper; this.leadershipManager = leadershipManager; this.curator = curator; this.leaderLatch = createNewLeaderLatch(electionPath); this.electionPath = electionPath; this.leaderPath = leaderPath; } @Override public void start() { if (started) { return; } started = true; try { Stat pathStat = curator.checkExists().forPath(leaderPath); // Create the path only if the path does not exist if(pathStat == null) { curator.create() .creatingParentsIfNeeded() .withMode(CreateMode.PERSISTENT) .forPath(leaderPath); } leaderLatch.start(); } catch (Exception e) { throw new IllegalStateException("Failed to create a leader elector for master: "+e.getMessage(), e); } } @Override public void shutdown() { try { leaderLatch.close(); } catch (IOException e) { logger.warn("Failed to close the leader latch: "+e.getMessage(), e); }finally { started = false; } } private LeaderLatch createNewLeaderLatch(String leaderPath) { final LeaderLatch newLeaderLatch = new LeaderLatch(curator, leaderPath, "127.0.0.1"); newLeaderLatch.addListener( new LeaderLatchListener() { @Override public void isLeader() { announceLeader(); } @Override public void notLeader() { leadershipManager.stopBeingLeader(); } }, Executors.newSingleThreadExecutor(new DefaultThreadFactory("MasterLeader-%s"))); return newLeaderLatch; } private void announceLeader() { try { logger.info("Announcing leader"); byte[] masterDescription = jsonMapper.writeValueAsBytes(leadershipManager.getDescription()); // There is no need to lock anything because we ensure only leader will write to the leader path curator .setData() .inBackground((client, event) -> { if (event.getResultCode() == OK.intValue()) { leadershipManager.becomeLeader(); } else { logger.warn("Failed to elect leader from path {} with event {}", leaderPath, 
event); } }).forPath(leaderPath, masterDescription); } catch (Exception e) { throw new RuntimeException("Failed to announce leader: "+e.getMessage(), e); } } public static LeaderElector.Builder builder(ILeadershipManager manager) { return new LeaderElector.Builder(manager); } public static class Builder { private ObjectMapper jsonMapper; private ILeadershipManager leadershipManager; private CuratorFramework curator; private String electionPath; private String announcementPath; public Builder(ILeadershipManager leadershipManager){ this.leadershipManager = leadershipManager; } public LeaderElector.Builder withJsonMapper(ObjectMapper jsonMapper) { this.jsonMapper = jsonMapper; return this; } public LeaderElector.Builder withCurator(CuratorFramework curator) { this.curator = curator; return this; } public LeaderElector.Builder withElectionPath(String path) { this.electionPath = path; return this; } public LeaderElector.Builder withAnnouncementPath(String annPath) { this.announcementPath = annPath; return this; } public LeaderElector build() { return new LeaderElector(jsonMapper, leadershipManager, curator, electionPath, announcementPath); } } }
4,361
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/scheduler/ScheduleRequest.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.scheduler; import com.netflix.fenzo.ConstraintEvaluator; import com.netflix.fenzo.VMTaskFitnessCalculator; import com.netflix.fenzo.queues.QAttributes; import com.netflix.fenzo.queues.QueuableTask; import io.mantisrx.runtime.MachineDefinition; import io.mantisrx.runtime.MantisJobDurationType; import io.mantisrx.server.core.domain.JobMetadata; import io.mantisrx.server.core.domain.WorkerId; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; public class ScheduleRequest implements QueuableTask { public static final QAttributes DEFAULT_Q_ATTRIBUTES = new QAttributes() { @Override public String getBucketName() { return "default"; } @Override public int getTierNumber() { return 0; } }; private static final String defaultGrpName = "defaultGrp"; private final WorkerId workerId; private final int stageNum; private final int numPortsRequested; private final JobMetadata jobMetadata; private final MantisJobDurationType durationType; private final MachineDefinition machineDefinition; private final List<ConstraintEvaluator> hardConstraints; private final List<VMTaskFitnessCalculator> softConstraints; private volatile long readyAt; private final Optional<String> preferredCluster; public ScheduleRequest(final WorkerId workerId, final int stageNum, final int numPortsRequested, final JobMetadata jobMetadata, final MantisJobDurationType 
durationType, final MachineDefinition machineDefinition, final List<ConstraintEvaluator> hardConstraints, final List<VMTaskFitnessCalculator> softConstraints, final long readyAt, final Optional<String> preferredCluster) { this.workerId = workerId; this.stageNum = stageNum; this.numPortsRequested = numPortsRequested; this.jobMetadata = jobMetadata; this.durationType = durationType; this.machineDefinition = machineDefinition; this.hardConstraints = hardConstraints; this.softConstraints = softConstraints; this.readyAt = readyAt; this.preferredCluster = preferredCluster; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ScheduleRequest that = (ScheduleRequest) o; return workerId != null ? workerId.equals(that.workerId) : that.workerId == null; } @Override public int hashCode() { return workerId != null ? workerId.hashCode() : 0; } @Override public String getId() { return workerId.getId(); } public WorkerId getWorkerId() { return workerId; } @Override public String taskGroupName() { return defaultGrpName; } @Override public double getCPUs() { return machineDefinition.getCpuCores(); } @Override public double getMemory() { return machineDefinition.getMemoryMB(); } @Override public double getNetworkMbps() { return machineDefinition.getNetworkMbps(); } @Override public double getDisk() { return machineDefinition.getDiskMB(); } @Override public int getPorts() { return numPortsRequested; } public JobMetadata getJobMetadata() { return jobMetadata; } public MachineDefinition getMachineDefinition() { return machineDefinition; } @Override public Map<String, Double> getScalarRequests() { return Collections.emptyMap(); } @Override public Map<String, NamedResourceSetRequest> getCustomNamedResources() { return Collections.emptyMap(); } @Override public List<ConstraintEvaluator> getHardConstraints() { return hardConstraints; } @Override public List<VMTaskFitnessCalculator> getSoftConstraints() { return 
softConstraints; } @Override public void setAssignedResources(AssignedResources assignedResources) { // no-op Not using them at this time } @Override public AssignedResources getAssignedResources() { // not used by Mantis return null; } public MantisJobDurationType getDurationType() { return durationType; } public int getStageNum() { return stageNum; } @Override public QAttributes getQAttributes() { return DEFAULT_Q_ATTRIBUTES; } @Override public long getReadyAt() { return readyAt; } @Override public void safeSetReadyAt(long when) { readyAt = when; } public Optional<String> getPreferredCluster() { return preferredCluster; } @Override public String toString() { return "ScheduleRequest{" + "workerId=" + workerId + ", readyAt=" + readyAt + '}'; } }
4,362
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/scheduler/AgentsErrorMonitorActor.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.scheduler;

import akka.actor.AbstractActorWithTimers;
import akka.actor.ActorRef;
import akka.actor.Props;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.master.events.LifecycleEventsProto;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.functions.Action1;

import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.TimeUnit;

import static java.util.Optional.empty;
import static java.util.Optional.of;

/**
 * Actor that watches per-host worker error events and disables an agent (VM) when too many
 * worker errors occur on it within a rolling time window. A later successful worker start on
 * the same host clears its error history and re-enables it.
 *
 * <p>Lifecycle: starts in an uninitialized state accepting only {@link InitializeAgentsErrorMonitor};
 * once initialized it switches (via {@code getContext().become}) to {@code initializedBehavior},
 * which handles worker status events, periodic health checks, and error-map queries.
 */
public class AgentsErrorMonitorActor extends AbstractActorWithTimers implements IAgentsErrorMonitor {

    private final Logger logger = LoggerFactory.getLogger(AgentsErrorMonitorActor.class);

    // Default rolling-window length used to count errors per host.
    private static final long ERROR_CHECK_WINDOW_MILLIS=120000; // 2 mins
    // Default error-count threshold; a host is disabled when errors in the window EXCEED this
    // value (strictly greater than — see HostErrors.addAndGetIsTooManyErrors).
    private static final int ERROR_CHECK_WINDOW_COUNT=3;
    // Hosts with no activity for this long are evicted from tracking.
    private static final long TOO_OLD_MILLIS = 3600000;
    // How long a disabled host stays disabled.
    private static final long DISABLE_DURATION_MILLIS = 60*1000; // 1mins

    // Placeholders until onInitialize installs the real scheduler-backed callbacks.
    private Action1<String> slaveEnabler = s -> logger.warn("SlaveEnabler not initialized yet!");
    private Action1<String> slaveDisabler = s -> logger.warn("SlaveDisabler not initialized yet!");

    // Effective (possibly overridden) copies of the defaults above, validated in the constructor.
    private long too_old_mills;
    private int error_check_window_count;
    private long error_check_window_millis;
    private long disableDurationMillis;

    // Per-host error tracking state, keyed by hostname.
    private final Map<String, HostErrors> hostErrorMap = new HashMap<>();

    private static final String CHECK_HOST_TIMER_KEY = "CHECK_HOST";
    private Optional<MantisScheduler> mantisSchedulerOptional = empty();

    // Behavior after being initialized
    Receive initializedBehavior;

    /** Props factory with explicit tuning parameters (invalid values fall back to defaults). */
    public static Props props(long too_old_millis, int error_check_window_count, long error_check_window_millis, long disableDurationMillis) {
        return Props.create(AgentsErrorMonitorActor.class, too_old_millis, error_check_window_count,error_check_window_millis, disableDurationMillis);
    }

    /** Props factory using all default tuning parameters. */
    public static Props props() {
        return Props.create(AgentsErrorMonitorActor.class, TOO_OLD_MILLIS, ERROR_CHECK_WINDOW_COUNT,ERROR_CHECK_WINDOW_MILLIS, DISABLE_DURATION_MILLIS);
    }

    public AgentsErrorMonitorActor() {
        this(TOO_OLD_MILLIS,ERROR_CHECK_WINDOW_COUNT,ERROR_CHECK_WINDOW_MILLIS, DISABLE_DURATION_MILLIS);
    }

    /**
     * @param too_old_millis            inactivity eviction threshold; must be &gt; 0 or default is used
     * @param error_check_window_count  error threshold per window; must be &gt; 0 or default is used
     * @param error_check_window_millis rolling window length; must be &gt; 1000 or default is used
     * @param disableDurationMillis     disable duration; must be &gt; -1 or default is used
     */
    public AgentsErrorMonitorActor(long too_old_millis, int error_check_window_count, long error_check_window_millis, long disableDurationMillis) {
        this.too_old_mills = (too_old_millis>0)? too_old_millis : TOO_OLD_MILLIS;
        this.error_check_window_count = (error_check_window_count>0)? error_check_window_count : ERROR_CHECK_WINDOW_COUNT;
        this.error_check_window_millis = (error_check_window_millis>1000)? error_check_window_millis : ERROR_CHECK_WINDOW_MILLIS;
        this.disableDurationMillis = (disableDurationMillis>-1) ? disableDurationMillis : DISABLE_DURATION_MILLIS;
        // Behavior installed after initialization: worker events, periodic checks, map queries.
        this.initializedBehavior = receiveBuilder()
                .match(LifecycleEventsProto.WorkerStatusEvent.class, js -> onWorkerEvent(js))
                .match(CheckHostHealthMessage.class, js -> onCheckHostHealth())
                .match(HostErrorMapRequest.class, js -> onHostErrorMapRequest())
                .matchAny(x -> logger.warn("unexpected message '{}' received by AgentsErrorMonitorActor actor ", x))
                .build();
    }

    /** Initial behavior: only the initialization message is accepted before setup. */
    @Override
    public Receive createReceive() {
        return receiveBuilder()
                .match(InitializeAgentsErrorMonitor.class, js -> onInitialize(js))
                .matchAny(x -> logger.warn("unexpected message '{}' received by AgentsErrorMonitorActor actor ", x))
                .build();
    }

    /**
     * Wires the scheduler-backed enable/disable callbacks, switches to the initialized behavior,
     * and starts the periodic host-health timer (period = error_check_window_millis).
     */
    public void onInitialize(InitializeAgentsErrorMonitor initializeAgentsErrorMonitor) {
        this.mantisSchedulerOptional = of(initializeAgentsErrorMonitor.getScheduler());
        slaveDisabler = hostName -> mantisSchedulerOptional.get().disableVM(hostName,disableDurationMillis);
        slaveEnabler = hostName -> mantisSchedulerOptional.get().enableVM(hostName);
        getContext().become(initializedBehavior);
        getTimers().startPeriodicTimer(CHECK_HOST_TIMER_KEY, new CheckHostHealthMessage(), scala.concurrent.duration.Duration.create(error_check_window_millis, TimeUnit.MILLISECONDS));
    }

    /**
     * Periodic sweep: evicts tracking entries for hosts that have shown no activity for longer
     * than the too-old threshold. Uses Iterator.remove to mutate safely during iteration.
     */
    @Override
    public void onCheckHostHealth() {
        Instant currentTime = Instant.now();
        Iterator<HostErrors> it = hostErrorMap.values().iterator();
        while(it.hasNext()) {
            HostErrors hErrors = it.next();
            long lastActivityAt = hErrors.getLastActivityAt();
            long timeSinceLastEvent = currentTime.toEpochMilli() - lastActivityAt;
            if(timeSinceLastEvent > this.too_old_mills) {
                logger.debug("No Events from host since {} evicting", timeSinceLastEvent);
                it.remove();
            }
        }
    }

    /**
     * Records a worker error against its host (when the event carries a hostname and is an error
     * state) and disables the host if its rolling-window error count exceeds the threshold.
     * Note: only error-state events reach HostErrors here, so the Started-event clearing path in
     * HostErrors.addAndGetIsTooManyErrors is not triggered via this method.
     */
    @Override
    public void onWorkerEvent(LifecycleEventsProto.WorkerStatusEvent workerEvent) {
        if(logger.isTraceEnabled()) { logger.trace("onWorkerEvent " + workerEvent + " is error state " + WorkerState.isErrorState(workerEvent.getWorkerState())); }
        if(workerEvent.getHostName().isPresent() && WorkerState.isErrorState(workerEvent.getWorkerState())) {
            String hostName = workerEvent.getHostName().get();
            logger.info("Registering worker error on host {}", hostName);
            HostErrors hostErrors = hostErrorMap.computeIfAbsent(hostName, (hName) -> new HostErrors(hName,slaveEnabler,this.error_check_window_millis,this.error_check_window_count));
            if(hostErrors.addAndGetIsTooManyErrors(workerEvent)) {
                logger.warn("Host {} has too many errors in a short duration, disabling..", hostName);
                this.slaveDisabler.call(hostName);
            }
        }
    }

    /** Replies to the sender with a read-only view of the current per-host error map. */
    @Override
    public void onHostErrorMapRequest() {
        ActorRef sender = getSender();
        sender.tell(new HostErrorMapResponse(Collections.unmodifiableMap(this.hostErrorMap)), getSelf());
    }

    /** Initialization message carrying the scheduler used to enable/disable VMs. */
    public static class InitializeAgentsErrorMonitor {
        private final MantisScheduler scheduler;
        public InitializeAgentsErrorMonitor(final MantisScheduler scheduler) {
            Preconditions.checkNotNull(scheduler, "MantisScheduler cannot be null");
            this.scheduler = scheduler;
        }
        public MantisScheduler getScheduler() {
            return this.scheduler;
        }
    }

    /** Timer tick message; an explicit timestamp may be injected (e.g. for tests). */
    static class CheckHostHealthMessage {
        // -1 sentinel means "use the wall clock at read time".
        long now = -1;
        public CheckHostHealthMessage() {
        }
        public CheckHostHealthMessage(long now) {
            this.now = now;
        }
        public long getCurrentTime() {
            if(now == -1) {
                return System.currentTimeMillis();
            } else {
                return this.now;
            }
        }
    }

    /** Query message asking for the current host error map. */
    static class HostErrorMapRequest {
    }

    /** Response wrapping the (unmodifiable) host error map. */
    static class HostErrorMapResponse {
        private final Map<String, HostErrors> errorMap;
        public HostErrorMapResponse(final Map<String, HostErrors> hostErrorsMap) {
            this.errorMap = hostErrorsMap;
        }
        public Map<String,HostErrors> getMap() {
            return this.errorMap;
        }
    }

    /**
     * Per-host rolling window of worker-error timestamps. Not thread-safe; intended to be
     * accessed only from the owning actor's message-processing thread.
     */
    static class HostErrors {
        private static final Logger logger = LoggerFactory.getLogger(HostErrors.class);
        private final String hostname;
        // Timestamps (epoch millis, from event timestamps) of errors within the window.
        private final List<Long> errors;
        private long lastActivityAt = System.currentTimeMillis();
        private final Action1<String> slaveEnabler;
        private final long error_check_window_millis;
        private final int windowCount;

        HostErrors(String hostname, Action1<String> slaveEnabler, long error_check_window_millis, int windowCount) {
            this.hostname = hostname;
            this.errors = new ArrayList<>();
            this.slaveEnabler = slaveEnabler;
            this.error_check_window_millis = error_check_window_millis;
            this.windowCount = windowCount;
        }

        long getLastActivityAt() {
            return lastActivityAt;
        }

        /**
         * Records the event and returns true when the number of errors remaining in the rolling
         * window is STRICTLY GREATER than windowCount (i.e. windowCount + 1 errors are needed to
         * trip the disable). A Started event clears the error history and re-enables the host.
         * Window age is measured relative to this event's timestamp, not the wall clock.
         */
        boolean addAndGetIsTooManyErrors(LifecycleEventsProto.WorkerStatusEvent status) {
            logger.info("InaddGetisTooManyErrors for host {}", hostname);
            lastActivityAt = status.getTimestamp();
            if(WorkerState.isErrorState(status.getWorkerState())) {
                errors.add(lastActivityAt);
                logger.info("Registering error {}", errors);
            } else if(status.getWorkerState() == WorkerState.Started) {
                // saw a successfull worker start and error list is not empty clear it and reenable host
                if(!errors.isEmpty()) {
                    errors.clear();
                    logger.info("{} cleared of errors, reenabling host ", hostname);
                    slaveEnabler.call(hostname);
                }
            }
            final Iterator<Long> iterator = errors.iterator();
            while(iterator.hasNext()) {
                final long next = iterator.next();
                // purge old events (rolling window)
                if((lastActivityAt - next) > error_check_window_millis) iterator.remove();
            }
            logger.info("No of errors in window is {} ", errors.size());
            return errors.size() > windowCount;
        }

        List<Long> getErrorTimestampList() {
            return Collections.unmodifiableList(errors);
        }
    }
}
4,363
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/scheduler/ConstraintsEvaluators.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.scheduler; // //import com.netflix.fenzo.AsSoftConstraint; //import com.netflix.fenzo.ConstraintEvaluator; //import com.netflix.fenzo.VMTaskFitnessCalculator; //import com.netflix.fenzo.plugins.BalancedHostAttrConstraint; //import com.netflix.fenzo.plugins.ExclusiveHostConstraint; //import com.netflix.fenzo.plugins.UniqueHostAttrConstraint; //import io.mantisrx.runtime.JobConstraints; //import io.mantisrx.server.master.config.ConfigurationProvider; //import io.mantisrx.server.master.ClusterAffinityConstraint; //import org.slf4j.Logger; //import org.slf4j.LoggerFactory; //import com.netflix.fenzo.functions.Func1; // //import java.util.Set; // //public class ConstraintsEvaluators { // // private static final String MANTISAGENT_MAIN_M4 = "mantisagent-main-m4"; // private static final int EXPECTED_NUM_ZONES = 3; // private static ExclusiveHostConstraint exclusiveHostConstraint = new ExclusiveHostConstraint(); // private static final Logger logger = LoggerFactory.getLogger(ConstraintsEvaluators.class); // // public static ConstraintEvaluator hardConstraint(JobConstraints constraint, final Set<String> coTasks) { // switch (constraint) { // case ExclusiveHost: // return exclusiveHostConstraint; // case UniqueHost: // return new UniqueHostAttrConstraint(new Func1<String, Set<String>>() { // @Override // public Set<String> call(String s) { // return coTasks; // } // }); // case 
ZoneBalance: // return new BalancedHostAttrConstraint(new Func1<String, Set<String>>() { // @Override // public Set<String> call(String s) { // return coTasks; // } // }, zoneAttributeName(), EXPECTED_NUM_ZONES); // case M4Cluster: // return new ClusterAffinityConstraint(asgAttributeName(), MANTISAGENT_MAIN_M4); // default: // logger.error("Unknown job hard constraint " + constraint); // return null; // } // } // // private static String asgAttributeName() { // return ConfigurationProvider.getConfig().getActiveSlaveAttributeName(); // } // // private static String zoneAttributeName() { // return ConfigurationProvider.getConfig().getHostZoneAttributeName(); // } // // public static VMTaskFitnessCalculator softConstraint(JobConstraints constraint, final Set<String> coTasks) { // switch (constraint) { // case ExclusiveHost: // return AsSoftConstraint.get(exclusiveHostConstraint); // case UniqueHost: // return AsSoftConstraint.get(new UniqueHostAttrConstraint(new Func1<String, Set<String>>() { // @Override // public Set<String> call(String s) { // return coTasks; // } // })); // case ZoneBalance: // return new BalancedHostAttrConstraint(new Func1<String, Set<String>>() { // @Override // public Set<String> call(String s) { // return coTasks; // } // }, zoneAttributeName(), EXPECTED_NUM_ZONES).asSoftConstraint(); // case M4Cluster: // return AsSoftConstraint.get(new ClusterAffinityConstraint(asgAttributeName(), MANTISAGENT_MAIN_M4)); // default: // logger.error("Unknown job soft constraint " + constraint); // return null; // } // } //}
4,364
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/scheduler/IAgentsErrorMonitor.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.scheduler;

import io.mantisrx.master.events.LifecycleEventsProto;

/**
 * Message-handling contract for the agents error monitor, which tracks worker errors per host
 * and disables/re-enables agents accordingly.
 */
public interface IAgentsErrorMonitor {

    /** Handles the periodic health-check tick (e.g. evicting stale host entries). */
    void onCheckHostHealth();

    /** Handles a worker status event, recording errors against the worker's host. */
    void onWorkerEvent(LifecycleEventsProto.WorkerStatusEvent workerEvent);

    /** Handles a request for the current per-host error map. */
    void onHostErrorMapRequest();
}
4,365
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/scheduler/WorkerStateAdapter.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.scheduler; import io.mantisrx.master.jobcluster.job.worker.WorkerState; import io.mantisrx.server.master.jobmgmt.MantisJobStateAdapter; import io.mantisrx.server.master.scheduler.WorkerResourceStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class WorkerStateAdapter { private static final Logger logger = LoggerFactory.getLogger(MantisJobStateAdapter.class); // Mark constructor private as this class is not intended to be instantiated private WorkerStateAdapter() {} public static WorkerState from(final WorkerResourceStatus.VMResourceState resourceState) { final WorkerState state; switch (resourceState) { case START_INITIATED: state = WorkerState.StartInitiated; break; case STARTED: state = WorkerState.Started; break; case FAILED: state = WorkerState.Failed; break; case COMPLETED: state = WorkerState.Completed; break; default: logger.error("Missing WorkerState mapping for VMResourceState {}", resourceState); throw new IllegalArgumentException("unknown enum value for VMResourceState " + resourceState); } return state; } }
4,366
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/scheduler/JobMessageRouterImpl.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.scheduler; import akka.actor.ActorRef; import io.mantisrx.server.master.scheduler.JobMessageRouter; import io.mantisrx.server.master.scheduler.WorkerEvent; public class JobMessageRouterImpl implements JobMessageRouter { final ActorRef jobClusterManagerRef; public JobMessageRouterImpl(final ActorRef jobClusterManagerActorRef) { this.jobClusterManagerRef = jobClusterManagerActorRef; } @Override public boolean routeWorkerEvent(final WorkerEvent workerEvent) { jobClusterManagerRef.tell(workerEvent, ActorRef.noSender()); /* TODO - need a return value to indicate to scheduling service if the worker was marked Launched successfully from the Job Management perspective, only then the Task is dispatched to Mesos. If this method returns false for the WorkerLaunched event, no mesos task is launched. The return value would have to be async to work with the Actor model unless we use the Ask pattern here. */ return true; } }
4,367
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/utils/CaffeineMetrics.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.utils;

import com.github.benmanes.caffeine.cache.stats.CacheStats;
import com.github.benmanes.caffeine.cache.stats.StatsCounter;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;

import java.util.concurrent.TimeUnit;

import static java.util.Objects.requireNonNull;

/**
 * Caffeine {@link StatsCounter} implementation that forwards cache statistics into the Mantis
 * metrics registry, one metric group per cache.
 */
public final class CaffeineMetrics implements StatsCounter {

    private final Counter hitCount;
    private final Counter missCount;
    private final Counter loadSuccessCount;
    private final Counter loadFailureCount;
    // TODO make totalLoadTime a Timer
    // NOTE(review): this gauge is overwritten with each load's duration rather than accumulated,
    // so snapshot()'s totalLoadTime reflects only the most recent load — confirm intent.
    private final Gauge totalLoadTime;
    private final Counter evictionCount;
    private final Counter evictionWeight;

    /**
     * Creates and registers the metric group backing this counter.
     *
     * @param metricGroup non-null suffix identifying the cache being instrumented
     */
    public CaffeineMetrics(String metricGroup) {
        requireNonNull(metricGroup);
        Metrics metricsSpec = new Metrics.Builder()
            .id("CaffeineMetrics_" + metricGroup)
            .addCounter("hits")
            .addCounter("misses")
            .addGauge("loadTimeMillis")
            .addCounter("loadsSuccess")
            .addCounter("loadsFailure")
            .addCounter("evictions")
            .addCounter("evictionsWeight")
            .build();
        Metrics registered = MetricsRegistry.getInstance().registerAndGet(metricsSpec);
        hitCount = registered.getCounter("hits");
        missCount = registered.getCounter("misses");
        totalLoadTime = registered.getGauge("loadTimeMillis");
        loadSuccessCount = registered.getCounter("loadsSuccess");
        loadFailureCount = registered.getCounter("loadsFailure");
        evictionCount = registered.getCounter("evictions");
        evictionWeight = registered.getCounter("evictionsWeight");
    }

    @Override
    public void recordHits(int count) {
        hitCount.increment(count);
    }

    @Override
    public void recordMisses(int count) {
        missCount.increment(count);
    }

    @Override
    public void recordLoadSuccess(long loadTime) {
        loadSuccessCount.increment();
        // loadTime arrives in nanoseconds; gauge is reported in milliseconds.
        totalLoadTime.set(TimeUnit.NANOSECONDS.toMillis(loadTime));
    }

    @Override
    public void recordLoadFailure(long loadTime) {
        loadFailureCount.increment();
        totalLoadTime.set(TimeUnit.NANOSECONDS.toMillis(loadTime));
    }

    @Override
    @SuppressWarnings("deprecation")
    public void recordEviction() {
        // This method is scheduled for removal in version 3.0 in favor of recordEviction(weight)
        recordEviction(1);
    }

    @Override
    public void recordEviction(int weight) {
        evictionCount.increment();
        evictionWeight.increment(weight);
    }

    /** Returns an immutable point-in-time view of the recorded statistics. */
    @Override
    public CacheStats snapshot() {
        return new CacheStats(
            hitCount.value(),
            missCount.value(),
            loadSuccessCount.value(),
            loadFailureCount.value(),
            totalLoadTime.value(),
            evictionCount.value(),
            evictionWeight.value());
    }

    @Override
    public String toString() {
        return snapshot().toString();
    }
}
4,368
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/IJobClusterManager.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.jobcluster;

import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DisableJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.EnableJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListCompletedJobsInClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobIdsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.SubmitJobRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterArtifactRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterLabelsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterSLARequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterProto;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.DeleteJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.EnforceSLARequest;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.ExpireOldJobsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.InitializeJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.JobStartedEvent;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.KillJobRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.KillJobResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.TriggerCronRequest;
import io.mantisrx.master.jobcluster.proto.JobProto.JobInitialized;
import io.mantisrx.server.master.scheduler.WorkerEvent;

/**
 * Declares the behavior for Job Cluster Manager: one handler per message type processed by the
 * job cluster actor (cluster lifecycle, job submission/lifecycle, worker events, queries, and
 * cluster configuration updates).
 */
public interface IJobClusterManager {

    // --- Cluster lifecycle ---

    /** Handles initialization of the job cluster. */
    void onJobClusterInitialize(InitializeJobClusterRequest initReq);

    /** Handles an update to the job cluster definition. */
    void onJobClusterUpdate(UpdateJobClusterRequest request);

    /** Handles deletion of the job cluster. */
    void onJobClusterDelete(DeleteJobClusterRequest request);

    /** Handles a request to list jobs matching the request's criteria. */
    void onJobList(ListJobsRequest request);

    /** Handles a request to list completed jobs in this cluster. */
    void onJobListCompleted(ListCompletedJobsInClusterRequest request);

    /** Handles disabling of the job cluster. */
    void onJobClusterDisable(DisableJobClusterRequest req);

    /** Handles enabling of the job cluster. */
    void onJobClusterEnable(EnableJobClusterRequest req);

    /** Handles a request for this job cluster's details. */
    void onJobClusterGet(GetJobClusterRequest request);

    // --- Job lifecycle ---

    /** Handles submission of a new job to this cluster. */
    void onJobSubmit(SubmitJobRequest request);

    /** Handles notification that a submitted job finished initializing. */
    void onJobInitialized(JobInitialized jobInited);

    /** Handles notification that a job has started. */
    void onJobStarted(JobStartedEvent startedEvent);

    /** Handles a worker event routed to this cluster. */
    void onWorkerEvent(WorkerEvent r);

    /** Handles a request to kill a job. */
    void onJobKillRequest(KillJobRequest req);

    /** Handles a request to resubmit a worker of a job. */
    void onResubmitWorkerRequest(ResubmitWorkerRequest req);

    /** Handles the response to a previously issued kill-job request. */
    void onKillJobResponse(KillJobResponse killJobResponse);

    // --- Queries ---

    /** Handles a request for a job's details. */
    void onGetJobDetailsRequest(GetJobDetailsRequest req);

    /** Handles a request for the latest job discovery (scheduling) info. */
    void onGetLatestJobDiscoveryInfo(JobClusterManagerProto.GetLatestJobDiscoveryInfoRequest request);

    /** Handles a request for a job's scheduling-info status subject. */
    void onGetJobStatusSubject(JobClusterManagerProto.GetJobSchedInfoRequest request);

    /** Handles a request for the stream of last-submitted job IDs. */
    void onGetLastSubmittedJobIdSubject(JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest request);

    // --- Maintenance and configuration ---

    /** Handles a request to enforce this cluster's SLA (e.g. min/max running jobs). */
    void onEnforceSLARequest(EnforceSLARequest request);

    /** Handles a periodic bookkeeping request. */
    void onBookkeepingRequest(JobClusterProto.BookkeepingRequest request);

    /** Handles an update to the cluster's SLA. */
    void onJobClusterUpdateSLA(UpdateJobClusterSLARequest slaRequest);

    /** Handles an update to the cluster's labels. */
    void onJobClusterUpdateLabels(UpdateJobClusterLabelsRequest labelRequest);

    /** Handles an update to the cluster's artifact. */
    void onJobClusterUpdateArtifact(UpdateJobClusterArtifactRequest artifactReq);

    /** Handles an update to the cluster's worker migration strategy. */
    void onJobClusterUpdateWorkerMigrationConfig(UpdateJobClusterWorkerMigrationStrategyRequest req);

    /** Handles a request to scale a job stage's worker count. */
    void onScaleStage(ScaleStageRequest scaleStage);

    /** Handles a worker resubmit request. */
    void onResubmitWorker(ResubmitWorkerRequest r);

    /** Handles a request to list job IDs matching the request's criteria. */
    void onJobIdList(ListJobIdsRequest request);

    /** Handles a request to expire (archive) old completed jobs. */
    void onExpireOldJobs(ExpireOldJobsRequest request);

    /** Handles a request to list archived workers. */
    void onListArchivedWorkers(ListArchivedWorkersRequest request);

    /** Handles a request to list currently active workers. */
    void onListActiveWorkers(JobClusterManagerProto.ListWorkersRequest request);

    /** Handles a cron trigger firing for this cluster. */
    void onTriggerCron(TriggerCronRequest request);
}
4,369
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/LabelManager.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.jobcluster;

import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

import io.mantisrx.common.Label;
import io.mantisrx.runtime.command.InvalidJobException;
import io.mantisrx.server.master.domain.JobDefinition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Maintains the system-managed ({@code _mantis.*}) labels on job definitions: artifact and
 * version labels are kept in sync with the definition, and an isResubmit marker is added on
 * automatic resubmission.
 */
public class LabelManager {

    /** Reserved label keys managed by the system. */
    public enum SystemLabels {
        MANTIS_IS_RESUBMIT_LABEL("_mantis.isResubmit"),
        MANTIS_ARTIFACT_LABEL("_mantis.artifact"),
        MANTIS_VERSION_LABEL("_mantis.version"),
        MANTIS_SUBMITTER_LABEL("_mantis.submitter"),
        MANTIS_OWNER_EMAIL_LABEL("_mantis.ownerEmail"),
        MANTIS_CRITIALITY_LABEL("_mantis.criticality"),
        MANTIS_DATA_ORIGIN_LABEL("_mantis.dataOrigin"),
        MANTIS_JOB_TYPE_LABEL("_mantis.jobType");

        public final String label;

        SystemLabels(String s) {
            this.label = s;
        }
    }

    private static final Logger logger = LoggerFactory.getLogger(LabelManager.class);

    /** Number of mandatory system labels (artifact + version). */
    static int numberOfMandatoryLabels() {
        return 2;
    }

    /**
     * Returns a copy of the job definition with the artifact and version system labels refreshed
     * (stale copies removed first) and, when {@code autoResubmit} is set, the isResubmit label
     * added. On failure to rebuild the definition, logs the error and returns the input unchanged.
     *
     * @param resolvedJobDefn the definition to decorate
     * @param autoResubmit    whether this submission is an automatic resubmit
     */
    static JobDefinition insertSystemLabels(JobDefinition resolvedJobDefn, boolean autoResubmit) {
        JobDefinition updatedJobDefn = resolvedJobDefn;
        if (autoResubmit) {
            updatedJobDefn = insertAutoResubmitLabel(resolvedJobDefn);
        }
        String artifactName = updatedJobDefn.getArtifactName();
        String version = updatedJobDefn.getVersion();
        List<Label> labels = updatedJobDefn.getLabels();
        // remove old artifact & version label if present.
        List<Label> updatedLabels = labels.stream()
                .filter(label -> !(label.getName().equals(SystemLabels.MANTIS_ARTIFACT_LABEL.label)))
                .filter(label -> !label.getName().equals(SystemLabels.MANTIS_VERSION_LABEL.label))
                .collect(Collectors.toList());
        updatedLabels.add(new Label(SystemLabels.MANTIS_ARTIFACT_LABEL.label, artifactName));
        updatedLabels.add(new Label(SystemLabels.MANTIS_VERSION_LABEL.label, version));
        try {
            updatedJobDefn = new JobDefinition.Builder().from(updatedJobDefn)
                    .withLabels(updatedLabels).build();
            return updatedJobDefn;
        } catch (InvalidJobException e) {
            // Fix: was logger.error(e.getMessage()) which lost the stack trace and context.
            logger.error("Failed to insert system labels into job definition {}, returning it unmodified",
                    resolvedJobDefn.getName(), e);
            return resolvedJobDefn;
        }
    }

    /**
     * Returns a copy of the job definition with the isResubmit label set to "true", unless the
     * label is already present (in which case the input is returned unchanged). On failure to
     * rebuild the definition, logs the error and returns the input unchanged.
     */
    static JobDefinition insertAutoResubmitLabel(JobDefinition resolvedJobDefn) {
        List<Label> labels = resolvedJobDefn.getLabels();
        boolean alreadyHasResubmitLabel = labels.stream().anyMatch(
                label -> label.getName().equals(SystemLabels.MANTIS_IS_RESUBMIT_LABEL.label));
        if (!alreadyHasResubmitLabel) {
            List<Label> updatedLabels = new ArrayList<>(labels);
            updatedLabels.add(new Label(SystemLabels.MANTIS_IS_RESUBMIT_LABEL.label, "true"));
            try {
                JobDefinition updatedJobDefn = new JobDefinition.Builder().from(resolvedJobDefn)
                        .withLabels(updatedLabels).build();
                logger.debug("Added isResubmit label");
                return updatedJobDefn;
            } catch (InvalidJobException e) {
                // Fix: was logger.error(e.getMessage()) which lost the stack trace and context.
                logger.error("Failed to add isResubmit label to job definition {}, returning it unmodified",
                        resolvedJobDefn.getName(), e);
                return resolvedJobDefn;
            }
        } else {
            logger.debug("Job " + resolvedJobDefn.getName() + " already has isResubmit label. Don't add new");
            return resolvedJobDefn;
        }
    }
}
4,370
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/JobClusterActor.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.jobcluster; import akka.actor.AbstractActorWithTimers; import akka.actor.ActorRef; import akka.actor.Props; import akka.actor.SupervisorStrategy; import akka.actor.Terminated; import io.mantisrx.shaded.com.google.common.collect.Lists; import com.mantisrx.common.utils.LabelUtils; import com.netflix.fenzo.triggers.CronTrigger; import com.netflix.fenzo.triggers.TriggerOperator; import com.netflix.fenzo.triggers.exceptions.SchedulerException; import com.netflix.fenzo.triggers.exceptions.TriggerNotFoundException; import com.netflix.spectator.api.BasicTag; import com.netflix.spectator.impl.Preconditions; import io.mantisrx.common.Label; import io.mantisrx.common.metrics.Counter; import io.mantisrx.common.metrics.Metrics; import io.mantisrx.common.metrics.MetricsRegistry; import io.mantisrx.common.metrics.spectator.GaugeCallback; import io.mantisrx.common.metrics.spectator.MetricGroupId; import io.mantisrx.master.akka.MantisActorSupervisorStrategy; import io.mantisrx.master.api.akka.route.proto.JobClusterProtoAdapter.JobIdInfo; import io.mantisrx.master.events.LifecycleEventPublisher; import io.mantisrx.master.events.LifecycleEventsProto; import io.mantisrx.master.jobcluster.job.IMantisJobMetadata; import io.mantisrx.master.jobcluster.job.JobActor; import io.mantisrx.master.jobcluster.job.JobHelper; import io.mantisrx.master.jobcluster.job.JobState; import 
io.mantisrx.master.jobcluster.job.MantisJobMetadataImpl; import io.mantisrx.master.jobcluster.job.MantisJobMetadataView; import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DeleteJobClusterResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DisableJobClusterRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DisableJobClusterResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.EnableJobClusterRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.EnableJobClusterResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobClusterRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobClusterResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobSchedInfoRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobSchedInfoResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLatestJobDiscoveryInfoRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLatestJobDiscoveryInfoResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLastSubmittedJobIdStreamResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.JobClustersManagerInitializeResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.KillJobResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersRequest; import 
io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListCompletedJobsInClusterRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListCompletedJobsInClusterResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobCriteria; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobIdsRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobIdsResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobsRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobsResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListWorkersRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListWorkersResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.SubmitJobRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.SubmitJobResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterArtifactRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterArtifactResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterLabelsRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterLabelsResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterResponse; import 
io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterSLARequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterSLAResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyResponse; import io.mantisrx.master.jobcluster.proto.JobClusterProto; import io.mantisrx.master.jobcluster.proto.JobClusterProto.JobStartedEvent; import io.mantisrx.master.jobcluster.proto.JobClusterProto.KillJobRequest; import io.mantisrx.master.jobcluster.proto.JobProto; import io.mantisrx.runtime.JobConstraints; import io.mantisrx.runtime.descriptor.StageSchedulingInfo; import io.mantisrx.server.core.JobCompletedReason; import io.mantisrx.server.master.ConstraintsEvaluators; import io.mantisrx.server.master.InvalidJobRequest; import io.mantisrx.server.master.config.ConfigurationProvider; import io.mantisrx.server.master.domain.IJobClusterDefinition; import io.mantisrx.server.master.domain.IJobClusterDefinition.CronPolicy; import io.mantisrx.server.master.domain.JobClusterConfig; import io.mantisrx.server.master.domain.JobClusterDefinitionImpl; import io.mantisrx.server.master.domain.JobClusterDefinitionImpl.CompletedJob; import io.mantisrx.server.master.domain.JobDefinition; import io.mantisrx.server.master.domain.JobId; import io.mantisrx.server.master.domain.SLA; import io.mantisrx.server.master.persistence.MantisJobStore; import io.mantisrx.server.master.persistence.exceptions.JobClusterAlreadyExistsException; import io.mantisrx.server.master.scheduler.MantisScheduler; import io.mantisrx.server.master.scheduler.WorkerEvent; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observable; import rx.functions.Action1; import rx.schedulers.Schedulers; import rx.subjects.BehaviorSubject; import java.time.Duration; import java.time.Instant; import 
java.util.*;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.stream.Collectors;

import static akka.pattern.PatternsCS.ask;
import static io.mantisrx.master.StringConstants.MANTIS_MASTER_USER;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR_NOT_FOUND;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SERVER_ERROR;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS;
import static java.util.Optional.empty;
import static java.util.Optional.of;
import static java.util.Optional.ofNullable;

/**
 * Actor responsible for handling all operations related to one Job Cluster:
 * cluster lifecycle (initialize/update/enable/disable/delete), job submission,
 * SLA enforcement and cron triggers, periodic bookkeeping and expiry of old
 * completed jobs, plus pass-through of job-scoped requests to the per-job
 * actors it supervises. The actor switches between an "initialized" and a
 * "disabled" {@code Receive} depending on cluster state.
 *
 * @author njoshi
 */
public class JobClusterActor extends AbstractActorWithTimers implements IJobClusterManager {

    // Periodic bookkeeping (SLA checks etc.) fires every 5 seconds once initialized.
    private static final int BOOKKEEPING_INTERVAL_SECS = 5;
    private static final String BOOKKEEPING_TIMER_KEY = "JOB_CLUSTER_BOOKKEEPING";
    // Default paging limits for list-style requests.
    private static final Integer DEFAULT_LIMIT = 100;
    private static final Integer DEFAULT_ACTIVE_JOB_LIMIT = 5000;
    private final Logger logger = LoggerFactory.getLogger(JobClusterActor.class);
    // Timer key / default interval for purging expired (old completed) jobs.
    private static final String CHECK_EXPIRED_TIMER_KEY = "EXPIRE_OLD_JOBS";
    private static final long EXPIRED_JOBS_CHECK_INTERVAL_SECS = 3600;

    // Counters bound to the MetricsRegistry in the constructor.
    private final Counter numJobSubmissions;
    private final Counter numJobShutdowns;
    private final Counter numJobActorCreationCounter;
    private final Counter numJobClustersInitialized;
    private final Counter numJobClusterInitializeFailures;
    private final Counter numJobsInitialized;
    private final Counter numJobSubmissionFailures;
    private final Counter numJobClusterEnable;
    private final Counter numJobClusterEnableErrors;
    private final Counter numJobClusterDisable;
    private final Counter numJobClusterDisableErrors;
    private final Counter numJobClusterDelete;
    private final Counter numJobClusterDeleteErrors;
    private final Counter numJobClusterUpdate;
    private final Counter numJobClusterUpdateErrors;
    private final Counter numSLAEnforcementExecutions;

    /**
     * Akka {@link Props} factory for this actor.
     *
     * @param name            job cluster name (also used to tag metrics)
     * @param jobStore        persistence layer for cluster/job metadata
     * @param mantisScheduler scheduler handed down to per-job actors
     * @param eventPublisher  publisher for lifecycle/audit events
     */
    public static Props props(final String name, final MantisJobStore jobStore,
                              final MantisScheduler mantisScheduler,
                              final LifecycleEventPublisher eventPublisher) {
        return Props.create(JobClusterActor.class, name, jobStore, mantisScheduler, eventPublisher);
    }

    // Pre-built Receive instances the actor switches between via getContext().become(...).
    private Receive initializedBehavior;
    private Receive disabledBehavior;

    private final String name;
    private final MantisJobStore jobStore;
    private IJobClusterMetadata jobClusterMetadata;
    private CronManager cronManager;
    private SLAEnforcer slaEnforcer;
    private final JobManager jobManager;
    private final MantisScheduler mantisScheduler;
    private final LifecycleEventPublisher eventPublisher;
    // Emits the most recently submitted JobId; new subscribers get the last value.
    private BehaviorSubject<JobId> jobIdSubmissionSubject;
    private final JobDefinitionResolver jobDefinitionResolver = new JobDefinitionResolver();

    /**
     * Wires up collaborators, pre-builds the state-specific behaviors and
     * registers all counters/gauges for this cluster with the MetricsRegistry.
     */
    public JobClusterActor(final String name, final MantisJobStore jobStore,
                           final MantisScheduler scheduler,
                           final LifecycleEventPublisher eventPublisher) {
        this.name = name;
        this.jobStore = jobStore;
        this.mantisScheduler = scheduler;
        this.eventPublisher = eventPublisher;
        this.jobManager = new JobManager(name, getContext(), mantisScheduler, eventPublisher, jobStore);
        jobIdSubmissionSubject = BehaviorSubject.create();
        initializedBehavior = buildInitializedBehavior();
        disabledBehavior = buildDisabledBehavior();
        MetricGroupId metricGroupId = getMetricGroupId(name);
        Metrics m = new Metrics.Builder()
            .id(metricGroupId)
            .addCounter("numJobSubmissions")
            .addCounter("numJobSubmissionFailures")
            .addCounter("numJobShutdowns")
            .addCounter("numJobActorCreationCounter")
            .addCounter("numJobsInitialized")
            .addCounter("numJobClustersInitialized")
            .addCounter("numJobClusterInitializeFailures")
            .addCounter("numJobClusterEnable")
            .addCounter("numJobClusterEnableErrors")
            .addCounter("numJobClusterDisable")
            .addCounter("numJobClusterDisableErrors")
            .addCounter("numJobClusterDelete")
            .addCounter("numJobClusterDeleteErrors")
            .addCounter("numJobClusterUpdate")
            .addCounter("numJobClusterUpdateErrors")
            .addCounter("numSLAEnforcementExecutions")
            // Gauges sample live JobManager state on each poll.
            .addGauge(new GaugeCallback(metricGroupId, "acceptedJobsGauge",
                () -> 1.0 * this.jobManager.acceptedJobsCount()))
            .addGauge(new GaugeCallback(metricGroupId, "activeJobsGauge",
                () -> 1.0 * this.jobManager.activeJobsCount()))
            .addGauge(new GaugeCallback(metricGroupId, "terminatingJobsGauge",
                () -> 1.0 * this.jobManager.terminatingJobsMap.size()))
            .addGauge(new GaugeCallback(metricGroupId, "completedJobsGauge",
                () -> 1.0 * this.jobManager.completedJobsCache.completedJobs.size()))
            .addGauge(new GaugeCallback(metricGroupId, "actorToJobIdMappingsGauge",
                () -> 1.0 * this.jobManager.actorToJobIdMap.size()))
            .build();
        m = MetricsRegistry.getInstance().registerAndGet(m);
        this.numJobSubmissions = m.getCounter("numJobSubmissions");
        this.numJobActorCreationCounter = m.getCounter("numJobActorCreationCounter");
        this.numJobSubmissionFailures = m.getCounter("numJobSubmissionFailures");
        this.numJobShutdowns = m.getCounter("numJobShutdowns");
        this.numJobsInitialized = m.getCounter("numJobsInitialized");
        this.numJobClustersInitialized = m.getCounter("numJobClustersInitialized");
        this.numJobClusterInitializeFailures = m.getCounter("numJobClusterInitializeFailures");
        this.numJobClusterEnable = m.getCounter("numJobClusterEnable");
        this.numJobClusterDisable = m.getCounter("numJobClusterDisable");
        this.numJobClusterDelete = m.getCounter("numJobClusterDelete");
        this.numJobClusterUpdate = m.getCounter("numJobClusterUpdate");
        this.numJobClusterEnableErrors = m.getCounter("numJobClusterEnableErrors");
        this.numJobClusterDisableErrors = m.getCounter("numJobClusterDisableErrors");
        this.numJobClusterDeleteErrors = m.getCounter("numJobClusterDeleteErrors");
        this.numJobClusterUpdateErrors = m.getCounter("numJobClusterUpdateErrors");
        this.numSLAEnforcementExecutions =
            m.getCounter("numSLAEnforcementExecutions");
    }

    // The actor starts out in the "uninitialized" behavior until it receives
    // an InitializeJobClusterRequest from the JobClustersManager.
    @Override
    public Receive createReceive() {
        return buildInitialBehavior();
    }

    ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    /* JobCluster Actor behaviors: 30 message types in total.
       CLUSTER RELATED:
         Init, UpdateJC, UpdateLabel, UpdateSLA, UpdateArtifact, UpdateMigrationStrat,
         ENABLE JC, DISABLE JC, GET CLUSTER, DELETE,
         ENFORCE SLA, TRIGGER CRON, EXPIRE OLD JOBS,
         LIST archived workers, LIST completed jobs, GET LAST SUBMITTED JOB,
         LIST JOB IDS, LIST JOBS, LIST WORKERS (pass thru to each Job Actor)
       PASS THRU TO JOB:
         SUBMIT JOB (INIT JOB on Job Actor), GET JOB, GET JOB SCHED INFO, KILL JOB,
         RESUBMIT WORKER, KILL JOB Response, JOB SHUTDOWN EVENT, WORKER EVENT,
         SCALE JOB, JOB INITED, JOB STARTED */
    //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

    /**
     * DISABLED BEHAVIOR.
     * EXPECTED EVENTS (14): UpdateJC, UpdateLabel, UpdateSLA, UpdateArtifact,
     * UpdateMigrationStrat, ENABLE JC, GET CLUSTER, DELETE, LIST archived workers,
     * LIST completed jobs, KILL JOB Response, JOB SHUTDOWN EVENT, EXPIRE OLD JOBS,
     * WORKER EVENT (KILL WORKER).
     * UNEXPECTED EVENTS (16): Init, DISABLE JC, ENFORCE SLA, TRIGGER CRON,
     * LIST JOB IDS, LIST JOBS, LIST WORKERS, SUBMIT JOB, GET JOB,
     * GET JOB SCHED INFO, KILL JOB, RESUBMIT WORKER, SCALE JOB, JOB INITED,
     * JOB STARTED, GET LAST SUBMITTED JOB — these are answered with CLIENT_ERROR
     * or logged as unexpected.
     *
     * @return the {@link Receive} definition installed while the cluster is disabled
     */
    private Receive buildDisabledBehavior() {
        String state = "disabled";
        return receiveBuilder()
            // EXPECTED MESSAGES BEGIN //
            .match(UpdateJobClusterRequest.class, this::onJobClusterUpdate)
            .match(UpdateJobClusterLabelsRequest.class, this::onJobClusterUpdateLabels)
            .match(UpdateJobClusterSLARequest.class, this::onJobClusterUpdateSLA)
            .match(UpdateJobClusterArtifactRequest.class, this::onJobClusterUpdateArtifact)
            .match(UpdateJobClusterWorkerMigrationStrategyRequest.class,
                this::onJobClusterUpdateWorkerMigrationConfig)
            .match(GetJobClusterRequest.class, this::onJobClusterGet)
            .match(JobClusterProto.DeleteJobClusterRequest.class, this::onJobClusterDelete)
            .match(ListArchivedWorkersRequest.class, this::onListArchivedWorkers)
            .match(ListCompletedJobsInClusterRequest.class, this::onJobListCompleted)
            .match(JobClusterProto.KillJobResponse.class, this::onKillJobResponse)
            .match(GetJobDetailsRequest.class, this::onGetJobDetailsRequest)
            .match(WorkerEvent.class, this::onWorkerEvent)
            .match(JobClusterProto.ExpireOldJobsRequest.class, this::onExpireOldJobs)
            .match(EnableJobClusterRequest.class, this::onJobClusterEnable)
            .match(Terminated.class, this::onTerminated)
            // EXPECTED MESSAGES END //
            // UNEXPECTED MESSAGES BEGIN //
            // from user job submit request
            .match(SubmitJobRequest.class, (x) ->
                getSender().tell(new SubmitJobResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state), empty()), getSelf()))
            .match(ResubmitWorkerRequest.class, (x) ->
                getSender().tell(new ResubmitWorkerResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
            .match(JobProto.JobInitialized.class, (x) ->
                logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
            .match(JobStartedEvent.class, (x) ->
                logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
            .match(ScaleStageRequest.class, (x) ->
                getSender().tell(new ScaleStageResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state), 0),
                    getSelf()))
            .match(KillJobRequest.class, (x) ->
                // Reply goes to the original requestor carried in the message, not getSender().
                x.requestor.tell(new KillJobResponse(x.requestId, CLIENT_ERROR, JobState.Noop,
                    genUnexpectedMsg(x.toString(), this.name, state), x.jobId, x.user), getSelf()))
            .match(GetJobDetailsRequest.class, (x) ->
                getSender().tell(new GetJobDetailsResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state), empty()), getSelf()))
            .match(GetJobSchedInfoRequest.class, (x) ->
                getSender().tell(new GetJobSchedInfoResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state), empty()), getSelf()))
            .match(GetLatestJobDiscoveryInfoRequest.class, (x) ->
                getSender().tell(new GetLatestJobDiscoveryInfoResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state), empty()), getSelf()))
            .match(GetLastSubmittedJobIdStreamRequest.class, (x) ->
                getSender().tell(new GetLastSubmittedJobIdStreamResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state), empty()), getSelf()))
            // NOTE(review): raw 'new ArrayList()' here; buildInitialBehavior uses
            // Lists.newArrayList() for the same responses — consider aligning.
            .match(ListJobIdsRequest.class, (x) ->
                getSender().tell(new ListJobIdsResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state), new ArrayList()), getSelf()))
            .match(ListJobsRequest.class, (x) ->
                getSender().tell(new ListJobsResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state), new ArrayList()), getSelf()))
            .match(ListWorkersRequest.class, (x) ->
                getSender().tell(new ListWorkersResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state), new ArrayList()), getSelf()))
            .match(JobClusterProto.EnforceSLARequest.class, (x) ->
                logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
            .match(JobClusterProto.TriggerCronRequest.class, (x) ->
                logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
            // Disabling an already-disabled cluster is treated as success (idempotent).
            .match(DisableJobClusterRequest.class, (x) ->
                getSender().tell(new DisableJobClusterResponse(x.requestId, SUCCESS,
                    "Cluster is already disabled"), getSelf()))
            .match(Terminated.class, this::onTerminated)
            // Re-initialization of an already-initialized (disabled) cluster is a no-op success.
            .match(JobClusterProto.InitializeJobClusterRequest.class, (x) ->
                getSender().tell(new JobClustersManagerInitializeResponse(x.requestId, SUCCESS,
                    "Cluster is already initialized"), getSelf()))
            // UNEXPECTED MESSAGES END //
            .matchAny(x -> logger.warn(
                "unexpected message '{}' received by JobCluster actor {} in Disabled State",
                x, this.name))
            .build();
    }

    /** Formats the standard "unexpected message in state X" diagnostic string. */
    private String genUnexpectedMsg(String event, String cluster, String state) {
        return String.format("Unexpected message %s received by JobCluster actor %s in %s State",
            event, cluster, state);
    }

    /**
     * INITIAL (uninitialized) BEHAVIOR.
     * EXPECTED EVENTS (1): Init.
     * Every other message (29 kinds) is answered with a CLIENT_ERROR response or
     * logged as unexpected, since the cluster has not yet loaded its metadata.
     *
     * @return the {@link Receive} definition used before initialization completes
     */
    private Receive buildInitialBehavior() {
        String state = "Uninited";
        return receiveBuilder()
            // EXPECTED MESSAGES BEGIN //
            .match(JobClusterProto.InitializeJobClusterRequest.class, this::onJobClusterInitialize)
            // EXPECTED MESSAGES END //
            // UNEXPECTED MESSAGES BEGIN //
            .match(UpdateJobClusterRequest.class, (x) ->
                getSender().tell(new UpdateJobClusterResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
            .match(UpdateJobClusterLabelsRequest.class, (x) ->
                getSender().tell(new UpdateJobClusterLabelsResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
            .match(UpdateJobClusterSLARequest.class, (x) ->
                getSender().tell(new UpdateJobClusterSLAResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
            .match(UpdateJobClusterArtifactRequest.class, (x) ->
                getSender().tell(new UpdateJobClusterArtifactResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
            .match(UpdateJobClusterWorkerMigrationStrategyRequest.class, (x) ->
                getSender().tell(new UpdateJobClusterWorkerMigrationStrategyResponse(x.requestId,
                    CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
            .match(GetJobClusterRequest.class, (x) ->
                getSender().tell(new GetJobClusterResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state), empty()), getSelf()))
            .match(JobClusterProto.DeleteJobClusterRequest.class, (x) ->
                getSender().tell(new DeleteJobClusterResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
            .match(ListArchivedWorkersRequest.class, (x) ->
                getSender().tell(new ListArchivedWorkersResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state), Lists.newArrayList()), getSelf()))
            .match(ListCompletedJobsInClusterRequest.class, (x) ->
                getSender().tell(new ListCompletedJobsInClusterResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state), Lists.newArrayList()), getSelf()))
            .match(JobClusterProto.KillJobResponse.class, (x) ->
                logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
            .match(GetJobDetailsRequest.class, (x) ->
                getSender().tell(new GetJobDetailsResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state), empty()), getSelf()))
            .match(WorkerEvent.class, (x) ->
                logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
            .match(JobClusterProto.ExpireOldJobsRequest.class, (x) ->
                logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
            .match(EnableJobClusterRequest.class, (x) ->
                getSender().tell(new EnableJobClusterResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
            .match(SubmitJobRequest.class, (x) ->
                getSender().tell(new SubmitJobResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state), empty()), getSelf()))
            .match(ResubmitWorkerRequest.class, (x) ->
                getSender().tell(new ResubmitWorkerResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
            .match(JobProto.JobInitialized.class, (x) ->
                logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
            .match(JobStartedEvent.class, (x) ->
                logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
            .match(ScaleStageRequest.class, (x) ->
                getSender().tell(new ScaleStageResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state), 0), getSelf()))
            .match(KillJobRequest.class, (x) ->
                getSender().tell(new KillJobResponse(x.requestId, CLIENT_ERROR, JobState.Noop,
                    genUnexpectedMsg(x.toString(), this.name, state), x.jobId, x.user), getSelf()))
            .match(GetJobSchedInfoRequest.class, (x) ->
                getSender().tell(new GetJobSchedInfoResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state), empty()), getSelf()))
            .match(GetLatestJobDiscoveryInfoRequest.class, (x) ->
                getSender().tell(new GetLatestJobDiscoveryInfoResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state), empty()), getSelf()))
            .match(GetLastSubmittedJobIdStreamRequest.class, (x) ->
                getSender().tell(new GetLastSubmittedJobIdStreamResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state), empty()), getSelf()))
            .match(ListJobIdsRequest.class, (x) ->
                getSender().tell(new ListJobIdsResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state), Lists.newArrayList()), getSelf()))
            .match(ListJobsRequest.class, (x) ->
                getSender().tell(new ListJobsResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(),
                    this.name, state), Lists.newArrayList()), getSelf()))
            .match(ListWorkersRequest.class, (x) ->
                getSender().tell(new ListWorkersResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state), Lists.newArrayList()), getSelf()))
            .match(JobClusterProto.EnforceSLARequest.class, (x) ->
                logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
            // NOTE(review): ExpireOldJobsRequest is already matched earlier in this
            // builder; this duplicate clause never fires — confirm and remove.
            .match(JobClusterProto.ExpireOldJobsRequest.class, (x) ->
                logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
            .match(JobClusterProto.TriggerCronRequest.class, (x) ->
                logger.warn(genUnexpectedMsg(x.toString(), this.name, state)))
            .match(DisableJobClusterRequest.class, (x) ->
                getSender().tell(new DisableJobClusterResponse(x.requestId, CLIENT_ERROR,
                    genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
            .match(Terminated.class, this::onTerminated)
            // UNEXPECTED MESSAGES END //
            .matchAny(x -> logger.warn(
                "unexpected message '{}' received by JobCluster actor {} in Uninited State",
                x, this.name))
            .build();
    }

    /**
     * INITED BEHAVIOR — the normal operating state.
     * EXPECTED EVENTS (29): all cluster updates, enable/disable, get/delete,
     * list operations, SLA/bookkeeping/cron timers, and job-scoped requests
     * passed through to the owning Job Actor.
     * UNEXPECTED EVENTS (1): Init (answered as already initialized).
     *
     * @return the {@link Receive} definition installed once initialization completes
     */
    private Receive buildInitializedBehavior() {
        String state = "Initialized";
        return receiveBuilder()
            // EXPECTED MESSAGES BEGIN //
            .match(UpdateJobClusterRequest.class, this::onJobClusterUpdate)
            .match(UpdateJobClusterLabelsRequest.class, this::onJobClusterUpdateLabels)
            .match(UpdateJobClusterSLARequest.class, this::onJobClusterUpdateSLA)
            .match(UpdateJobClusterArtifactRequest.class, this::onJobClusterUpdateArtifact)
            .match(UpdateJobClusterWorkerMigrationStrategyRequest.class,
                this::onJobClusterUpdateWorkerMigrationConfig)
            // Enabling an already-enabled cluster is reported as success.
            .match(EnableJobClusterRequest.class, (x) ->
                getSender().tell(new EnableJobClusterResponse(x.requestId, SUCCESS,
                    genUnexpectedMsg(x.toString(), this.name, state)), getSelf()))
            .match(GetJobClusterRequest.class, this::onJobClusterGet)
            .match(JobClusterProto.DeleteJobClusterRequest.class, this::onJobClusterDelete)
            .match(ListArchivedWorkersRequest.class, this::onListArchivedWorkers)
            .match(ListCompletedJobsInClusterRequest.class, this::onJobListCompleted)
            .match(JobClusterProto.KillJobResponse.class, this::onKillJobResponse)
            .match(JobClusterProto.ExpireOldJobsRequest.class, this::onExpireOldJobs)
            .match(WorkerEvent.class, this::onWorkerEvent)
            .match(DisableJobClusterRequest.class, this::onJobClusterDisable)
            .match(JobClusterProto.EnforceSLARequest.class, this::onEnforceSLARequest)
            .match(JobClusterProto.BookkeepingRequest.class, this::onBookkeepingRequest)
            .match(JobClusterProto.TriggerCronRequest.class, this::onTriggerCron)
            .match(ListJobIdsRequest.class, this::onJobIdList)
            .match(ListJobsRequest.class, this::onJobList)
            .match(ListWorkersRequest.class, this::onListActiveWorkers)
            .match(SubmitJobRequest.class, this::onJobSubmit)
            .match(GetJobDetailsRequest.class, this::onGetJobDetailsRequest)
            .match(GetJobSchedInfoRequest.class, this::onGetJobStatusSubject)
            .match(GetLatestJobDiscoveryInfoRequest.class, this::onGetLatestJobDiscoveryInfo)
            .match(KillJobRequest.class, this::onJobKillRequest)
            .match(ResubmitWorkerRequest.class, this::onResubmitWorkerRequest)
            .match(JobProto.JobInitialized.class, this::onJobInitialized)
            .match(JobStartedEvent.class, this::onJobStarted)
            .match(GetLastSubmittedJobIdStreamRequest.class,
                this::onGetLastSubmittedJobIdSubject)
            .match(ScaleStageRequest.class, this::onScaleStage)
            // EXPECTED MESSAGES END //
            // UNEXPECTED MESSAGES BEGIN //
            .match(JobClusterProto.InitializeJobClusterRequest.class, (x) ->
                getSender().tell(new JobClustersManagerInitializeResponse(x.requestId, SUCCESS,
                    "Cluster is already initialized"), getSelf()))
            // UNEXPECTED MESSAGES END //
            .match(Terminated.class, this::onTerminated)
            .matchAny(x -> {
                logger.info("unexpected message '{}' received by JobCluster actor {} in Initialized State."
                    + "from class {}", x, this.name, x.getClass().getCanonicalName());
                // TODO getSender().tell();
            })
            .build();
    }

    /** Metric group for this cluster; all metrics are tagged with the cluster name. */
    MetricGroupId getMetricGroupId(String name) {
        return new MetricGroupId("JobClusterActor", new BasicTag("jobCluster", name));
    }

    @Override
    public void preStart() throws Exception {
        logger.info("JobClusterActor {} started", name);
        super.preStart();
    }

    @Override
    public void postStop() throws Exception {
        logger.info("JobClusterActor {} stopped", name);
        super.postStop();
        if (name != null) {
            // de-register metrics from MetricsRegistry
            MetricsRegistry.getInstance().remove(getMetricGroupId(name));
        }
    }

    @Override
    public void preRestart(Throwable t, Optional<Object> m) throws Exception {
        logger.info("{} preRestart {} (exc: {})", name, m, t.getMessage());
        // do not kill all children, which is the default here
        // super.preRestart(t, m);
    }

    @Override
    public void postRestart(Throwable reason) throws Exception {
        logger.info("{} postRestart (exc={})", name, reason.getMessage());
        super.postRestart(reason);
    }

    @Override
    public SupervisorStrategy supervisorStrategy() {
        // custom supervisor strategy to resume the child actors on Exception instead of the default restart
        return MantisActorSupervisorStrategy.getInstance().create();
    }

    /** Arms the periodic bookkeeping timer (SLA checks etc.). */
    private void setBookkeepingTimer(long checkAgainInSecs) {
        getTimers().startPeriodicTimer(BOOKKEEPING_TIMER_KEY,
            new JobClusterProto.BookkeepingRequest(), Duration.ofSeconds(checkAgainInSecs));
    }

    /** Arms the periodic timer that purges expired (old completed) jobs. */
    private void setExpiredJobsTimer(long
checkAgainInSecs) {
        getTimers().startPeriodicTimer(CHECK_EXPIRED_TIMER_KEY,
            new JobClusterProto.ExpireOldJobsRequest(), Duration.ofSeconds(checkAgainInSecs));
    }

    /**
     * Initialize cluster request sent by JCM. Called in the following cases:
     * 1. Master bootup: the cluster already exists in the DB.
     * 2. A new cluster is being created: requires the createInStore flag to be set.
     *    If writing to the DB fails, a failure message is sent back; the caller
     *    should then kill this actor and inform upstream of the failure.
     * On success the actor transitions to the initialized (or disabled) behavior.
     *
     * @param initReq initialization payload: cluster definition, disabled flag,
     *                last job number, plus the active and completed job lists
     */
    @Override
    public void onJobClusterInitialize(JobClusterProto.InitializeJobClusterRequest initReq) {
        ActorRef sender = getSender();
        logger.info("In onJobClusterInitialize {}", this.name);
        if (logger.isDebugEnabled()) {
            logger.debug("Init Request {}", initReq);
        }
        jobClusterMetadata = new JobClusterMetadataImpl.Builder()
            .withLastJobCount(initReq.lastJobNumber)
            .withIsDisabled(initReq.isDisabled)
            .withJobClusterDefinition(initReq.jobClusterDefinition)
            .build();
        // create sla enforcer
        slaEnforcer = new SLAEnforcer(jobClusterMetadata.getJobClusterDefinition().getSLA());
        long expireFrequency = ConfigurationProvider.getConfig().getCompletedJobPurgeFrequencySeqs();
        // If cluster is disabled
        if (jobClusterMetadata.isDisabled()) {
            logger.info("Cluster {} initialized but is Disabled", jobClusterMetadata
                .getJobClusterDefinition().getName());

            // add completed jobs to cache to use when / if cluster is reenabled
            jobManager.addCompletedJobsToCache(initReq.completedJobsList);

            // Cap cleanup of stray non-terminal jobs at 50 per startup pass.
            int count = 50;
            if (!initReq.jobList.isEmpty()) {
                logger.info("Cluster {} is disabled however it has {} active/accepted jobs",
                    jobClusterMetadata.getJobClusterDefinition().getName(), initReq.jobList.size());
                for (IMantisJobMetadata jobMeta : initReq.jobList) {
                    try {
                        if (count == 0) {
                            logger.info("Max cleanup limit of 50 reached abort");
                            break;
                        }
                        // A disabled cluster should have no running jobs; force-complete and archive.
                        if (!JobState.isTerminalState(jobMeta.getState())) {
                            logger.info("Job {} is in non terminal state {} for disabled cluster {}."
                                + "Marking it complete", jobMeta.getJobId(), jobMeta.getState(),
                                jobClusterMetadata.getJobClusterDefinition().getName());
                            count--;
                            jobManager.markCompletedDuringStartup(jobMeta.getJobId(),
                                System.currentTimeMillis(), jobMeta, JobState.Completed);
                            jobStore.archiveJob(jobMeta);
                        }
                    } catch (Exception e) {
                        // best-effort cleanup; continue with the remaining jobs
                        logger.error("Exception {} archiving job {} during init ",
                            e.getMessage(), jobMeta.getJobId());
                    }
                }
            }
            sender.tell(new JobClusterProto.InitializeJobClusterResponse(initReq.requestId, SUCCESS,
                String.format("JobCluster %s initialized successfully. But is currently disabled",
                    initReq.jobClusterDefinition.getName()),
                initReq.jobClusterDefinition.getName(), initReq.requestor), getSelf());
            // NOTE(review): "frquency" typo in the log message below (runtime string left as-is).
            logger.info("Job expiry check frquency set to {}", expireFrequency);
            setExpiredJobsTimer(expireFrequency);
            getContext().become(disabledBehavior);
            return;
        } else {
            // new cluster initialization
            if (initReq.createInStore) {
                try {
                    jobStore.createJobCluster(jobClusterMetadata);
                    eventPublisher.publishAuditEvent(
                        new LifecycleEventsProto.AuditEvent(
                            LifecycleEventsProto.AuditEvent.AuditEventType.JOB_CLUSTER_CREATE,
                            jobClusterMetadata.getJobClusterDefinition().getName(),
                            "saved job cluster " + name)
                    );
                    logger.info("successfully saved job cluster {}", name);
                    numJobClustersInitialized.increment();
                } catch (final JobClusterAlreadyExistsException exists) {
                    numJobClusterInitializeFailures.increment();
                    logger.error("job cluster not created");
                    sender.tell(new JobClusterProto.InitializeJobClusterResponse(initReq.requestId,
                        CLIENT_ERROR,
                        String.format("JobCluster %s already exists", initReq.jobClusterDefinition.getName()),
                        initReq.jobClusterDefinition.getName(), initReq.requestor), getSelf());
                    // TODO: handle case when job cluster exists in store but Job cluster actor is not running
                    return;
                } catch (final Exception e) {
                    numJobClusterInitializeFailures.increment();
                    logger.error("job cluster not created due to {}", e.getMessage(), e);
                    sender.tell(new
                        JobClusterProto.InitializeJobClusterResponse(initReq.requestId, SERVER_ERROR,
                        String.format("JobCluster %s not created due to %s",
                            initReq.jobClusterDefinition.getName(), e.getMessage()),
                        initReq.jobClusterDefinition.getName(), initReq.requestor), getSelf());
                    // TODO: send PoisonPill to self if job cluster was not created ? Return'ing for now,
                    // so we don't send back 2 InitJobClusterResponses
                    return;
                }
            }
            // Cron setup failure is non-fatal; the cluster still initializes.
            try {
                cronManager = new CronManager(name, getSelf(),
                    jobClusterMetadata.getJobClusterDefinition().getSLA());
            } catch (Exception e) {
                logger.warn("Exception initializing cron {}", e);
            }
            initRunningJobs(initReq, sender);
            setExpiredJobsTimer(expireFrequency);
            logger.info("Job expiry check frquency set to {}", expireFrequency);
            try {
                jobManager.addCompletedJobsToCache(initReq.completedJobsList);
            } catch (Exception e) {
                logger.warn("Exception initializing completed jobs " + e.getMessage());
            }
        }
    }

    /**
     * Iterate through the list of jobs in the Active jobs table:
     * if a job is completed, move it to the completed table; otherwise bootstrap
     * the job (create its actor, send the init request). Finally set up SLA
     * enforcement, switch to the initialized behavior and ack the requestor.
     *
     * @param initReq the initialization request carrying active/completed job lists
     * @param sender  the actor to ack once all jobs are bootstrapped
     */
    private void initRunningJobs(JobClusterProto.InitializeJobClusterRequest initReq, ActorRef sender) {
        List<CompletedJob> completedJobsList = initReq.completedJobsList;
        List<IMantisJobMetadata> jobList = initReq.jobList;

        logger.info("In _initJobs for cluster {}: {} activeJobs and {} completedJobs",
            name, jobList.size(), completedJobsList.size());
        if (logger.isDebugEnabled()) {
            logger.debug("In _initJobs for cluster {} activeJobs -> {} and completedJobs -> {}",
                name, jobList, completedJobsList);
        }

        Observable.from(jobList)
            .flatMap((jobMeta) -> {
                // Jobs already terminal are archived instead of being bootstrapped.
                if (JobState.isTerminalState(jobMeta.getState())) {
                    jobManager.persistToCompletedJobAndArchiveJobTables(jobMeta);
                    return Observable.empty();
                } else {
                    if (jobMeta.getSchedulingInfo() == null) {
                        logger.error("Scheduling info is null for active job {} in cluster {}."
+ "Skipping bootstrap ", jobMeta.getJobId(), name); return Observable.empty(); } else { return Observable.just(jobMeta); } } }) // .flatMap((jobMeta) -> jobManager.bootstrapJob((MantisJobMetadataImpl)jobMeta, this.jobClusterMetadata)) .subscribe((jobInited) -> { logger.info("Job Id {} initialized with code {}", jobInited.jobId, jobInited.responseCode); }, (error) -> logger.warn("Exception initializing jobs {}", error.getMessage()) ,() -> { // Push the last jobId if(initReq.jobList.size() > 0) { JobId lastJobId = new JobId(this.name, initReq.lastJobNumber); this.jobIdSubmissionSubject.onNext(lastJobId); } setBookkeepingTimer(BOOKKEEPING_INTERVAL_SECS); getContext().become(initializedBehavior); logger.info("Job Cluster {} initialized", this.name); sender.tell(new JobClusterProto.InitializeJobClusterResponse(initReq.requestId, SUCCESS, String.format("JobCluster %s initialized successfully", initReq.jobClusterDefinition.getName()), initReq.jobClusterDefinition.getName(), initReq.requestor), getSelf()); } ); } @Override public void onJobClusterUpdate(final UpdateJobClusterRequest request) { final String name = request.getJobClusterDefinition().getName(); final ActorRef sender = getSender(); String givenArtifactVersion = request.getJobClusterDefinition().getJobClusterConfig().getVersion(); if (!isVersionUnique(givenArtifactVersion, jobClusterMetadata.getJobClusterDefinition() .getJobClusterConfigs())) { String msg = String.format("Job cluster %s not updated as the version %s is not unique", name, givenArtifactVersion); logger.error(msg); sender.tell(new UpdateJobClusterResponse(request.requestId, CLIENT_ERROR, msg), getSelf()); return; } IJobClusterDefinition currentJobClusterDefinition = jobClusterMetadata.getJobClusterDefinition(); JobClusterDefinitionImpl mergedJobClusterDefinition = new JobClusterDefinitionImpl.Builder() .mergeConfigsAndOverrideRest(currentJobClusterDefinition, request.getJobClusterDefinition()).build(); IJobClusterMetadata jobCluster = new 
JobClusterMetadataImpl.Builder() .withIsDisabled(jobClusterMetadata.isDisabled()) .withLastJobCount(jobClusterMetadata.getLastJobCount()) .withJobClusterDefinition(mergedJobClusterDefinition) .build(); try { updateAndSaveJobCluster(jobCluster); sender.tell(new UpdateJobClusterResponse(request.requestId, SUCCESS, name + " Job cluster updated"), getSelf()); numJobClusterUpdate.increment(); } catch (Exception e) { logger.error("job cluster not created"); sender.tell(new UpdateJobClusterResponse(request.requestId, SERVER_ERROR, name + " Job cluster updation failed " + e.getMessage()), getSelf()); numJobClusterUpdateErrors.increment(); } } @Override public void onJobClusterDelete(final JobClusterProto.DeleteJobClusterRequest request) { final ActorRef sender = getSender(); try { if(jobManager.isJobListEmpty()) { jobManager.cleanupAllCompletedJobs(); jobStore.deleteJobCluster(name); logger.info("successfully deleted job cluster {}", name); eventPublisher.publishAuditEvent( new LifecycleEventsProto.AuditEvent(LifecycleEventsProto.AuditEvent.AuditEventType.JOB_CLUSTER_DELETE, name, name + " deleted") ); sender.tell(new JobClusterProto.DeleteJobClusterResponse(request.requestId, SUCCESS, name + " deleted", request.requestingActor, name), getSelf()); numJobClusterDelete.increment(); } else { logger.warn("job cluster {} cannot be deleted as it has active jobs", name); sender.tell(new JobClusterProto.DeleteJobClusterResponse(request.requestId, CLIENT_ERROR, name + " Job cluster deletion failed as there are active jobs", request.requestingActor,name), getSelf()); } } catch( Exception e) { logger.error("job cluster {} not deleted", name); sender.tell(new JobClusterProto.DeleteJobClusterResponse(request.requestId, SERVER_ERROR, name + " Job cluster deletion failed " + e.getMessage(), request.requestingActor,name), getSelf()); numJobClusterDeleteErrors.increment(); } } @Override public void onJobIdList(final ListJobIdsRequest request) { if(logger.isTraceEnabled()) { 
logger.trace("Entering JCA:onJobIdList");
    }
    final ActorRef sender = getSender();
    Set<JobId> jobIdsFilteredByLabelsSet = new HashSet<>();
    // If labels criterion is given prefilter by labels
    if(!request.getCriteria().getMatchingLabels().isEmpty()) {
        jobIdsFilteredByLabelsSet = jobManager.getJobsMatchingLabels(request.getCriteria().getMatchingLabels(), request.getCriteria().getLabelsOperand());
        // Found no matching jobs for given labels exit
        if(jobIdsFilteredByLabelsSet.isEmpty()) {
            // NOTE(review): this reply passes 'sender' as the sending ref; most handlers in
            // this actor use getSelf() -- confirm intentional.
            sender.tell(new ListJobIdsResponse(request.requestId, SUCCESS, "No JobIds match given Label criterion", new ArrayList<>()), sender);
            if(logger.isTraceEnabled()) {
                logger.trace("Exit JCA:onJobIdList");
            }
            return;
        }
    }
    // Found jobs matching labels or no labels criterion given.
    List<JobIdInfo> jobIdList;
    // Apply additional filtering to non terminal jobs
    jobIdList = getFilteredNonTerminalJobIdList(request.filters, jobIdsFilteredByLabelsSet);
    // Terminal jobs are included only if the caller explicitly set activeOnly=false
    // (absent activeOnly defaults to active-only behavior here).
    if(!request.getCriteria().getActiveOnly().orElse(true)) {
        jobIdList.addAll(getFilteredTerminalJobIdList(request.filters, jobIdsFilteredByLabelsSet));
    }
    sender.tell(new ListJobIdsResponse(request.requestId, SUCCESS, "", jobIdList), sender);
    if(logger.isTraceEnabled()) {
        logger.trace("Exit JCA:onJobIdList");
    }
}

/**
 * Lists full job metadata views: optionally prefilters by labels, then merges the
 * filtered active and completed job streams and replies with the combined list.
 */
@Override
public void onJobList(final ListJobsRequest request) {
    if(logger.isDebugEnabled()) {
        // NOTE(review): info-level log behind an isDebugEnabled() guard -- probably meant logger.debug.
        logger.info("Entering JCA:onJobList");
    }
    final ActorRef sender = getSender();
    final ActorRef self = getSelf();
    Set<JobId> jobIdsFilteredByLabelsSet = new HashSet<>();
    // If labels criterion is given prefilter by labels
    if(!request.getCriteria().getMatchingLabels().isEmpty()) {
        jobIdsFilteredByLabelsSet = jobManager.getJobsMatchingLabels(request.getCriteria().getMatchingLabels(), request.getCriteria().getLabelsOperand());
        // Found no jobs matching labels exit
        if(jobIdsFilteredByLabelsSet.isEmpty()) {
            if(logger.isTraceEnabled()) {
                logger.trace("Exit JCA:onJobList {}" , jobIdsFilteredByLabelsSet.size());
            }
            sender.tell(new ListJobsResponse(request.requestId,
SUCCESS, "", new ArrayList<>()), self);
            return;
        }
    }
    // Found jobs matching labels or no labels criterion given.
    // Apply additional criterion to both active and completed jobs
    getFilteredNonTerminalJobList(request.getCriteria(),jobIdsFilteredByLabelsSet).mergeWith(getFilteredTerminalJobList(request.getCriteria(),jobIdsFilteredByLabelsSet))
            .collect(() -> Lists.<MantisJobMetadataView>newArrayList(), List::add)
            .doOnNext(resultList -> {
                if(logger.isTraceEnabled()) {
                    logger.trace("Exit JCA:onJobList {}" , resultList.size());
                }
                sender.tell(new ListJobsResponse(request.requestId, SUCCESS, "", resultList), self);
            })
            .subscribe();
}

/**
 * Replies with up to 'limit' archived (terminated) workers for the given job,
 * read directly from the job store.
 */
@Override
public void onListArchivedWorkers(final ListArchivedWorkersRequest request) {
    if(logger.isTraceEnabled()) {
        logger.trace("In onListArchiveWorkers {}", request);
    }
    try {
        List<IMantisWorkerMetadata> workerList = jobStore.getArchivedWorkers(request.getJobId().getId());
        if(workerList.size() > request.getLimit()) {
            workerList = workerList.subList(0, request.getLimit());
        }
        if(logger.isTraceEnabled()) {
            logger.trace("Returning {} archived Workers", workerList.size());
        }
        getSender().tell(new ListArchivedWorkersResponse(request.requestId, SUCCESS, "", workerList), getSelf());
    } catch(Exception e) {
        logger.error("Exception listing archived workers", e);
        getSender().tell(new ListArchivedWorkersResponse(request.requestId, SERVER_ERROR, "Exception getting archived workers for job " + request.getJobId() + " -> " + e.getMessage(), Lists.newArrayList()), getSelf());
    }
}

// Forwards the request to the owning job actor when the job is still active;
// otherwise replies with CLIENT_ERROR.
public void onListActiveWorkers(final ListWorkersRequest r) {
    if(logger.isTraceEnabled()) {
        logger.trace("Enter JobClusterActor:onListActiveWorkers {}", r);
    }
    Optional<JobInfo> jobInfo = jobManager.getJobInfoForNonTerminalJob(r.getJobId());
    if(jobInfo.isPresent()) {
        jobInfo.get().jobActor.forward(r, getContext());
    } else {
        logger.warn("No such active job {} ", r.getJobId());
        getSender().tell(new ListWorkersResponse(r.requestId,CLIENT_ERROR,"No such active job " + r.getJobId(),
Lists.newArrayList()),getSelf());
    }
    if(logger.isTraceEnabled()) {
        logger.trace("Exit JobClusterActor:onListActiveWorkers {}", r);
    }
}

/**
 * Builds the JobIdInfo list for non-terminal jobs, starting from the label-prefiltered
 * set when non-empty, truncated to the requested limit. Returns empty when the criteria
 * ask only for terminal jobs.
 */
private List<JobIdInfo> getFilteredNonTerminalJobIdList(ListJobCriteria request, Set<JobId> prefilteredJobIdSet) {
    if(logger.isTraceEnabled()) {
        logger.trace("Enter JobClusterActor:getFilteredNonTerminalJobIdList {}", request);
    }
    if((request.getJobState().isPresent() && request.getJobState().get().equals(JobState.MetaState.Terminal))) {
        if(logger.isTraceEnabled()) {
            logger.trace("Exit JobClusterActor:getFilteredNonTerminalJobIdList with empty");
        }
        return Collections.emptyList();
    }
    List<JobInfo> jobInfoList;
    if(!prefilteredJobIdSet.isEmpty()) {
        jobInfoList = prefilteredJobIdSet.stream().map((jId) -> jobManager.getJobInfoForNonTerminalJob(jId))
                .filter((jInfoOp) -> jInfoOp.isPresent()).map((jInfoOp) -> jInfoOp.get()).collect(Collectors.toList());
    } else {
        jobInfoList = jobManager.getAllNonTerminalJobsList();
    }
    List<JobInfo> shortenedList = jobInfoList.subList(0, Math.min(jobInfoList.size(), request.getLimit().orElse(DEFAULT_LIMIT)));
    List<JobIdInfo> jIdList = shortenedList.stream()
            .map((JobInfo jInfo) -> new JobIdInfo.Builder()
                    .withJobId(jInfo.jobId)
                    .withJobState(jInfo.state)
                    .withSubmittedAt(jInfo.submittedAt)
                    .withTerminatedAt(jInfo.terminatedAt)
                    .withUser(jInfo.user)
                    .withVersion(jInfo.jobDefinition.getVersion())
                    .build())
            .collect(Collectors.toList());;
    if(logger.isTraceEnabled()) {
        logger.trace("Exit JobClusterActor:getFilteredNonTerminalJobIdList {}", jIdList.size());
    }
    return jIdList;
}

/**
 * Builds the JobIdInfo list for completed (terminal) jobs, truncated to the requested
 * limit. Returns empty when the criteria exclude terminal jobs (an explicit
 * non-terminal state, or no state given with activeOnly=true).
 */
private List<JobIdInfo> getFilteredTerminalJobIdList(ListJobCriteria request, Set<JobId> prefilteredJobIdSet) {
    if(logger.isTraceEnabled()) {
        logger.trace("Enter JobClusterActor:getFilteredTerminalJobIdList {}", request);
    }
    if((request.getJobState().isPresent() && !request.getJobState().get().equals(JobState.MetaState.Terminal))) {
        if(logger.isTraceEnabled()) {
            logger.trace("Exit JobClusterActor:getFilteredTerminalJobIdList with empty");
        }
        return Collections.emptyList();
    } else if(!request.getJobState().isPresent() && (request.getActiveOnly().isPresent() && request.getActiveOnly().get())) {
        if(logger.isTraceEnabled()) {
            logger.trace("Exit JobClusterActor:getFilteredTerminalJobIdList with empty");
        }
        return Collections.emptyList();
    }
    List<CompletedJob> completedJobsList;
    if(!prefilteredJobIdSet.isEmpty()) {
        completedJobsList = prefilteredJobIdSet.stream().map((jId) -> jobManager.getCompletedJob(jId)).filter((cjOp) -> cjOp.isPresent()).map((cjop) -> cjop.get()).collect(Collectors.toList());
    } else {
        completedJobsList = jobManager.getCompletedJobsList();
    }
    List<CompletedJob> subsetCompletedJobs = completedJobsList.subList(0, Math.min(completedJobsList.size(), request.getLimit().orElse(DEFAULT_LIMIT)));
    List<JobIdInfo> completedJobIdList = subsetCompletedJobs.stream()
            .map((CompletedJob cJob) -> new JobIdInfo.Builder()
                    .withJobIdStr(cJob.getJobId())
                    .withVersion(cJob.getVersion())
                    .withUser(cJob.getUser())
                    .withSubmittedAt(cJob.getSubmittedAt())
                    .withTerminatedAt(cJob.getTerminatedAt())
                    .withJobState(cJob.getState())
                    .build())
            .filter(Objects::nonNull)
            .collect(Collectors.toList());
    if(logger.isTraceEnabled()) {
        logger.trace("Exit JobClusterActor:getFilteredTerminalJobIdList {}", completedJobIdList.size());
    }
    return completedJobIdList;
}

/**
 * Streams MantisJobMetadataView for non-terminal jobs matching the criteria.
 * Details are obtained by ask()'ing each job actor with a 500ms timeout; actors
 * that fail to answer in time are skipped (onErrorResumeNext -> empty).
 */
private Observable<MantisJobMetadataView> getFilteredNonTerminalJobList(ListJobCriteria request, Set<JobId> prefilteredJobIdSet) {
    if(logger.isTraceEnabled()) {
        logger.trace("Entering JobClusterActor:getFilteredNonTerminalJobList");
    }
    Duration timeout = Duration.ofMillis(500);
    if((request.getJobState().isPresent() && request.getJobState().get().equals(JobState.MetaState.Terminal))) {
        if(logger.isTraceEnabled()) {
            logger.trace("Exit JobClusterActor:getFilteredNonTerminalJobList with empty");
        }
        return Observable.empty();
    }
    List<JobInfo> jobInfoList;
    if(!prefilteredJobIdSet.isEmpty()) {
        jobInfoList = prefilteredJobIdSet.stream().map((jId) -> jobManager.getJobInfoForNonTerminalJob(jId))
                .filter((jInfoOp) -> jInfoOp.isPresent()).map((jInfoOp) -> jInfoOp.get()).collect(Collectors.toList());
    } else {
        // no prefiltering applied start with complete set of non terminal jobs
        jobInfoList = jobManager.getAllNonTerminalJobsList();
    }
    List<JobInfo> shortenedList = jobInfoList.subList(0, Math.min(jobInfoList.size(), request.getLimit().orElse(DEFAULT_ACTIVE_JOB_LIMIT)));
    if(logger.isDebugEnabled()) {
        logger.debug("List of non terminal jobs {}", jobInfoList);
    }
    return Observable.from(shortenedList)
            .flatMap((jInfo) -> {
                GetJobDetailsRequest req = new GetJobDetailsRequest("system", jInfo.jobId);
                CompletionStage<GetJobDetailsResponse> respCS = ask(jInfo.jobActor, req, timeout)
                        .thenApply(GetJobDetailsResponse.class::cast);
                return Observable.from(respCS.toCompletableFuture(), Schedulers.io())
                        .onErrorResumeNext(ex -> {
                            logger.warn("caught exception {}", ex.getMessage(), ex);
                            return Observable.empty();
                        });
            })
            .filter((resp) -> resp != null && resp.getJobMetadata().isPresent())
            .map((resp) -> resp.getJobMetadata().get())
            .map((metaData) -> new MantisJobMetadataView(metaData, request.getStageNumberList(),
                    request.getWorkerIndexList(), request.getWorkerNumberList(), request.getWorkerStateList(),false));
}

/**
 * Truth table of when this method produces results:
 * JobState   ActiveOnly   Execute?
 * None       None         Y
 * None       TRUE         N
 * None       FALSE        Y
 * Active     None         N
 * Active     TRUE         N
 * Active     FALSE        N
 * Terminal   None         Y
 * Terminal   TRUE         Y
 * Terminal   FALSE        Y
 * @param request list criteria (state, activeOnly, limit, worker filters)
 * @return Observable of views for matching completed jobs
 */
private Observable<MantisJobMetadataView> getFilteredTerminalJobList(ListJobCriteria request, Set<JobId> jobIdSet) {
    if(logger.isTraceEnabled()) {
        logger.trace("JobClusterActor:getFilteredTerminalJobList");
    }
    if((request.getJobState().isPresent() && !request.getJobState().get().equals(JobState.MetaState.Terminal))) {
        if(logger.isTraceEnabled()) {
            logger.trace("Exit JobClusterActor:getFilteredTerminalJobList with empty");
        }
        return Observable.empty();
    } else if(!request.getJobState().isPresent() && (request.getActiveOnly().isPresent() && request.getActiveOnly().get())) {
        if(logger.isTraceEnabled()) {
            logger.trace("Exit JobClusterActor:getFilteredTerminalJobList with empty");
        }
        return Observable.empty();
    }
    List<CompletedJob> jobInfoList;
    if(!jobIdSet.isEmpty()) {
        jobInfoList = jobIdSet.stream().map((jId) -> jobManager.getCompletedJob(jId))
                .filter((compJobOp) -> compJobOp.isPresent()).map((compJobOp) -> compJobOp.get()).collect(Collectors.toList());
    } else {
        jobInfoList = jobManager.getCompletedJobsList();
    }
    List<CompletedJob> shortenedList = jobInfoList.subList(0, Math.min(jobInfoList.size(), request.getLimit().orElse(DEFAULT_LIMIT)));
    return Observable.from(shortenedList)
            // terminatedAt comes from completed Job hence the different structure
            .flatMap((cJob) -> {
                try {
                    if(logger.isDebugEnabled()) {
                        logger.debug("Fetching details for completed job {}", cJob);
                    }
                    Optional<IMantisJobMetadata> metaOp = jobManager.getJobDataForCompletedJob(cJob.getJobId());
                    if(metaOp.isPresent()) {
                        if(logger.isDebugEnabled()) {
                            logger.debug ("Fetched details for completed job {} -> {}", cJob, metaOp.get());
                        }
                        return Observable.just(new MantisJobMetadataView(metaOp.get(),cJob.getTerminatedAt(),
                                request.getStageNumberList(), request.getWorkerIndexList(), request.getWorkerNumberList(),
request.getWorkerStateList(),false)); } } catch(Exception e) { logger.error("caught exception", e); return Observable.empty(); } return Observable.empty(); }); } @Override public void onJobListCompleted(final ListCompletedJobsInClusterRequest request) { if(logger.isTraceEnabled()) { logger.trace ("Enter onJobListCompleted {}", request); } final ActorRef sender = getSender(); List<CompletedJob> completedJobsList = jobManager.getCompletedJobsList(); if(request.getLimit() > completedJobsList.size()) { completedJobsList = completedJobsList.subList(0, request.getLimit()); } sender.tell(new ListCompletedJobsInClusterResponse(request.requestId, SUCCESS, "", completedJobsList), sender); if(logger.isTraceEnabled()) { logger.trace ("Exit onJobListCompleted {}", completedJobsList.size()); } } @Override public void onJobClusterDisable(final DisableJobClusterRequest req) { if(logger.isTraceEnabled()) { logger.trace("Enter onJobClusterDisable {}", req); } ActorRef sender = getSender(); try { IJobClusterMetadata jobClusterMetadata = new JobClusterMetadataImpl.Builder().withIsDisabled(true) .withLastJobCount(this.jobClusterMetadata.getLastJobCount()) .withJobClusterDefinition((JobClusterDefinitionImpl)this.jobClusterMetadata.getJobClusterDefinition()) .build(); //update store jobStore.updateJobCluster(jobClusterMetadata); this.jobClusterMetadata = jobClusterMetadata; cronManager.destroyCron(); // change behavior to disabled getContext().become(disabledBehavior); // send kill requests for all non terminal jobs List<JobInfo> jobsToKill = new ArrayList<>(); jobsToKill.addAll(jobManager.getAcceptedJobsList()); jobsToKill.addAll(jobManager.getActiveJobsList()); for(JobInfo jobInfo : jobsToKill) { jobInfo.jobActor.tell( new KillJobRequest( jobInfo.jobId, "Job cluster disabled", JobCompletedReason.Killed, req.getUser(), ActorRef.noSender()), getSelf()); } // disable SLA check timers getTimers().cancel(BOOKKEEPING_TIMER_KEY); eventPublisher.publishAuditEvent( new 
LifecycleEventsProto.AuditEvent(LifecycleEventsProto.AuditEvent.AuditEventType.JOB_CLUSTER_DISABLED, jobClusterMetadata.getJobClusterDefinition().getName(), name + " disabled") ); sender.tell(new DisableJobClusterResponse(req.requestId, SUCCESS, String.format("%s disabled", name)), getSelf()); numJobClusterDisable.increment(); logger.info("Job Cluster {} is disabbled", this.name); } catch (Exception e) { String errorMsg = "Exception disabling cluster " + name + " due to " + e.getMessage(); logger.error(errorMsg,e); sender.tell(new DisableJobClusterResponse(req.requestId, SERVER_ERROR, errorMsg), getSelf()); numJobClusterDisableErrors.increment(); } if(logger.isTraceEnabled()) { logger.trace("Exit onJobClusterDisable"); } } @Override public void onJobClusterEnable(final EnableJobClusterRequest req) { if(logger.isTraceEnabled()) { logger.trace("Enter onJobClusterEnable"); } ActorRef sender = getSender(); try { IJobClusterMetadata jobClusterMetadata = new JobClusterMetadataImpl.Builder().withIsDisabled(false) .withLastJobCount(this.jobClusterMetadata.getLastJobCount()) .withJobClusterDefinition((JobClusterDefinitionImpl)this.jobClusterMetadata.getJobClusterDefinition()) .build(); //update store jobStore.updateJobCluster(jobClusterMetadata); this.jobClusterMetadata = jobClusterMetadata; if (cronManager == null) { cronManager = new CronManager(name, getSelf(), jobClusterMetadata.getJobClusterDefinition().getSLA()); } this.cronManager.initCron(); // change behavior to enabled getContext().become(initializedBehavior); //start SLA timer setBookkeepingTimer(BOOKKEEPING_INTERVAL_SECS); eventPublisher.publishAuditEvent( new LifecycleEventsProto.AuditEvent(LifecycleEventsProto.AuditEvent.AuditEventType.JOB_CLUSTER_ENABLED, this.jobClusterMetadata.getJobClusterDefinition().getName(), name + " enabled") ); sender.tell(new EnableJobClusterResponse(req.requestId, SUCCESS, String.format("%s enabled", name)), getSelf()); numJobClusterEnable.increment(); logger.info("Job Cluster {} 
is Enabled", this.name); } catch(Exception e) { String errorMsg = String.format("Exception enabling cluster %s due to %s", name, e.getMessage()); logger.error(errorMsg,e); sender.tell(new EnableJobClusterResponse(req.requestId, SERVER_ERROR, errorMsg), getSelf()); numJobClusterEnableErrors.increment(); } if(logger.isTraceEnabled()) { logger.trace("Enter onJobClusterEnable"); } } @Override public void onJobClusterGet(final GetJobClusterRequest request) { final ActorRef sender = getSender(); if(logger.isTraceEnabled()) { logger.trace("In JobCluster Get " + jobClusterMetadata); } if(this.name.equals(request.getJobClusterName())) { MantisJobClusterMetadataView clusterView = generateJobClusterMetadataView(this.jobClusterMetadata, this.jobClusterMetadata.isDisabled(), ofNullable(this.cronManager).map(x -> x.isCronActive).orElse(false)); sender.tell(new GetJobClusterResponse(request.requestId, SUCCESS, "", of(clusterView)), getSelf()); } else { sender.tell(new GetJobClusterResponse(request.requestId, CLIENT_ERROR, "Cluster Name " + request.getJobClusterName() + " in request Does not match cluster Name " + this.name + " of Job Cluster Actor", Optional.empty()), getSelf()); } if(logger.isTraceEnabled()) { logger.trace("Exit onJobClusterGet"); } } private MantisJobClusterMetadataView generateJobClusterMetadataView(IJobClusterMetadata jobClusterMetadata, boolean isDisabled, boolean cronActive) { return new MantisJobClusterMetadataView.Builder() .withName(jobClusterMetadata.getJobClusterDefinition().getName()) .withDisabled(isDisabled) .withIsReadyForJobMaster(jobClusterMetadata.getJobClusterDefinition().getIsReadyForJobMaster()) .withJars(jobClusterMetadata.getJobClusterDefinition().getJobClusterConfigs()) .withJobOwner(jobClusterMetadata.getJobClusterDefinition().getOwner()) .withLabels(jobClusterMetadata.getJobClusterDefinition().getLabels()) .withLastJobCount(jobClusterMetadata.getLastJobCount()) .withSla(jobClusterMetadata.getJobClusterDefinition().getSLA()) 
.withMigrationConfig(jobClusterMetadata.getJobClusterDefinition().getWorkerMigrationConfig())
            .withParameters(jobClusterMetadata.getJobClusterDefinition().getParameters())
            .isCronActive(cronActive)
            .withLatestVersion(jobClusterMetadata.getJobClusterDefinition().getJobClusterConfig().getVersion())
            .build();
}

/**
 * Handles job submission. If the request carries a unique userProvidedType tag and a
 * job with that tag is already running, replies with the existing job's id instead of
 * submitting a new one. Otherwise resolves the job definition (inheriting cluster or
 * last-submission config as needed), adds system labels and submits.
 */
@Override
public void onJobSubmit(final SubmitJobRequest request) {
    final ActorRef sender = getSender();
    // if the job is submitted with a userDefinedType check to see if such a job is already running. If so just reply with a reference to it.
    if(request.getJobDefinition().isPresent()) {
        String uniq = request.getJobDefinition().get().getJobSla().getUserProvidedType();
        if(uniq != null && !uniq.isEmpty()) {
            Optional<JobInfo> existingJob = jobManager.getJobInfoByUniqueId(uniq);
            if(existingJob.isPresent()) {
                logger.info("Job with unique {} already exists, returning its job Id {}", uniq, existingJob.get().jobId);
                sender.tell(new SubmitJobResponse(request.requestId, SUCCESS, existingJob.get().jobId.getId(), of(existingJob.get().jobId)), getSelf());
                return;
            }
        }
    }
    logger.info("Submitting job ");
    try {
        JobDefinition resolvedJobDefn = getResolvedJobDefinition(request.getSubmitter(),request.getJobDefinition());
        eventPublisher.publishStatusEvent(new LifecycleEventsProto.JobClusterStatusEvent(LifecycleEventsProto.StatusEvent.StatusEventType.INFO,
                "Job submit request received", jobClusterMetadata.getJobClusterDefinition().getName()));
        // Tag the definition with system labels (e.g. resubmit marker) before submission.
        resolvedJobDefn = LabelManager.insertSystemLabels(resolvedJobDefn, request.isAutoResubmit());
        submitJob(resolvedJobDefn, sender, request.getSubmitter());
        numJobSubmissions.increment();
    } catch (PersistException pe) {
        // Persistence problems are server-side failures.
        logger.error("Exception submitting job {} from {}", request.getClusterName(), request.getSubmitter(), pe);
        numJobSubmissionFailures.increment();
        sender.tell(new SubmitJobResponse(request.requestId, SERVER_ERROR, pe.getMessage(), empty()), getSelf());
    } catch (Exception e) {
        // Everything else (e.g. validation) is reported as a client error.
        logger.error("Exception submitting job {} from {}", request.getClusterName(), request.getSubmitter(), e);
        numJobSubmissionFailures.increment();
        sender.tell(new SubmitJobResponse(request.requestId, CLIENT_ERROR, e.getMessage(), empty()), getSelf());
    }
}

/**
 * Two cases
 * 1. JobDefinition provided by user: check whether labels / parameters / schedulingInfo
 *    were omitted, and if so inherit them from the Cluster.
 * 2. If JobDefinition is not provided, find the last submitted job and use its config (quick submit).
 * @param user submitter
 * @param givenJobDefnOp job defn provided by user in job submit
 * @return jobdefinition to be used by the actual submit
 * @throws Exception If jobDefinition could not be resolved
 */
private JobDefinition getResolvedJobDefinition(final String user, final Optional<JobDefinition> givenJobDefnOp) throws Exception {
    JobDefinition resolvedJobDefn;
    if(givenJobDefnOp.isPresent()) {
        resolvedJobDefn = givenJobDefnOp.get();
    } else {
        // no job definition specified , this is quick submit which is supposed to inherit from last job submitted
        List<JobInfo> existingJobsList = new ArrayList<>(jobManager.getAllNonTerminalJobsList());
        Optional<JobDefinition> jobDefnOp = createNewJobDefinitionFromLastSubmittedInheritSchedInfoAndParameters(existingJobsList,
                jobManager.getCompletedJobsList(), empty(),jobStore);
        if(jobDefnOp.isPresent()) {
            logger.info("Inherited scheduling Info and parameters from previous job");
            resolvedJobDefn = jobDefnOp.get();
        } else {
            throw new Exception("Job Definition could not retrieved from a previous submission (There may not be a previous submission)");
        }
    }
    logger.info("Resolved JobDefn {}", resolvedJobDefn);
    return this.jobDefinitionResolver.getResolvedJobDefinition(user,resolvedJobDefn,this.jobClusterMetadata);
}

// Validates the definition, allocates the next JobId, persists the cluster's
// incremented job counter, and hands the new job to the JobManager. On persistence
// failure the partially created job is rolled back via cleanUpOnJobSubmitFailure.
private void submitJob(JobDefinition jobDefinition, ActorRef sender, String user) throws PersistException {
    if(logger.isTraceEnabled()) {
        logger.trace("Enter submitJobb");
    }
    JobId jId = null;
    try {
        validateJobDefinition(jobDefinition);
        long lastJobIdNumber = jobClusterMetadata.getLastJobCount();
        jId = new JobId(name, ++lastJobIdNumber);
        logger.info("Creating new job id: " + jId + " with job defn " + jobDefinition);
        MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
                .withJobId(jId)
                .withSubmittedAt(Instant.now())
                .withJobState(JobState.Accepted)
                .withNextWorkerNumToUse(1)
                .withJobDefinition(jobDefinition)
                .build();
        eventPublisher.publishAuditEvent(
                new LifecycleEventsProto.AuditEvent(LifecycleEventsProto.AuditEvent.AuditEventType.JOB_SUBMIT, jId.getId(), jId + " submitter: " + user)
        );
        jobManager.initJob(mantisJobMetaData, jobClusterMetadata, sender);
        numJobActorCreationCounter.increment();
        // Persist the bumped job counter on the cluster metadata.
        jobClusterMetadata = new JobClusterMetadataImpl.Builder().withJobClusterDefinition((JobClusterDefinitionImpl)this.jobClusterMetadata.getJobClusterDefinition())
                .withLastJobCount(lastJobIdNumber)
                .withIsDisabled(jobClusterMetadata.isDisabled())
                .build();
        try {
            jobStore.updateJobCluster(jobClusterMetadata);
        } catch (Exception e) {
            logger.error("Failed to persist job cluster {} error {}", jobClusterMetadata, e.getMessage(), e);
            numJobSubmissionFailures.increment();
            cleanUpOnJobSubmitFailure(jId);
            throw new PersistException(e);
        }
        jobIdSubmissionSubject.onNext(jId);
        numJobSubmissions.increment();
    } catch (PersistException pe) {
        throw pe;
    } catch (InvalidJobRequest e) {
        logger.error( "Invalid jobcluster : {} error {}", jobClusterMetadata, e.getMessage(), e);
        numJobSubmissionFailures.increment();
        throw new IllegalArgumentException(e);
    } catch (Exception e) {
        logger.error("Exception persisting job in store", e);
        numJobSubmissionFailures.increment();
        cleanUpOnJobSubmitFailure(jId);
        throw new IllegalStateException(e);
    }
    if(logger.isTraceEnabled()) {
        logger.trace("Exit submitJob");
    }
}

// Callback from the job actor reporting whether it initialized successfully.
@Override
public void onJobInitialized(JobProto.JobInitialized jobInited) {
    if(logger.isTraceEnabled()) {
        logger.trace("Enter onJobInitialized");
    }
    jobManager.markJobInitialized(jobInited.jobId, System.currentTimeMillis());
if(jobInited.responseCode == SUCCESS) {
        jobInited.requestor.tell(new SubmitJobResponse(jobInited.requestId, SUCCESS, jobInited.jobId.getId(), of(jobInited.jobId)), getSelf());
        numJobsInitialized.increment();
    } else {
        logger.warn("Job was not initialized {}" , jobInited);
        Optional<JobInfo> jobInfo = jobManager.getJobInfoForNonTerminalJob(jobInited.jobId);
        if(jobInfo.isPresent()) {
            cleanUpOnJobSubmitFailure(jobInfo.get().jobId);
            // if this is not a cron submission inform the caller
            if(jobInited.requestor != null)
                jobInited.requestor.tell(new SubmitJobResponse(jobInited.requestId, jobInited.responseCode,
                        "Job " + jobInited.jobId + " submission failed", ofNullable(jobInited.jobId)), getSelf());
        } else {
            logger.warn("No such job found {}", jobInited.jobId);
        }
    }
    if(logger.isTraceEnabled()) {
        logger.trace("Exit onJobInitialized");
    }
}

/**
 * When a Job starts evaluate SLA to ensure the number of running jobs satisfies the SLA
 * @param startedEvent JobStarted Event
 */
@Override
public void onJobStarted(final JobStartedEvent startedEvent) {
    logger.info("job {} started event", startedEvent.jobid);
    Optional<JobInfo> jobInfoOp = jobManager.getJobInfoForNonTerminalJob(startedEvent.jobid);
    if(jobInfoOp.isPresent()) {
        // enforce SLA
        jobManager.markJobStarted(jobInfoOp.get());
        getSelf().tell(new JobClusterProto.EnforceSLARequest(Instant.now(), of(jobInfoOp.get().jobDefinition)), getSelf());
    }
}

// Best-effort rollback after a failed submission: marks the job Failed, stops and
// unwatches its actor, and records it as completed.
private void cleanUpOnJobSubmitFailure(JobId jId) {
    if(logger.isTraceEnabled()) {
        logger.trace("Enter cleanUpOnJobSubmitFailure {}", jId);
    }
    if(jId != null) {
        Optional<JobInfo> jobInfoOp = jobManager.getJobInfoForNonTerminalJob(jId);
        if (jobInfoOp.isPresent()) {
            // ensure there is a record of this job
            JobInfo jobInfo = jobInfoOp.get();
            if (jobManager.markJobTerminating(jobInfo, JobState.Failed)) {
                // mark job as terminating
                getContext().unwatch(jobInfo.jobActor);
                getContext().stop(jobInfo.jobActor);
                jobManager.markCompleted(jId, empty(), JobState.Failed);
                // clear it from initializing table if present
                jobManager.markJobInitialized(jId, System.currentTimeMillis());
            } else {
                logger.warn("cleanup on Job Submit failure failed for job {}", jId);
            }
        }
    } else {
        logger.warn("cleanup on Job Submit failure failed as there was no JobId");
    }
    if(logger.isTraceEnabled()) {
        logger.trace("Exit cleanUpOnJobSubmitFailure {}", jId);
    }
}

/**
 * Checks the required fields of a job definition and validates its stage constraints.
 * @param definition Job Definition to be validated
 * @throws InvalidJobRequest If the job definition is invalid
 */
private void validateJobDefinition(JobDefinition definition) throws InvalidJobRequest {
    if (definition == null){
        throw new InvalidJobRequest(null, "MantisJobDefinition cannot be null");
    }
    if (definition.getArtifactName() == null){
        throw new InvalidJobRequest(null, "MantisJobDefinition job artifactName attribute cannot be null");
    }
    if (definition.getName() == null){
        throw new InvalidJobRequest(null, "MantisJobDefinition name attribute cannot be null");
    }
    if (definition.getSchedulingInfo() == null){
        throw new InvalidJobRequest(null, "MantisJobDefinition schedulingInfo cannot be null");
    }
    for(StageSchedulingInfo ssi : definition.getSchedulingInfo().getStages().values()) {
        List<JobConstraints> hardConstraints = ssi.getHardConstraints();
        List<JobConstraints> softConstraints = ssi.getSoftConstraints();
        validateConstraints(softConstraints,hardConstraints);
    };
}

// Verifies that every soft/hard constraint maps to a known evaluator.
private void validateConstraints(List<JobConstraints> softConstraints, List<JobConstraints> hardConstraints) throws InvalidJobRequest{
    // ok to have null constraints as they will get replaced later with empty list in JobActor.setupStageWorkers
    if(softConstraints != null) {
        for (JobConstraints jc : softConstraints) {
            if (ConstraintsEvaluators.softConstraint(jc, new HashSet<>()) == null) {
                logger.error("Invalid Soft Job Constraint {}", jc);
                throw new InvalidJobRequest(null, "Unknown constraint " + jc);
            }
        }
        ;
    }
    if(hardConstraints != null ) {
        for (JobConstraints jc : hardConstraints) {
            if (ConstraintsEvaluators.hardConstraint(jc, new HashSet<>()) == null) {
                logger.error("Invalid Hard Job Constraint {}", jc);
                throw new InvalidJobRequest(null, "Unknown constraint " + jc);
            }
        }
        ;
    }
}

// Routes worker events to the owning job actor; workers whose job is no longer
// running are terminated unless the event itself is terminal.
@Override
public void onWorkerEvent(WorkerEvent r) {
    if(logger.isTraceEnabled()) {
        logger.trace("Enter onWorkerEvent {}", r);
    }
    Optional<JobInfo> jobInfo = jobManager.getJobInfoForNonTerminalJob(r.getWorkerId().getJobId());
    if(jobInfo.isPresent()) {
        jobInfo.get().jobActor.forward(r, getContext());
    } else {
        if(!JobHelper.isTerminalWorkerEvent(r)) {
            logger.warn("Event from worker {} has no valid running job. Terminating worker ", r.getWorkerId());
            Optional<String> host = JobHelper.getWorkerHostFromWorkerEvent(r);
            mantisScheduler.unscheduleAndTerminateWorker(r.getWorkerId(), host);
        } else {
            logger.warn("Terminal Event from worker {} has no valid running job. Ignoring event ", r.getWorkerId());
        }
    }
    if(logger.isTraceEnabled()) {
        logger.trace("Exit onWorkerEvent {}", r);
    }
}

/**
 * Delegates worker resubmission to onResubmitWorker.
 * @param req Resubmit worker message
 */
@Override
public void onResubmitWorkerRequest(ResubmitWorkerRequest req) {
    if(logger.isTraceEnabled()) {
        logger.trace("Enter onResubmitWorkerRequest {}", req);
    }
    onResubmitWorker(req);
    if(logger.isTraceEnabled()) {
        logger.trace("Exit onResubmitWorkerRequest {}", req);
    }
}

/**
 * Can be invoked in two ways
 * 1. User requests a job termination
 * 2. The job itself requests a termination due to
 *    a. Too many worker resubmits
 *    b. Max runtime limit has reached
 *    c.
Subscription timeout reached
 * @param req Kill job request message
 */
@Override
public void onJobKillRequest(KillJobRequest req) {
    logger.info("JobClusterActor.onKillJobRequest {}", req);
    Optional<JobInfo> jobInfo = jobManager.getJobInfoForNonTerminalJob(req.jobId);
    ActorRef sender = getSender();
    // NOTE(review): the job is marked terminating with JobState.Failed regardless of the
    // kill reason -- confirm this is intended for user-initiated kills as well.
    if(jobInfo.isPresent() && jobManager.markJobTerminating(jobInfo.get(), JobState.Failed)) {
        jobInfo.get().jobActor.tell(req, getSelf());
    } else {
        logger.info("Job {} not found", req.jobId.getId() );
        req.requestor.tell(new JobClusterManagerProto.KillJobResponse(req.requestId, CLIENT_ERROR_NOT_FOUND, JobState.Noop,
                "Job " + req.jobId + " not found", req.jobId, req.user), getSelf());
    }
}

/**
 * Sent by job actor when the job shutdown is initiated.
 * On success: unwatches the job actor, relays the result to the original requestor
 * (unless that is this actor itself), marks the job completed, and triggers SLA
 * enforcement when the cluster has a non-zero SLA.
 * @param resp Kill job response message
 */
@Override
public void onKillJobResponse(JobClusterProto.KillJobResponse resp) {
    if(logger.isTraceEnabled()) {
        logger.trace("Enter onKillJobResponse {}", resp);
    }
    if (resp.responseCode == SUCCESS) {
        Optional<JobInfo> jInfo = jobManager.getJobInfoForNonTerminalJob(resp.jobId);
        if(jInfo.isPresent() ) {
            // stop watching actor
            getContext().unwatch(jInfo.get().jobActor);
            numJobShutdowns.increment();
            logger.info("Marking job {} as terminated", jInfo.get().jobId);
            // check requestor is not self to avoid an infinite loop
            if (resp.requestor != null && !getSelf().equals(resp.requestor)) {
                resp.requestor.tell(
                        new KillJobResponse(resp.requestId, resp.responseCode, resp.state, resp.message, resp.jobId, resp.user), getSelf());
            }
            Optional<CompletedJob> completedJob = jobManager.markCompleted(resp.jobId, resp.jobMetadata, resp.state);
            if(completedJob.isPresent()) {
                logger.info("In cleanupAfterJobKill for Job {} in state {} and metadata {} ", resp.jobId, resp.state,resp.jobMetadata);
                // enforce SLA
                if(!jobClusterMetadata.isDisabled()) {
                    SLA sla = this.jobClusterMetadata.getJobClusterDefinition().getSLA();
                    if(sla.getMin() == 0 && sla.getMax() == 0) {
                        logger.info("No SLA specified nothing to enforce {}", sla);
                    } else {
                        try {
                            // first check if response has job meta for last job
                            Optional<IMantisJobMetadata> cJob = (resp.jobMetadata);
                            if (cJob == null || !cJob.isPresent()) {
                                // else check archived jobs
                                cJob = jobStore.getArchivedJob(completedJob.get().getJobId());
                            }
                            if( cJob != null && cJob.isPresent()) {
                                getSelf().tell(new JobClusterProto.EnforceSLARequest(Instant.now(), of(cJob.get().getJobDefinition())), ActorRef.noSender());
                            } else {
                                logger.warn("Could not load last terminated job to use for triggering enforce SLA");
                            }
                        } catch (Exception e) {
                            // should not get here
                            logger.warn("Exception {} loading completed Job {} to enforce SLA due", e.getMessage(), completedJob.get().getJobId());
                        }
                    }
                }
            } else {
                logger.warn("Unable to mark job {} completed. ", resp.jobId);
            }
        } else {
            // should not get here
            if (resp.requestor != null && !getSelf().equals(resp.requestor)) {
                resp.requestor.tell(
                        new KillJobResponse(resp.requestId, CLIENT_ERROR, JobState.Noop, "Job not found", resp.jobId, resp.user), getSelf());
            }
        }
    } else {
        if (resp.requestor != null && !getSelf().equals(resp.requestor)) {
            // kill job was not successful relay to caller
            resp.requestor.tell(
                    new KillJobResponse(resp.requestId, resp.responseCode, resp.state, resp.message, resp.jobId, resp.user), getSelf());
        }
    }
    if(logger.isTraceEnabled()) {
        logger.trace("Exit onKillJobResponse {}", resp);
    }
}

// Returns job details: forwards to the job actor for running jobs, otherwise reads
// the archived record from the job store for completed jobs.
@Override
public void onGetJobDetailsRequest(GetJobDetailsRequest req) {
    if(logger.isTraceEnabled()) {
        logger.trace("Enter GetJobDetails {}", req);
    }
    GetJobDetailsResponse response = new GetJobDetailsResponse(req.requestId, CLIENT_ERROR_NOT_FOUND,
            "Job " + req.getJobId() + " not found", empty());
    Optional<JobInfo> jInfo = jobManager.getJobInfoForNonTerminalJob(req.getJobId());
    if(jInfo.isPresent()) {
        if(logger.isDebugEnabled()) {
            logger.debug("Forwarding getJobDetails to job actor for {}", req.getJobId());
        }
        jInfo.get().jobActor.forward(req, getContext());
        return;
    } else {
        // Could be a terminated job
        Optional<CompletedJob> completedJob = jobManager.getCompletedJob(req.getJobId());
        if(completedJob.isPresent()) {
            if(logger.isDebugEnabled()) {
                logger.debug("Found Job {} in completed state ", req.getJobId());
            }
            try {
                Optional<IMantisJobMetadata> jobMetaOp = jobStore.getArchivedJob(req.getJobId().getId());
                if(jobMetaOp.isPresent()) {
                    response = new GetJobDetailsResponse(req.requestId, SUCCESS, "", jobMetaOp);
                } else {
                    response = new GetJobDetailsResponse(req.requestId, CLIENT_ERROR_NOT_FOUND, "Job " + req.getJobId() + " not found", empty());
                }
            } catch (Exception e) {
                logger.warn("Exception {} reading Job {} from Storage ", e.getMessage(), req.getJobId());
                response = new GetJobDetailsResponse(req.requestId, CLIENT_ERROR, "Exception reading Job " + req.getJobId() + " " + e.getMessage(), empty());
            }
        } else {
            logger.warn("No such job {} ", req.getJobId());
        }
    }
    getSender().tell(response, getSelf());
    if(logger.isTraceEnabled()) {
        logger.trace("Exit GetJobDetails {}", req);
    }
}

// Looks up discovery info for the most recently submitted job of this cluster,
// forwarding to that job's actor when it is still running.
// (Definition continues beyond this chunk.)
@Override
public void onGetLatestJobDiscoveryInfo(JobClusterManagerProto.GetLatestJobDiscoveryInfoRequest request) {
    if(logger.isTraceEnabled()) {
        logger.trace("Enter onGetLatestJobDiscoveryInfo {}", request);
    }
    ActorRef sender = getSender();
    if(this.name.equals(request.getJobCluster())) {
        JobId latestJobId = jobIdSubmissionSubject.getValue();
        logger.debug("[{}] latest job Id for cluster: {}", name, latestJobId);
        if (latestJobId != null) {
            Optional<JobInfo> jInfo = jobManager.getJobInfoForNonTerminalJob(latestJobId);
            if (jInfo.isPresent()) {
                // ask job actor for discovery info
                jInfo.get().jobActor.forward(request, getContext());
            } else {
                logger.info("job info not found for job ID when looking up discovery info: {}", latestJobId);
                sender.tell(new GetLatestJobDiscoveryInfoResponse(request.requestId, SERVER_ERROR,
                        "JobInfo not found when looking up discovery info for " + latestJobId, empty()), getSelf());
            }
        } else {
            // no latest job ID found for this job cluster
            logger.debug("no latest Job ID found for job cluster {}",
name); sender.tell(new GetLatestJobDiscoveryInfoResponse(request.requestId, CLIENT_ERROR_NOT_FOUND, "No latest jobId found for job cluster " + name, empty()), getSelf()); } } else { String msg = "Job Cluster " + request.getJobCluster() + " In request does not match the name of this actor " + this.name; logger.warn(msg); sender.tell(new JobClusterManagerProto.GetLatestJobDiscoveryInfoResponse(request.requestId, SERVER_ERROR, msg, empty()), getSelf()); } if(logger.isTraceEnabled()) { logger.trace("Exit onGetLatestJobDiscoveryInfo {}", request); } } @Override public void onGetJobStatusSubject(GetJobSchedInfoRequest request) { if(logger.isTraceEnabled()) { logger.trace("Enter onGetJobStatusSubject {}", request); } Optional<JobInfo> jInfo = jobManager.getJobInfoForNonTerminalJob(request.getJobId()); if(jInfo.isPresent()) { if(logger.isDebugEnabled()) { logger.debug("Forwarding getJobDetails to job actor for {}", request.getJobId()); } jInfo.get().jobActor.forward(request, getContext()); } else { // Could be a terminated job GetJobSchedInfoResponse response = new GetJobSchedInfoResponse(request.requestId, CLIENT_ERROR, "Job " + request.getJobId() + " not found or not active", empty()); getSender().tell(response, getSelf()); } if(logger.isTraceEnabled()) { logger.trace("Exit onGetJobStatusSubject "); } } @Override public void onGetLastSubmittedJobIdSubject(GetLastSubmittedJobIdStreamRequest request) { if(logger.isTraceEnabled()) { logger.trace("Enter onGetLastSubmittedJobIdSubject {}", request); } ActorRef sender = getSender(); if(this.name.equals(request.getClusterName())) { sender.tell(new GetLastSubmittedJobIdStreamResponse(request.requestId,SUCCESS,"",of(this.jobIdSubmissionSubject)),getSelf()); } else { String msg = "Job Cluster " + request.getClusterName() + " In request does not match the name of this actor " + this.name; logger.warn(msg); sender.tell(new GetLastSubmittedJobIdStreamResponse(request.requestId,CLIENT_ERROR ,msg,empty()),getSelf()); } 
if(logger.isTraceEnabled()) { logger.trace("Exit onGetLastSubmittedJobIdSubject {}", request); } } @Override public void onBookkeepingRequest(JobClusterProto.BookkeepingRequest request) { if(logger.isTraceEnabled()) { logger.trace("Enter onBookkeepingRequest for JobCluster {}", this.name); } // Enforce SLA if exists onEnforceSLARequest(new JobClusterProto.EnforceSLARequest()); // Tell all child jobs to migrate workers on disabled VMs (if any) jobManager.actorToJobIdMap.keySet().forEach(actorRef -> actorRef.tell(new JobProto.MigrateDisabledVmWorkersRequest(request.time), ActorRef.noSender())); if(logger.isTraceEnabled()) { logger.trace("Exit onBookkeepingRequest for JobCluster {}", name); } } @Override public void onEnforceSLARequest(JobClusterProto.EnforceSLARequest request) { if(logger.isTraceEnabled()) { logger.trace("Enter onEnforceSLA for JobCluster {} with request", this.name, request); } numSLAEnforcementExecutions.increment(); long now = request.timeOfEnforcement.toEpochMilli(); List<JobInfo> pendingInitializationJobsPriorToCutoff = jobManager.getJobActorsStuckInInit(now, getExpirePendingInitializeDelayMs()); List<JobInfo> jobsStuckInAcceptedList = jobManager.getJobsStuckInAccepted(now, getExpireAcceptedDelayMs()); List<JobInfo> jobsStuckInTerminatingList = jobManager.getJobsStuckInTerminating(now, getExpireAcceptedDelayMs()); if(!slaEnforcer.hasSLA()) { return; } int activeJobsCount = jobManager.activeJobsCount(); int acceptedJobsCount = jobManager.acceptedJobsCount(); // enforcing min int noOfJobsToLaunch = slaEnforcer.enforceSLAMin(activeJobsCount, acceptedJobsCount); if(noOfJobsToLaunch > 0) { logger.info("Submitting {} jobs for job name {} as active count is {} and accepted count is {}", noOfJobsToLaunch, name, activeJobsCount, acceptedJobsCount); String user = MANTIS_MASTER_USER; if(request.jobDefinitionOp.isPresent()) { user = request.jobDefinitionOp.get().getUser(); } for(int i=0; i< noOfJobsToLaunch; i++) { getSelf().tell(new SubmitJobRequest(name, 
user, true,request.jobDefinitionOp), getSelf()); } // enforce max. } else { List<JobInfo> listOfJobs = new ArrayList<>(activeJobsCount + acceptedJobsCount); listOfJobs.addAll(jobManager.getActiveJobsList()); listOfJobs.addAll(jobManager.getAcceptedJobsList()); List<JobId> jobsToKill = slaEnforcer.enforceSLAMax(Collections.unmodifiableList(listOfJobs)); for (JobId jobId : jobsToKill) { logger.info("Request termination for job {}", jobId); getSelf().tell( new KillJobRequest( jobId, "SLA enforcement", JobCompletedReason.Killed, MANTIS_MASTER_USER, ActorRef.noSender()), getSelf()); } } if(logger.isTraceEnabled()) { logger.trace("Exit onEnforceSLA for JobCluster {}", name); } } private long getExpireAcceptedDelayMs() { // stuck in accepted for more than 10mins // TODO make part of config return 10*60*1000; } /** * Create a new JobDefinition using the given job definition. Inherit everything except the artifact name and version. * @param jobDefinition * @return Optional JobDefinition */ private Optional<JobDefinition> createNewJobDefinitionInheritSchedInfoAndParameters(JobDefinition jobDefinition) { try { JobDefinition clonedJobDefn = new JobDefinition.Builder().withJobSla(jobDefinition.getJobSla()) .withLabels(jobDefinition.getLabels()) .withName(jobDefinition.getName()) .withParameters(jobDefinition.getParameters()) .withSchedulingInfo(jobDefinition.getSchedulingInfo()) .withNumberOfStages(jobDefinition.getNumberOfStages()) .withSubscriptionTimeoutSecs(jobDefinition.getSubscriptionTimeoutSecs()) .withUser(jobDefinition.getUser()) .build(); return of(clonedJobDefn); } catch (Exception e) { logger.warn("Could not clone JobDefinition {} due to {}", jobDefinition, e.getMessage()); e.printStackTrace(); } // should not get here return empty(); } /** * Fetch JobDefn of last job and clone it to a create a new one. 
Inherit the schedulingInfo and parameters * @param existingJobsList * @param completedJobs * @param jobDefinitionOp * @param store * @return */ private Optional<JobDefinition> createNewJobDefinitionFromLastSubmittedInheritSchedInfoAndParameters(final List<JobInfo> existingJobsList, final List<CompletedJob> completedJobs, Optional<JobDefinition> jobDefinitionOp, MantisJobStore store) { if(logger.isTraceEnabled()) { logger.trace("Enter createNewJobDefinitionFromLastSubmittedInheritSchedInfoAndParameters"); } Optional<JobDefinition> lastSubmittedJobDefn = getLastSubmittedJobDefinition(existingJobsList, completedJobs, jobDefinitionOp, store); if(lastSubmittedJobDefn.isPresent()) { if(logger.isTraceEnabled()) { logger.trace("Exit createNewJobDefinitionFromLastSubmittedInheritSchedInfoAndParameters"); } return createNewJobDefinitionInheritSchedInfoAndParameters(lastSubmittedJobDefn.get()); } if(logger.isTraceEnabled()) { logger.trace("Exit createNewJobDefinitionFromLastSubmittedInheritSchedInfoAndParameters empty"); } return empty(); } @Override public void onExpireOldJobs(JobClusterProto.ExpireOldJobsRequest request) { final long tooOldCutOff = System.currentTimeMillis() - (getTerminatedJobToDeleteDelayHours()*3600000L); jobManager.purgeOldCompletedJobs(tooOldCutOff); } private long getExpirePendingInitializeDelayMs() { // jobs older than 60 secs return 60*1000; } /** * When cron fires * if a cron policy is keep_new then submit a new job * else skip if a job is running at the moment, if not then submit a new job * @param request Cron fired event */ @Override public void onTriggerCron(JobClusterProto.TriggerCronRequest request) { if(logger.isTraceEnabled()) { logger.trace("Enter onTriggerCron for Job Cluster {}", this.name);} if(jobClusterMetadata.getJobClusterDefinition().getSLA().getCronPolicy() != null) { if(jobClusterMetadata.getJobClusterDefinition().getSLA().getCronPolicy() == CronPolicy.KEEP_NEW || this.jobManager.getAllNonTerminalJobsList().size() == 0) { 
getSelf().tell(new SubmitJobRequest(name, MANTIS_MASTER_USER, empty()), getSelf()); } else { // A job is already running skip resubmiting logger.info(name + ": Skipping submitting new job upon cron trigger, one exists already"); } } if(logger.isTraceEnabled()) { logger.trace("Exit onTriggerCron Triggered for Job Cluster {}", this.name);} } private long getTerminatedJobToDeleteDelayHours() { return ConfigurationProvider.getConfig().getTerminatedJobToDeleteDelayHours(); } @Override public void onJobClusterUpdateSLA(UpdateJobClusterSLARequest slaRequest) { if(logger.isTraceEnabled()) { logger.trace("Enter onJobClusterUpdateSLA {}", slaRequest); } ActorRef sender = getSender(); try { SLA newSla = new SLA(slaRequest.getMin(), slaRequest.getMax(), slaRequest.getCronSpec(), slaRequest.getCronPolicy()); JobClusterDefinitionImpl updatedDefn = new JobClusterDefinitionImpl.Builder().from(jobClusterMetadata.getJobClusterDefinition()) .withSla(newSla) .build(); boolean isDisabled = jobClusterMetadata.isDisabled(); if(slaRequest.isForceEnable() && jobClusterMetadata.isDisabled()) { isDisabled = false; } IJobClusterMetadata jobCluster = new JobClusterMetadataImpl.Builder() .withIsDisabled(isDisabled) .withLastJobCount(jobClusterMetadata.getLastJobCount()) .withJobClusterDefinition(updatedDefn) .build(); updateAndSaveJobCluster(jobCluster); if(cronManager != null) cronManager.destroyCron(); this.cronManager = new CronManager(name, getSelf(), newSla); sender.tell(new UpdateJobClusterSLAResponse(slaRequest.requestId, SUCCESS, name + " SLA updated"), getSelf()); eventPublisher.publishAuditEvent( new LifecycleEventsProto.AuditEvent(LifecycleEventsProto.AuditEvent.AuditEventType.JOB_CLUSTER_UPDATE, jobClusterMetadata.getJobClusterDefinition().getName(), name+" SLA update") ); } catch(IllegalArgumentException e) { logger.error("Invalid arguement job cluster not updated ", e); sender.tell(new UpdateJobClusterSLAResponse(slaRequest.requestId, CLIENT_ERROR, name + " Job cluster SLA 
updation failed " + e.getMessage()), getSelf()); } catch(Exception e) { logger.error("job cluster not updated ", e); sender.tell(new UpdateJobClusterSLAResponse(slaRequest.requestId, SERVER_ERROR, name + " Job cluster SLA updation failed " + e.getMessage()), getSelf()); } if(logger.isTraceEnabled()) { logger.trace("Exit onJobClusterUpdateSLA {}", slaRequest); } } @Override public void onJobClusterUpdateLabels(UpdateJobClusterLabelsRequest labelRequest) { if(logger.isTraceEnabled()) { logger.trace("Enter onJobClusterUpdateLabels {}", labelRequest); } ActorRef sender = getSender(); try { JobClusterConfig newConfig = new JobClusterConfig.Builder().from(jobClusterMetadata.getJobClusterDefinition().getJobClusterConfig()) .build(); JobClusterDefinitionImpl updatedDefn = new JobClusterDefinitionImpl.Builder().from(jobClusterMetadata.getJobClusterDefinition()) .withJobClusterConfig(newConfig) .withLabels(labelRequest.getLabels()) .build(); IJobClusterMetadata jobCluster = new JobClusterMetadataImpl.Builder() .withIsDisabled(jobClusterMetadata.isDisabled()) .withLastJobCount(jobClusterMetadata.getLastJobCount()) .withJobClusterDefinition(updatedDefn) .build(); updateAndSaveJobCluster(jobCluster); sender.tell(new UpdateJobClusterLabelsResponse(labelRequest.requestId, SUCCESS, name + " labels updated"), getSelf()); eventPublisher.publishAuditEvent( new LifecycleEventsProto.AuditEvent(LifecycleEventsProto.AuditEvent.AuditEventType.JOB_CLUSTER_UPDATE, jobClusterMetadata.getJobClusterDefinition().getName(), name + " update labels") ); } catch(Exception e) { logger.error("job cluster labels not updated ", e); sender.tell(new UpdateJobClusterLabelsResponse(labelRequest.requestId, SERVER_ERROR, name + " labels updation failed " + e.getMessage()), getSelf()); } if(logger.isTraceEnabled()) { logger.trace("Exit onJobClusterUpdateLabels {}", labelRequest); } } @Override public void onJobClusterUpdateArtifact(UpdateJobClusterArtifactRequest artifactReq) { if(logger.isTraceEnabled()) { 
logger.trace("Entering JobClusterActor:onJobClusterUpdateArtifact"); } ActorRef sender = getSender(); try { if(!isVersionUnique(artifactReq.getVersion(), jobClusterMetadata.getJobClusterDefinition().getJobClusterConfigs())) { String msg = String.format("job cluster %s not updated as the version %s is not unique", name,artifactReq.getVersion()); logger.error(msg); sender.tell(new UpdateJobClusterArtifactResponse(artifactReq.requestId, CLIENT_ERROR, msg), getSelf()); return; } JobClusterConfig newConfig = new JobClusterConfig.Builder().from(jobClusterMetadata.getJobClusterDefinition().getJobClusterConfig()) .withArtifactName(artifactReq.getArtifactName()) .withVersion(artifactReq.getVersion()) .withUploadedAt(System.currentTimeMillis()) .build(); JobClusterDefinitionImpl updatedDefn = new JobClusterDefinitionImpl.Builder().from(jobClusterMetadata.getJobClusterDefinition()) .withJobClusterConfig(newConfig) .build(); IJobClusterMetadata jobCluster = new JobClusterMetadataImpl.Builder() .withIsDisabled(jobClusterMetadata.isDisabled()) .withLastJobCount(jobClusterMetadata.getLastJobCount()) .withJobClusterDefinition(updatedDefn) .build(); updateAndSaveJobCluster(jobCluster); sender.tell(new UpdateJobClusterArtifactResponse(artifactReq.requestId, SUCCESS, name + " artifact updated"), getSelf()); eventPublisher.publishAuditEvent( new LifecycleEventsProto.AuditEvent(LifecycleEventsProto.AuditEvent.AuditEventType.JOB_CLUSTER_UPDATE, jobClusterMetadata.getJobClusterDefinition().getName(), name + " artifact update") ); if(!artifactReq.isSkipSubmit()) { getSelf().tell(new SubmitJobRequest(name,artifactReq.getUser(), (empty())), getSelf()); } } catch(Exception e) { logger.error("job cluster not updated ", e); sender.tell(new UpdateJobClusterArtifactResponse(artifactReq.requestId, SERVER_ERROR, name + " Job cluster artifact updation failed " + e.getMessage()), getSelf()); } if(logger.isTraceEnabled()) { logger.trace("Exit JobClusterActor:onJobClusterUpdateArtifact"); } } boolean 
isVersionUnique(String artifactVersion, List<JobClusterConfig> existingConfigs) { if(logger.isTraceEnabled()) { logger.trace("Enter JobClusterActor {} isVersionnique {} existing versions {}",name,artifactVersion,existingConfigs);} for(JobClusterConfig config : existingConfigs) { if(config.getVersion().equals(artifactVersion)) { logger.info("Given Version {} is not unique during UpdateJobCluster {}",artifactVersion, name); return false; } } return true; } //TODO validate the migration config json @Override public void onJobClusterUpdateWorkerMigrationConfig(UpdateJobClusterWorkerMigrationStrategyRequest req) { if(logger.isTraceEnabled()) { logger.trace("Entering JobClusterActor:onJobClusterUpdateWorkerMigrationConfig {}", req); } ActorRef sender = getSender(); try { JobClusterDefinitionImpl updatedDefn = new JobClusterDefinitionImpl.Builder().from(jobClusterMetadata.getJobClusterDefinition()) .withMigrationConfig(req.getMigrationConfig()) .build(); IJobClusterMetadata jobCluster = new JobClusterMetadataImpl.Builder() .withIsDisabled(jobClusterMetadata.isDisabled()) .withLastJobCount(jobClusterMetadata.getLastJobCount()) .withJobClusterDefinition(updatedDefn) .build(); updateAndSaveJobCluster(jobCluster); sender.tell(new UpdateJobClusterWorkerMigrationStrategyResponse(req.requestId, SUCCESS, name + " worker migration config updated"), getSelf()); eventPublisher.publishAuditEvent( new LifecycleEventsProto.AuditEvent(LifecycleEventsProto.AuditEvent.AuditEventType.JOB_CLUSTER_UPDATE, jobClusterMetadata.getJobClusterDefinition().getName(), name + " worker migration config update") ); } catch(Exception e) { logger.error("job cluster migration config not updated ", e); sender.tell(new UpdateJobClusterWorkerMigrationStrategyResponse(req.requestId, SERVER_ERROR, name + " Job cluster worker migration config updation failed " + e.getMessage()), getSelf()); } if(logger.isTraceEnabled()) { logger.trace("Exit JobClusterActor:onJobClusterUpdateWorkerMigrationConfig {}", req); } } 
private void updateAndSaveJobCluster(IJobClusterMetadata jobCluster) throws Exception {
    if(logger.isTraceEnabled()) { logger.trace("Entering JobClusterActor:updateAndSaveJobCluster {}", jobCluster.getJobClusterDefinition().getName()); }
    // Persist first; the in-memory reference is swapped only after the store write succeeds,
    // so a store failure leaves the actor's view unchanged.
    jobStore.updateJobCluster(jobCluster);
    jobClusterMetadata = jobCluster;
    // Switch back to the fully-initialized behavior if the cluster is (now) enabled.
    if(!jobClusterMetadata.isDisabled()) {
        getContext().become(initializedBehavior);
    }
    // Rebuild the SLA enforcer against the freshly saved SLA definition.
    slaEnforcer = new SLAEnforcer(jobClusterMetadata.getJobClusterDefinition().getSLA());
    logger.info("succeesfully saved job cluster");
    if(logger.isTraceEnabled()) { logger.trace("Exit JobClusterActor:updateAndSaveJobCluster {}", jobCluster.getJobClusterDefinition().getName()); }
}

/**
 * If a job definition is passed return it immediately.
 * Else find the last submitted job: first look in currently running jobs, next look in completed jobs.
 * @param existingJobsList existing job list
 * @param completedJobs completed job list
 * @param jobDefinitionOp optional job definition; returned as-is when present
 * @param store store reference if required to load an archived job
 * @return JobDefinition of last submitted job if found
 */
/*package protected*/ private Optional<JobDefinition> getLastSubmittedJobDefinition(final List<JobInfo> existingJobsList,
        final List<CompletedJob> completedJobs,
        Optional<JobDefinition> jobDefinitionOp,
        MantisJobStore store) {
    if(logger.isTraceEnabled()) { logger.trace("Entering getLastSubmittedJobDefinition"); }
    // An explicitly supplied definition wins; no lookup needed.
    if(jobDefinitionOp.isPresent()) {
        return jobDefinitionOp;
    }
    Optional<JobId> lastJobId = JobListHelper.getLastSubmittedJobId(existingJobsList,completedJobs);
    if(lastJobId.isPresent()) {
        // Prefer the definition of a still-running (non-terminal) job over an archived one.
        Optional<JobInfo> jobInfoForNonTerminalJob = jobManager.getJobInfoForNonTerminalJob(lastJobId.get());
        if(jobInfoForNonTerminalJob.isPresent()) {
            if(logger.isTraceEnabled()) { logger.trace("Exit getLastSubmittedJobDefinition {}", jobInfoForNonTerminalJob.get().jobDefinition); }
            return of(jobInfoForNonTerminalJob.get().jobDefinition);
        } else {
            // Last submitted job already terminated; fall back to the completed-job record
            // (statement continues on the next source line).
            Optional<CompletedJob> completedJob =
jobManager.getCompletedJob(lastJobId.get()); if(completedJob.isPresent()) { try { Optional<IMantisJobMetadata> archivedJob = store.getArchivedJob(completedJob.get().getJobId()); if(archivedJob.isPresent()) { if(logger.isTraceEnabled()) { logger.trace("Exit getLastSubmittedJobDefinition returning job {} with defn {}", archivedJob.get().getJobId(), archivedJob.get().getJobDefinition()); } return of(archivedJob.get().getJobDefinition()); } else { logger.warn("Could not find load archived Job {} for cluster {}", completedJob.get().getJobId(), name); } } catch (Exception e) { logger.warn("Archived Job {} could not be loaded from the store due to {} ", completedJob.get().getJobId(), e.getMessage()); } } else { logger.warn("Could not find any previous submitted/completed Job for cluster {}", name); } } } else { logger.warn("Could not find any previous submitted Job for cluster {}", name); } if(logger.isTraceEnabled()) { logger.trace("Exit getLastSubmittedJobDefinition empty"); } return empty(); } /** * 2 cases this can occur * 1. Graceful shutdown : Where the job cluster actor requests the job actor to terminate. In this case we simply clear the pending * delete jobs map * * 2. Unexpected shutdown : The job actor terminated unexpectedly in which case we need to relaunch the actor. * @param terminatedEvent Event describing a job actor was terminated */ private void onTerminated(Terminated terminatedEvent) { if(logger.isDebugEnabled()) { logger.debug("onTerminatedEvent {} ", terminatedEvent); } // TODO relaunch actor ? } @Override public void onScaleStage(ScaleStageRequest req) { if(logger.isTraceEnabled()) { logger.trace("Exit onScaleStage {}", req); } Optional<JobInfo> jobInfo = jobManager.getJobInfoForNonTerminalJob(req.getJobId()); ActorRef sender = getSender(); if(jobInfo.isPresent()) { jobInfo.get().jobActor.forward(req, getContext()); } else { sender.tell(new ScaleStageResponse(req.requestId, CLIENT_ERROR, "Job " + req.getJobId() + " not found. 
Could not scale stage to " + req.getNumWorkers(), 0), getSelf()); } if(logger.isTraceEnabled()) { logger.trace("Exit onScaleStage {}", req); } } @Override public void onResubmitWorker(ResubmitWorkerRequest req) { if(logger.isTraceEnabled()) { logger.trace("Exit JCA:onResubmitWorker {}", req); } Optional<JobInfo> jobInfo = jobManager.getJobInfoForNonTerminalJob(req.getJobId()); ActorRef sender = getSender(); if(jobInfo.isPresent()) { jobInfo.get().jobActor.forward(req, getContext()); } else { sender.tell(new ResubmitWorkerResponse(req.requestId, CLIENT_ERROR, "Job " + req.getJobId() + " not found. Could not resubmit worker"), getSelf()); } if(logger.isTraceEnabled()) { logger.trace("Exit JCA:onResubmitWorker {}", req); } } static final class JobInfo { final long submittedAt; public String version; volatile long initializeInitiatedAt = -1; volatile long initializedAt = -1; volatile long terminationInitiatedAt = -1; volatile long terminatedAt = -1; final JobId jobId; final ActorRef jobActor; volatile JobState state; final String user; final JobDefinition jobDefinition; JobInfo(JobId jobId, JobDefinition jobDefinition, long submittedAt, ActorRef jobActor, JobState state, String user, long initializeInitiatedAt, long initedAt) { this.submittedAt = submittedAt; this.jobActor = jobActor; this.jobId = jobId; this.state = state; this.user = user; this.jobDefinition = jobDefinition; this.initializeInitiatedAt = initializeInitiatedAt; this.initializedAt = initedAt; } @Override public String toString() { return "JobInfo{" + "submittedAt=" + submittedAt + ", initializeInitiatedAt=" + initializeInitiatedAt + ", initializedAt=" + initializedAt + ", terminationInitiatedAt=" + terminationInitiatedAt + ", terminatedAt=" + terminatedAt + ", jobId=" + jobId + ", jobActor=" + jobActor + ", state=" + state + ", user='" + user + '\'' + ", jobDefinition=" + jobDefinition + '}'; } void setInitializeInitiatedAt(long t) { this.initializeInitiatedAt = t; } void setInitializedAt(long t) { 
this.initializedAt = t; } void setState(JobState state) { this.state = state; } void setTerminationInitiatedAt(long terminationInitiatedAt) { this.terminationInitiatedAt = terminationInitiatedAt; } public void setTerminatedAt(long terminatedAt) { this.terminatedAt = terminatedAt; } JobInfo(JobId jobId, JobDefinition jobDefinition, long submittedAt, ActorRef jobActor, JobState state, String user) { this(jobId, jobDefinition, submittedAt, jobActor, state, user, -1, -1); } static class Builder { long submittedAt = -1; long initializeInitiatedAt = -1; long initializedAt = -1; JobId jobId = null; ActorRef jobActor = null; JobState state = null; String user = ""; JobDefinition jobDefinition = null; Builder withSubmittedAt(long submittedAt) { this.submittedAt = submittedAt; return this; } Builder withInitializeInitiatedAt(long t) { this.initializeInitiatedAt = t; return this; } Builder withInitializedAt(long t) { this.initializedAt = t; return this; } Builder withJobId(JobId jId) { this.jobId = jId; return this; } Builder withJobActor(ActorRef actor) { this.jobActor = actor; return this; } Builder withJobDefinition(JobDefinition jd) { this.jobDefinition = jd; return this; } Builder withUser(String user) { this.user = user; return this; } Builder withState(JobState state) { this.state = state; return this; } Builder usingJobMetadata(MantisJobMetadataImpl jobMeta, ActorRef actor) { this.jobId = jobMeta.getJobId(); this.jobDefinition = jobMeta.getJobDefinition(); this.submittedAt = jobMeta.getSubmittedAtInstant().toEpochMilli(); this.state = jobMeta.getState(); this.user = jobMeta.getUser(); this.jobActor = actor; return this; } JobInfo build() { Preconditions.checkNotNull(jobId, "JobId cannot be null"); Preconditions.checkNotNull(jobDefinition, "JobDefinition cannot be null"); Preconditions.checkNotNull(state, "state cannot be null"); Preconditions.checkNotNull(jobActor, "Job Actor cannot be null"); return new 
JobInfo(jobId,jobDefinition,submittedAt,jobActor,state,user,initializeInitiatedAt,initializedAt); } } } /** * Responsible of keeping track of Jobs Belonging to this cluster. * As a job moves from Accepted -> Launched -> Terminating -> Completed states it is moved between * the corresponding maps. * This class is NOT ThreadSafe the caller should ensure it is not accessed concurrently * @author njoshi * */ final static class JobManager { private final Logger logger = LoggerFactory.getLogger(JobManager.class); private final String name; // Map of Actor ref to JobId private final Map<ActorRef, JobId> actorToJobIdMap = new HashMap<>(); // Map of Job Actors pending initialization private final ConcurrentMap<JobId, JobInfo> pendingInitializationJobsMap = new ConcurrentHashMap<>(); // Map of Jobs in Launched state private final ConcurrentMap<JobId, JobInfo> activeJobsMap = new ConcurrentHashMap<>(); // Map of Jobs in accepted state private final ConcurrentMap<JobId, JobInfo> acceptedJobsMap = new ConcurrentHashMap<>(); private final Set<JobInfo> nonTerminalSortedJobSet = new TreeSet<>((o1, o2) -> { if (o1.submittedAt < o2.submittedAt) { return 1; } else if (o1.submittedAt > o2.submittedAt) { return -1; } else { return 0; } }); // Cache that deals with completed job private final CompletedJobCache completedJobsCache; // Map of Jobs in terminating state private final Map<JobId, JobInfo> terminatingJobsMap = new HashMap<>(); private final ActorContext context; private final MantisScheduler scheduler; private final LifecycleEventPublisher publisher; private final MantisJobStore jobStore; private final LabelCache labelCache = new LabelCache(); JobManager(String clusterName, ActorContext context, MantisScheduler scheduler, LifecycleEventPublisher publisher, MantisJobStore jobStore) { this.name = clusterName; this.jobStore = jobStore; this.context = context; this.scheduler = scheduler; this.publisher = publisher; this.completedJobsCache = new CompletedJobCache(name, labelCache); 
} /** * Invoked in a scheduled timer on the JobClusterActor to purge expired jobs * * @param tooOldCutOff Current cut off delta */ public void purgeOldCompletedJobs(long tooOldCutOff) { completedJobsCache.purgeOldCompletedJobs(tooOldCutOff, jobStore); } public void cleanupAllCompletedJobs() { completedJobsCache.forcePurgeCompletedJobs(jobStore); } Observable<JobProto.JobInitialized> bootstrapJob(MantisJobMetadataImpl jobMeta, IJobClusterMetadata jobClusterMetadata) { // create jobInfo JobInfo jobInfo = createJobInfoAndActorAndWatchActor(jobMeta, jobClusterMetadata); // add to appropriate map actorToJobIdMap.put(jobInfo.jobActor, jobInfo.jobId); if (jobInfo.state.equals(JobState.Accepted)) { acceptedJobsMap.put(jobInfo.jobId, jobInfo); nonTerminalSortedJobSet.add(jobInfo); } else if (jobInfo.state.equals(JobState.Launched)) { activeJobsMap.put(jobInfo.jobId, jobInfo); nonTerminalSortedJobSet.add(jobInfo); } else if (jobInfo.state.equals(JobState.Terminating_abnormal) || jobInfo.state.equals(JobState.Terminating_normal)) { terminatingJobsMap.put(jobInfo.jobId, jobInfo); nonTerminalSortedJobSet.add(jobInfo); } else { logger.warn("Unexpected job state {}", jobInfo.state); } long masterInitTimeoutSecs = ConfigurationProvider.getConfig().getMasterInitTimeoutSecs(); long timeout = ((masterInitTimeoutSecs - 60)) > 0 ? 
(masterInitTimeoutSecs - 60) : masterInitTimeoutSecs;
        Duration t = Duration.ofSeconds(timeout);
        // mark it as pending actor init
        markJobInitializeInitiated(jobInfo, System.currentTimeMillis());
        // Ask the job actor to initialize (bootstrap path: 'false' flag, no sender to notify).
        CompletionStage<JobProto.JobInitialized> respCS = ask(jobInfo.jobActor, new JobProto.InitJob(ActorRef.noSender(), false), t)
                .thenApply(JobProto.JobInitialized.class::cast);
        // Bridge the CompletionStage into an Observable on the io scheduler. Any failure
        // (including the ask timeout) is converted into a JobInitialized carrying
        // SERVER_ERROR instead of propagating onError downstream.
        return Observable.from(respCS.toCompletableFuture(), Schedulers.io())
                .onErrorResumeNext(ex -> {
                    logger.warn("caught exception {}", ex.getMessage(), ex);
                    return Observable.just(new JobProto.JobInitialized(1, SERVER_ERROR, "Timeout initializing Job " + jobInfo.jobId + " exception -> " + ex.getMessage(), jobInfo.jobId, ActorRef.noSender()));
                })
                .map((jobInited) -> {
                    // once init response received remove from pending init map.
                    markJobInitialized(jobInited.jobId, System.currentTimeMillis());
                    return jobInited;
                })
                ;
    }

    /**
     * Creates the job actor for the given metadata, marks the job Accepted and kicks off
     * asynchronous initialization, tracking the job as pending-init.
     * NOTE(review): InitJob is sent with 'true' here vs 'false' in the bootstrap path —
     * presumably distinguishing a fresh submit from recovery; confirm against JobProto.InitJob.
     */
    JobInfo initJob(MantisJobMetadataImpl jobMeta, IJobClusterMetadata jobClusterMetadata, ActorRef sender) {
        JobInfo jobInfo = createJobInfoAndActorAndWatchActor(jobMeta, jobClusterMetadata);
        markJobAccepted(jobInfo);
        jobInfo.jobActor.tell(new JobProto.InitJob(sender, true), context.self());
        markJobInitializeInitiated(jobInfo, System.currentTimeMillis());
        return jobInfo;
    }

    /** Spawns a child JobActor for the job, death-watches it and records its labels in the label cache. */
    JobInfo createJobInfoAndActorAndWatchActor(MantisJobMetadataImpl jobMeta, IJobClusterMetadata jobClusterMetadata) {
        ActorRef jobActor = context.actorOf(JobActor.props(jobClusterMetadata.getJobClusterDefinition(), jobMeta, jobStore, scheduler, publisher), "JobActor-" + jobMeta.getJobId().getId());
        context.watch(jobActor);
        // Add to label cache
        labelCache.addJobIdToLabelCache(jobMeta.getJobId(), jobMeta.getLabels());
        return new JobInfo.Builder()
                .usingJobMetadata(jobMeta, jobActor)
                .build();
    }

    /** Removes the job from the pending-init map and stamps its initialized-at time (no-op if absent). */
    void markJobInitialized(JobId jobId, long ts) {
        JobInfo removed = this.pendingInitializationJobsMap.remove(jobId);
        if (removed != null) {
            removed.setInitializedAt(ts);
        }
    }

    /** Stamps the init-initiated time and tracks the job as pending initialization (body continues on next line). */
    void markJobInitializeInitiated(JobInfo jobInfo, long ts) {
jobInfo.setInitializeInitiatedAt(ts);
    // mark it as pending actor init
    pendingInitializationJobsMap.put(jobInfo.jobId, jobInfo);
}

/**
 * During startup, if a job is found already in a terminal state it is persisted directly
 * to the completed-job and archive tables (no JobActor is created for it).
 *
 * @param jobMeta job metadata of completed job
 */
void persistToCompletedJobAndArchiveJobTables(IMantisJobMetadata jobMeta) {
    completedJobsCache.persistToCompletedJobAndArchiveJobTables(jobMeta, jobStore);
}

/**
 * Used during bootstrap to add the list of completedJobs to the in-memory cache.
 *
 * @param completedJobsList completed jobs loaded from the store
 */
void addCompletedJobsToCache(List<CompletedJob> completedJobsList) {
    completedJobsCache.addCompletedJobsToCache(completedJobsList);
}

/**
 * Called on Job Submit. Updates the acceptedJobsMap & actorToJobIdMap.
 * NOTE(review): duplicate detection checks the active/terminating/completed structures but
 * not acceptedJobsMap itself — presumably the caller guarantees a fresh JobId; confirm.
 *
 * @param jobInfo job info of accepted job
 * @return true if successful
 */
boolean markJobAccepted(JobInfo jobInfo) {
    boolean isSuccess = false;
    if (!jobInfo.state.isValidStateChgTo(JobState.Accepted)
            || activeJobsMap.containsKey(jobInfo.jobId)
            || terminatingJobsMap.containsKey(jobInfo.jobId)
            || completedJobsCache.containsKey(jobInfo.jobId)) {
        String warn = String.format("Job %s already exists", jobInfo.jobId);
        logger.warn(warn);
    } else {
        this.acceptedJobsMap.put(jobInfo.jobId, jobInfo);
        this.actorToJobIdMap.put(jobInfo.jobActor, jobInfo.jobId);
        nonTerminalSortedJobSet.add(jobInfo);
        isSuccess = true;
    }
    return isSuccess;
}

/**
 * Jobs whose actor initialization was started before the given timestamp and has not
 * completed yet (initializedAt still at its -1 sentinel).
 */
List<JobInfo> getPendingInitializationJobsPriorToCutoff(long ts) {
    return this.pendingInitializationJobsMap.values().stream().filter((jInfo) -> {
        if (jInfo.initializedAt == -1 && jInfo.initializeInitiatedAt < ts) {
            return true;
        }
        return false;
    })
    .collect(Collectors.toList());
}

/**
 * Transition job to terminating state.
 *
 * @param jobInfo For the job which is terminating
 * @param newState whether it is normal or abnormal termination
 * @return true if successful
 */
boolean markJobTerminating(JobInfo jobInfo, JobState newState) {
    boolean isSuccess = false;
    if (JobState.isTerminalState(newState) && jobInfo.state.isValidStateChgTo(newState)) {
        this.activeJobsMap.remove(jobInfo.jobId);
        this.acceptedJobsMap.remove(jobInfo.jobId);
        // terminating jobs remain tracked in the non-terminal sorted set until fully completed
        nonTerminalSortedJobSet.add(jobInfo);
        jobInfo.setState(newState);
        this.terminatingJobsMap.put(jobInfo.jobId, jobInfo);
        jobInfo.setTerminationInitiatedAt(System.currentTimeMillis());
        isSuccess = true;
    } else {
        String warn = "Unexpected job terminating event " + jobInfo.jobId + " Invalid transition from state "
            + jobInfo.state + " to state " + newState + " ";
        logger.warn(warn);
    }
    return isSuccess;
}

/**
 * Marks the job as started by putting it into the activejobsmap
 * in case of a valid transition
 *
 * @param jobInfo job info for the job that just started
 * @return true if successful and false if failed due to an invalid transition
 */
boolean markJobStarted(JobInfo jobInfo) {
    boolean success = false;
    if (jobInfo.state.isValidStateChgTo(JobState.Launched)) {
        jobInfo.setState(JobState.Launched);
        // remove from accepted jobs map
        this.acceptedJobsMap.remove(jobInfo.jobId);
        // add to active jobs map
        this.activeJobsMap.put(jobInfo.jobId, jobInfo);
        nonTerminalSortedJobSet.add(jobInfo);
        success = true;
    } else {
        String warn = String.format("Unexpected job started event %s Invalid transition from state %s to state %s",
            jobInfo.jobId, jobInfo.state, JobState.Launched);
        logger.warn(warn);
    }
    return success;
}

/** Convenience overload that stamps the completion time with the current wall clock. */
Optional<CompletedJob> markCompleted(JobId jId, Optional<IMantisJobMetadata> jobMetadata, JobState state) {
    return markCompleted(jId, System.currentTimeMillis(), jobMetadata, state);
}

/**
 * Invoked during clean up phase when the Job Actor has informed the Cluster that all workers have been terminated
 *
 * @param jId job id of the job that completed
 * @return An instance
of CompletedJob that would be used to persist to storage.
 */
Optional<CompletedJob> markCompleted(JobId jId, long completionTime, Optional<IMantisJobMetadata> jobMetadata,
                                     JobState state) {
    if (logger.isTraceEnabled()) { logger.trace("Enter markCompleted job {}", jId); }
    Optional<JobInfo> jobInfoOp = getJobInfoForNonTerminalJob(jId);
    if (jobInfoOp.isPresent()) {
        JobInfo jInfo = jobInfoOp.get();
        jInfo.state = state;
        jInfo.setTerminatedAt(completionTime);
        // drop the job from every non-terminal tracking structure
        this.acceptedJobsMap.remove(jId);
        this.terminatingJobsMap.remove(jId);
        this.activeJobsMap.remove(jId);
        this.actorToJobIdMap.remove(jobInfoOp.get().jobActor);
        this.nonTerminalSortedJobSet.remove(jInfo);
        if (logger.isTraceEnabled()) { logger.trace("Exit markCompleted job {}", jId); }
        // prefer the state/version recorded in the metadata when available
        JobState finalState = JobState.Completed;
        String version = null;
        if (jobMetadata.isPresent()) {
            finalState = jobMetadata.get().getState();
            version = jobMetadata.get().getJobDefinition().getVersion();
        }
        return this.completedJobsCache.markCompleted(jId, jobMetadata, jInfo.submittedAt, completionTime,
            jInfo.user, version, finalState, jobStore);
    } else {
        logger.warn("No such job {}", jId);
        return empty();
    }
}

/**
 * Records a terminal job directly into the completed-job cache during master startup.
 * NOTE(review): the {@code state} parameter is unused — the final state is derived from
 * {@code jobMetadata.getState()}; confirm whether the parameter can be removed.
 */
void markCompletedDuringStartup(JobId jId, long completionTime, IMantisJobMetadata jobMetadata, JobState state) {
    if (logger.isTraceEnabled()) { logger.trace("Enter markCompletedDuringStartup job {}", jId); }
    JobState finalState = JobState.isTerminalState(jobMetadata.getState()) ? jobMetadata.getState() : JobState.Completed;
    String version = jobMetadata.getJobDefinition().getVersion();
    this.completedJobsCache.markCompleted(jId, of(jobMetadata), jobMetadata.getSubmittedAtInstant().toEpochMilli(),
        completionTime, jobMetadata.getUser(), version, finalState, jobStore);
}

/** Snapshot copy of all accepted/launched/terminating jobs in sorted order. */
List<JobInfo> getAllNonTerminalJobsList() {
    List<JobInfo> allJobsList = new ArrayList<>(this.nonTerminalSortedJobSet);
    if (logger.isTraceEnabled()) { logger.trace("Exiting JobClusterActor:getAllNonTerminatlJobsList {}", allJobsList); }
    return allJobsList;
}

/**
 * List of Jobs in accepted state.
 * @return list of accepted job info
 */
List<JobInfo> getAcceptedJobsList() {
    List<JobInfo> acceptedJobsList = Lists.newArrayListWithExpectedSize(this.acceptedJobsCount());
    acceptedJobsList.addAll(this.acceptedJobsMap.values());
    return Collections.unmodifiableList(acceptedJobsList);
}

/**
 * List of Jobs in active state
 * @return list of active job info
 */
List<JobInfo> getActiveJobsList() {
    List<JobInfo> activeJobList = Lists.newArrayListWithExpectedSize(activeJobsMap.size());
    activeJobList.addAll(this.activeJobsMap.values());
    return Collections.unmodifiableList(activeJobList);
}

/**
 * List of jobs in completed state
 * @return list of completed jobs
 */
List<CompletedJob> getCompletedJobsList() {
    return new ArrayList<>(completedJobsCache.getCompletedJobSortedSet());
}

/** Snapshot of jobs currently in a terminating state. */
List<JobInfo> getTerminatingJobsList() {
    List<JobInfo> terminatingJobsList = Lists.newArrayListWithExpectedSize(terminatingJobsMap.size());
    terminatingJobsList.addAll(this.terminatingJobsMap.values());
    return Collections.unmodifiableList(terminatingJobsList);
}

/**
 * No. of jobs in accepted state
 * @return no of accepted jobs
 */
int acceptedJobsCount() {
    return this.acceptedJobsMap.size();
}

/**
 * No. of jobs in running state
 * @return no of active jobs
 */
int activeJobsCount() {
    return this.activeJobsMap.size();
}

Optional<CompletedJob> getCompletedJob(JobId jId) {
    return completedJobsCache.getCompletedJob(jId);
}

/** Looks up archived metadata for a completed job given its string job id. */
Optional<IMantisJobMetadata> getJobDataForCompletedJob(String jId) {
    Optional<JobId> jobId = JobId.fromId(jId);
    if (jobId.isPresent()) {
        return completedJobsCache.getJobDataForCompletedJob(jobId.get(), jobStore);
    } else {
        logger.warn("Invalid Job Id {} in getJobDataForCompletedJob", jId);
        return empty();
    }
}

/**
 * Returns the JobInfo associated with the JobId. The Job could be in Accepted, Launched or Terminating states
 * But not terminated state.
 * @param jId JobId whose JobInfo is being lookedup
 * @return JobInfo corresponding to the jobId, empty if not found
 */
Optional<JobInfo> getJobInfoForNonTerminalJob(JobId jId) {
    if (logger.isTraceEnabled()) { logger.trace("In getJobInfo {}", jId); }
    if (acceptedJobsMap.containsKey(jId)) {
        if (logger.isDebugEnabled()) { logger.debug("Found {} in accepted state", jId); }
        return of(acceptedJobsMap.get(jId));
    } else if (activeJobsMap.containsKey(jId)) {
        if (logger.isDebugEnabled()) { logger.debug("Found {} in active state", jId); }
        return of(activeJobsMap.get(jId));
    } else if (this.terminatingJobsMap.containsKey(jId)) {
        if (logger.isDebugEnabled()) { logger.debug("Found {} in terminating state", jId); }
        return of(terminatingJobsMap.get(jId));
    }
    return empty();
}

/** String-id overload of {@link #getJobInfoForNonTerminalJob(JobId)}. */
Optional<JobInfo> getJobInfoForNonTerminalJob(String jobId) {
    Optional<JobId> jId = JobId.fromId(jobId);
    if (jId.isPresent()) {
        return getJobInfoForNonTerminalJob(jId.get());
    }
    return empty();
}

/** Finds the first non-terminal job whose SLA user-provided type equals {@code uniqueId}. */
Optional<JobInfo> getJobInfoByUniqueId(final String uniqueId) {
    return this.getAllNonTerminalJobsList().stream().filter((jobInfo) -> {
        String unq = jobInfo.jobDefinition.getJobSla().getUserProvidedType();
        return unq != null && !unq.isEmpty() && unq.equals(uniqueId);
    }).findFirst();
}

/** Jobs whose actor init started before {@code now - allowedDelay} and is still pending. */
private List<JobInfo> getJobActorsStuckInInit(long now, long allowedDelay) {
    return
getPendingInitializationJobsPriorToCutoff(now - allowedDelay)
        .stream()
        .peek((jobInfo) -> logger.warn("Job {} waiting for initialization since {}", jobInfo.jobId,
            jobInfo.initializeInitiatedAt))
        .collect(Collectors.toList());
}

/** Accepted jobs that were submitted before {@code now - allowedDelay}. */
private List<JobInfo> getJobsStuckInAccepted(long now, long allowedDelay) {
    return getAcceptedJobsList().stream()
        .filter((jobInfo -> jobInfo.submittedAt < now - allowedDelay))
        .peek((jobInfo) -> logger.warn("Job {} stuck in accepted since {}", jobInfo.jobId,
            Instant.ofEpochMilli(jobInfo.submittedAt)))
        .collect(Collectors.toList());
}

/** Terminating jobs whose termination began before {@code now - allowedDelay}. */
private List<JobInfo> getJobsStuckInTerminating(long now, long allowedDelay) {
    return getTerminatingJobsList().stream()
        .filter((jobInfo -> jobInfo.terminationInitiatedAt < now - allowedDelay))
        .peek((jobInfo) -> logger.warn("Job {} stuck in terminating since {}", jobInfo.jobId,
            Instant.ofEpochMilli(jobInfo.terminationInitiatedAt)))
        .collect(Collectors.toList());
}

/** True when there are neither active nor accepted jobs (terminating jobs are ignored here). */
boolean isJobListEmpty() {
    return activeJobsMap.isEmpty() && acceptedJobsMap.isEmpty();
}

/**
 * Label query entry point; {@code labelsOp} selects AND vs (default) OR semantics.
 */
public Set<JobId> getJobsMatchingLabels(List<Label> labels, Optional<String> labelsOp) {
    boolean isAnd = false;
    if (labelsOp.isPresent()) {
        if (labelsOp.get().equalsIgnoreCase(LabelUtils.AND_OPERAND)) {
            isAnd = true;
        }
    }
    return labelCache.getJobIdsMatchingLabels(labels, isAnd);
}
}

/**
 * Maintains a map of label to JobId. Note the map is Label to Job Id and not
 * Label.key to JobId.
 */
final static class LabelCache {
    // Label -> set of jobs carrying that label
    final Map<Label, Set<JobId>> labelJobIdMap = new HashMap<>();
    // Reverse index: job -> its labels, used to clean up labelJobIdMap on removal
    final Map<JobId, List<Label>> jobIdToLabelMap = new HashMap<>();
    private final Logger logger = LoggerFactory.getLogger(LabelCache.class);

    /**
     * Invoked in the following ways
     * 1. During bootstrap of Job cluster when a Job Actor is created for an existing running job
     * 2. When a new Job Actor is created during job submission
     * 3. When the completed jobs list is being populated at bootstrap
     * @param jobId     job to index
     * @param labelList labels of that job (null tolerated: no-op)
     */
    void addJobIdToLabelCache(JobId jobId, List<Label> labelList) {
        if (logger.isTraceEnabled()) {
            logger.trace("addJobIdToLabelCache " + jobId + " labelList " + labelList + " current map " + labelJobIdMap);
        }
        if (labelList == null) {
            return;
        }
        for (Label label : labelList) {
            Set<JobId> jobIds = labelJobIdMap.get(label);
            if (jobIds != null) {
                jobIds.add(jobId);
            } else {
                Set<JobId> jobIdList = new HashSet<>();
                jobIdList.add(jobId);
                labelJobIdMap.put(label, jobIdList);
            }
        }
        jobIdToLabelMap.put(jobId, labelList);
        if (logger.isTraceEnabled()) {
            logger.trace("Exit addJobIdToLabelCache " + jobId + " labelList " + labelList + " new map " + labelJobIdMap);
        }
    }

    /**
     * Invoked when a job is completely purged from the system.
     * This happens after a completed job hits its expiry time.
     * @param jobId job being purged
     */
    void removeJobIdFromLabelCache(JobId jobId) {
        if (logger.isTraceEnabled()) {
            logger.trace("removeJobIdFromLabelCache " + jobId + " current map " + labelJobIdMap);
        }
        List<Label> labels = jobIdToLabelMap.get(jobId);
        if (labels != null) {
            for (Label label : labels) {
                Set<JobId> jobIds = labelJobIdMap.get(label);
                jobIds.remove(jobId);
                // drop labels whose last job was removed
                if (jobIds.isEmpty()) {
                    labelJobIdMap.remove(label);
                }
            }
        }
        jobIdToLabelMap.remove(jobId);
        if (logger.isTraceEnabled()) {
            logger.trace("Exit removeJobIdFromLabelCache " + jobId + " current map " + labelJobIdMap);
        }
    }

    /**
     * Invoked during jobList and jobIdList api calls.
     * 1. For each label find the Set of JobIds that have this label
     * 2. Then based on whether the query is an AND or OR perform a set
     * intersection or union and return the result.
     * @param labelList labels queried (null tolerated: empty result)
     * @param isAnd     true for intersection, false for union
     * @return matching job ids
     */
    Set<JobId> getJobIdsMatchingLabels(List<Label> labelList, boolean isAnd) {
        if (logger.isTraceEnabled()) {
            logger.trace("Entering getJobidsMatchingLabels " + labelList + " is and ? " + isAnd + " with map " + labelJobIdMap);
        }
        Set<JobId> matchingJobIds = new HashSet<>();
        List<Set<JobId>> matchingSubsets = new ArrayList<>();
        if (labelList == null) {
            return matchingJobIds;
        }
        // build a defensive copy per label so the combiners below cannot mutate the index
        for (Label label : labelList) {
            if (labelJobIdMap.containsKey(label)) {
                Set<JobId> st = new HashSet<>();
                st.addAll(labelJobIdMap.get(label));
                matchingSubsets.add(st);
            } else {
                // label not present add empty set
                matchingSubsets.add(new HashSet<>());
            }
        }
        Set<JobId> resu = (isAnd) ? getSetIntersection(matchingSubsets) : getSetUnion(matchingSubsets);
        if (logger.isTraceEnabled()) {
            logger.trace("Exiting getJobidsMatchingLabels " + resu);
        }
        return resu;
    }

    /**
     * Uses the built in feature of Set API to perform a union of 'n' sets.
     * NOTE(review): mutates the first set in the list via addAll; safe here because the
     * caller passes freshly copied sets — confirm before reusing elsewhere.
     * @param listOfSets sets to union (null/empty tolerated)
     * @return the union
     */
    private Set<JobId> getSetUnion(List<Set<JobId>> listOfSets) {
        if (logger.isTraceEnabled()) { logger.trace("In getSetUnion " + listOfSets); }
        Set<JobId> unionSet = new HashSet<>();
        if (listOfSets == null || listOfSets.isEmpty()) return unionSet;
        int i = 0;
        unionSet = listOfSets.get(i);
        i++;
        while (i < listOfSets.size()) {
            Set<JobId> jobIds = listOfSets.get(i);
            unionSet.addAll(jobIds);
            i++;
        }
        if (logger.isTraceEnabled()) { logger.trace("Exit getSetUnion " + unionSet); }
        return unionSet;
    }

    /**
     * Uses the built in retainAll method to perform an intersection across
     * 'n' sets. Mutates the first set in the list (see note on getSetUnion).
     * @param listOfSets sets to intersect (null/empty tolerated)
     * @return the intersection
     */
    private Set<JobId> getSetIntersection(List<Set<JobId>> listOfSets) {
        if (logger.isTraceEnabled()) { logger.trace("In getSetIntersection " + listOfSets); }
        Set<JobId> intersectionSet = new HashSet<>();
        if (listOfSets == null || listOfSets.isEmpty()) return intersectionSet;
        int i = 0;
        intersectionSet = listOfSets.get(i);
        i++;
        while (i < listOfSets.size()) {
            Set<JobId> jobIds = listOfSets.get(i);
            intersectionSet.retainAll(jobIds);
            i++;
        }
        if (logger.isTraceEnabled()) { logger.trace("Return getSetIntersection " + intersectionSet); }
        return intersectionSet;
    }
}

/**
 * Consolidates all processing of completed jobs
 */
static class CompletedJobCache {
    private final Logger logger = LoggerFactory.getLogger(CompletedJobCache.class);
    // Set of sorted terminal jobs, most recently terminated first
    private final Set<CompletedJob> terminalSortedJobSet = new TreeSet<>((o1, o2) -> {
        if (o1.getTerminatedAt() < o2.getTerminatedAt()) {
            return 1;
        } else if (o1.getTerminatedAt() > o2.getTerminatedAt()) {
            return -1;
        } else {
            return 0;
        }
    });
    // cluster name
    private final String name;
    // Map of completed jobs
    private final Map<JobId, CompletedJob> completedJobs = new HashMap<>();
    // Labels lookup map
    private final LabelCache labelsCache;
    // Map of jobmetadata
    private final Map<JobId, IMantisJobMetadata> jobIdToMetadataMap = new HashMap<>();

    public CompletedJobCache(String clusterName, LabelCache labelsCache) {
        this.name = clusterName;
        this.labelsCache = labelsCache;
    }

    public Set<CompletedJob> getCompletedJobSortedSet() {
        return terminalSortedJobSet;
    }

    public Optional<CompletedJob> getCompletedJob(JobId jId) {
        return ofNullable(completedJobs.getOrDefault(jId, null));
    }

    /**
     * If job data exists in cache return it else call getArchiveJob
     * @param jId      completed job id
     * @param jobStore store used to look up archived metadata on a cache miss
     * @return the job metadata if found
     */
    public Optional<IMantisJobMetadata> getJobDataForCompletedJob(JobId jId, MantisJobStore jobStore) {
        if (this.jobIdToMetadataMap.containsKey(jId)) {
            return of(jobIdToMetadataMap.get(jId));
        } else {
            return
jobStore.getArchivedJob(jId.getId());
        }
    }

    public Set<JobId> getJobIdsMatchingLabels(List<Label> labelList, boolean isAnd) {
        return labelsCache.getJobIdsMatchingLabels(labelList, isAnd);
    }

    /**
     * Records a job as completed: builds the CompletedJob record, adds it to the sorted
     * set/cache and persists it. Idempotent — a second call for the same id returns the
     * existing record.
     */
    public Optional<CompletedJob> markCompleted(JobId jId, Optional<IMantisJobMetadata> jobMetadata, long submittedAt,
                                                long completionTime, String user, String version, JobState finalState,
                                                MantisJobStore jobStore) {
        // make sure its not already marked completed
        if (!completedJobs.containsKey(jId)) {
            // create completed job
            List<Label> labels = new ArrayList<>();
            if (jobMetadata.isPresent()) {
                labels = jobMetadata.get().getLabels();
            }
            final CompletedJob completedJob = new CompletedJob(name, jId.getId(), version, finalState, submittedAt,
                completionTime, user, labels);
            // add to sorted set
            terminalSortedJobSet.add(completedJob);
            try {
                // add to local cache and store table
                addToCacheAndSaveCompletedJobToStore(completedJob, jobMetadata, jobStore);
            } catch (Exception e) {
                // best-effort persistence: the in-memory record survives even if the store write fails
                logger.warn("Unable to save {} to completed jobs table due to {}", completedJob, e.getMessage());
            }
            return of(completedJob);
        } else {
            logger.warn("Job {} already marked completed", jId);
            return of(completedJobs.get(jId));
        }
    }

    /**
     * Completely delete jobs that are older than cut off
     * @param tooOldCutOff timestamp, all jobs having an older timestamp should be deleted
     * @param jobStore store used to delete job and completed-job records
     */
    public void purgeOldCompletedJobs(long tooOldCutOff, MantisJobStore jobStore) {
        long numDeleted = 0;
        int maxJobsToPurge = ConfigurationProvider.getConfig().getMaxJobsToPurge();
        final long startNanos = System.nanoTime();
        for (Iterator<CompletedJob> it = completedJobs.values().iterator(); it.hasNext();) {
            if (numDeleted == maxJobsToPurge) {
                logger.info("{} Max clean up limit of {} reached. Stop clean up", name, maxJobsToPurge);
                break;
            }
            CompletedJob completedJob = it.next();
            if (completedJob.getTerminatedAt() < tooOldCutOff) {
                try {
                    logger.info("Purging Job {} as it was terminated at {} which is older than cutoff {}",
                        completedJob, completedJob.getTerminatedAt(), tooOldCutOff);
                    terminalSortedJobSet.remove(completedJob);
                    jobStore.deleteJob(completedJob.getJobId());
                    jobStore.deleteCompletedJob(name, completedJob.getJobId());
                    it.remove();
                    Optional<JobId> jobId = JobId.fromId(completedJob.getJobId());
                    if (jobId.isPresent()) {
                        this.jobIdToMetadataMap.remove(jobId.get());
                        labelsCache.removeJobIdFromLabelCache(jobId.get());
                    }
                } catch (Exception e) {
                    logger.warn("Unable to purge job {} due to {}", completedJob, e.getMessage());
                }
                // counts attempts (including failed deletes) toward the per-run purge limit
                numDeleted++;
            } else {
                if (logger.isDebugEnabled()) {
                    logger.debug("Job {} was terminated at {} which is not older than cutoff {}", completedJob,
                        completedJob.getTerminatedAt(), tooOldCutOff);
                }
            }
        }
        if (numDeleted > 0) {
            final long endNanos = System.nanoTime();
            logger.info("Took {} micros to clean up {} jobs in cluster {} ", (endNanos - startNanos) / 1000,
                numDeleted, this.name);
        }
    }

    /**
     * During Job Cluster delete, purge all records of completed jobs
     * @param jobStore store used to delete job and completed-job records
     */
    void forcePurgeCompletedJobs(MantisJobStore jobStore) {
        for (Iterator<CompletedJob> it = completedJobs.values().iterator(); it.hasNext();) {
            CompletedJob completedJob = it.next();
            try {
                logger.info("Purging Job {} during job cluster cleanup", completedJob);
                terminalSortedJobSet.remove(completedJob);
                jobStore.deleteJob(completedJob.getJobId());
                jobStore.deleteCompletedJob(name, completedJob.getJobId());
                it.remove();
                Optional<JobId> jobId = JobId.fromId(completedJob.getJobId());
                if (jobId.isPresent()) {
                    this.jobIdToMetadataMap.remove(jobId.get());
                    labelsCache.removeJobIdFromLabelCache(jobId.get());
                }
            } catch (Exception e) {
                logger.warn("Unable to purge job {} due to {}", completedJob, e.getMessage());
            }
        }
    }

    /**
     * During startup if a job is in terminal state then directly mark it as completed
     * @param jobMeta job metadata of completed job
     */
    public void persistToCompletedJobAndArchiveJobTables(IMantisJobMetadata jobMeta, MantisJobStore jobStore) {
        try {
            Instant endedAt = jobMeta.getEndedAtInstant().orElse(Instant.now());
            final CompletedJob completedJob = new CompletedJob(name, jobMeta.getJobId().getId(), null,
                jobMeta.getState(), jobMeta.getSubmittedAtInstant().toEpochMilli(), endedAt.toEpochMilli(),
                jobMeta.getUser(), jobMeta.getLabels());
            addToCacheAndSaveCompletedJobToStore(completedJob, of(jobMeta), jobStore);
            // normally archiving is done by job actor, but these are jobs in active table that weren't archived
            jobStore.archiveJob(jobMeta);
        } catch (Exception e) {
            logger.warn("Unable to save completed job {} to store due to {}", jobMeta, e.getMessage());
        }
    }

    /** Adds the record to every in-memory index and persists it to the completed-job table. */
    private void addToCacheAndSaveCompletedJobToStore(CompletedJob completedJob,
                                                      Optional<IMantisJobMetadata> jobMetaData,
                                                      MantisJobStore jobStore) throws Exception {
        Optional<JobId> jId = JobId.fromId(completedJob.getJobId());
        if (jId.isPresent()) {
            labelsCache.addJobIdToLabelCache(jId.get(), completedJob.getLabelList());
            completedJobs.put(jId.get(), completedJob);
            terminalSortedJobSet.add(completedJob);
            if (jobMetaData.isPresent()) {
                jobIdToMetadataMap.put(jId.get(), jobMetaData.get());
            }
            jobStore.storeCompletedJobForCluster(name, completedJob);
        } else {
            logger.warn("Invalid job id {} in addToCAcheAndSaveCompletedJobToStore ", completedJob);
        }
    }

    /**
     * Bulk add completed jobs to cache
     * @param completedJobsList jobs loaded from the store at bootstrap (null tolerated)
     */
    public void addCompletedJobsToCache(List<CompletedJob> completedJobsList) {
        if (completedJobsList == null) {
            logger.warn("addCompletedJobsToCache called with null completedJobsList");
            return;
        }
        this.terminalSortedJobSet.addAll(completedJobsList);
        completedJobsList.forEach((compJob) -> {
            Optional<JobId> jId = JobId.fromId(compJob.getJobId());
            if (jId.isPresent()) {
                completedJobs.put(jId.get(), compJob);
                labelsCache.addJobIdToLabelCache(jId.get(), compJob.getLabelList());
            }
            else {
                logger.warn("Invalid job Id {}", compJob.getJobId());
            }
        });
    }

    public boolean containsKey(JobId jobId) {
        return completedJobs.containsKey(jobId);
    }
}

/** Manages registration/teardown of the cron trigger that drives SLA-based job submission. */
static class CronManager {
    private static final TriggerOperator triggerOperator;
    private static final Logger logger = LoggerFactory.getLogger(CronManager.class);

    // Shared, eagerly-initialized trigger scheduler; failure to init is fatal for the class.
    static {
        triggerOperator = new TriggerOperator(1);
        try {
            triggerOperator.initialize();
        } catch (SchedulerException e) {
            logger.error("Unexpected: " + e.getMessage(), e);
            throw new RuntimeException(e);
        }
    }

    private final String cronSpec;
    private final CronPolicy policy;
    private final ActorRef clusterActor;
    private String triggerId;
    private final String jobClusterName;
    private String triggerGroup = null;
    private CronTrigger<ActorRef> scheduledTrigger;
    private boolean isCronActive = false;

    CronManager(String jobClusterName, ActorRef clusterActor, SLA sla) throws Exception {
        this.jobClusterName = jobClusterName;
        cronSpec = sla.getCronSpec();
        policy = sla.getCronPolicy();
        this.clusterActor = clusterActor;
        // only clusters with a cron spec in their SLA get a trigger registered
        if (cronSpec != null) {
            initCron();
        }
    }

    private void initCron() throws Exception {
        // no-op when there is no spec or a trigger is already registered
        if (cronSpec == null || triggerId != null) {
            return;
        }
        logger.info("Init'ing cron for " + jobClusterName);
        // identity-based suffix keeps the trigger group unique per CronManager instance
        triggerGroup = jobClusterName + "-" + this;
        try {
            scheduledTrigger = new CronTrigger<>(cronSpec, jobClusterName, clusterActor, ActorRef.class,
                CronTriggerAction.class);
            triggerId = triggerOperator.registerTrigger(triggerGroup, scheduledTrigger);
            isCronActive = true;
        } catch (IllegalArgumentException e) {
            throw new SchedulerException(e.getMessage(), e);
        }
    }

    private void destroyCron() {
        try {
            if (triggerId != null) {
                logger.info("Destroying cron " + triggerId);
                triggerOperator.deleteTrigger(triggerGroup, triggerId);
                triggerId = null;
                isCronActive = false;
            }
        } catch (TriggerNotFoundException | SchedulerException e) {
            logger.warn("Couldn't delete trigger group " + triggerGroup + ", id " + triggerId);
        }
    }

    boolean isCronActive() {
        return isCronActive;
    }
}

public static class
CronTriggerAction implements Action1<ActorRef> { @Override public void call(ActorRef jobClusterActor) { jobClusterActor.tell(new JobClusterProto.TriggerCronRequest(), ActorRef.noSender()); } } }
4,371
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/JobClusterMetadataImpl.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.master.jobcluster;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.server.master.domain.IJobClusterDefinition;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl;
import io.mantisrx.server.master.domain.JobId;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;

/**
 * Immutable metadata describing a JobCluster as persisted by the master: the
 * {@link IJobClusterDefinition} (name, owner, SLA with optional cron spec/policy, worker
 * migration config, and the list of job configurations — artifact, version, per-stage
 * scheduling info and default parameters) together with cluster-level bookkeeping:
 * the last job sequence number handed out and whether the cluster is disabled.
 *
 * @author njoshi
 */
// Moved from the constructor (where Jackson does not honor it) to the class level so
// ignoreUnknown actually takes effect when deserializing persisted cluster state.
@JsonIgnoreProperties(ignoreUnknown = true)
public class JobClusterMetadataImpl implements IJobClusterMetadata {

    // Full cluster definition (owner, SLA, migration config, job configs).
    private final IJobClusterDefinition jobClusterDefinition;
    // Monotonically increasing count used to derive the next JobId for this cluster.
    private final long lastJobCount;
    // When true the cluster rejects new job submissions.
    private final boolean disabled;

    /**
     * Creates the metadata instance; also used by Jackson when deserializing persisted state.
     *
     * @param jcDefn       the cluster definition
     * @param lastJobCount last job sequence number issued for this cluster
     * @param disabled     whether the cluster is currently disabled
     */
    @JsonCreator
    public JobClusterMetadataImpl(@JsonProperty("jobClusterDefinition") JobClusterDefinitionImpl jcDefn,
                                  @JsonProperty("lastJobCount") long lastJobCount,
                                  @JsonProperty("disabled") boolean disabled) {
        this.jobClusterDefinition = jcDefn;
        this.lastJobCount = lastJobCount;
        this.disabled = disabled;
    }

    /* (non-Javadoc)
     * @see io.mantisrx.master.jobcluster.IJobClusterMetadata#getJobClusterDefinition()
     */
    @Override
    public IJobClusterDefinition getJobClusterDefinition() {
        return jobClusterDefinition;
    }

    /* (non-Javadoc)
     * @see io.mantisrx.master.jobcluster.IJobClusterMetadata#getLastJobCount()
     */
    @Override
    public long getLastJobCount() {
        return lastJobCount;
    }

    /* (non-Javadoc)
     * @see io.mantisrx.master.jobcluster.IJobClusterMetadata#isDisabled()
     */
    @Override
    public boolean isDisabled() {
        return disabled;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        JobClusterMetadataImpl that = (JobClusterMetadataImpl) o;
        return lastJobCount == that.lastJobCount
            && disabled == that.disabled
            && Objects.equals(jobClusterDefinition, that.jobClusterDefinition);
    }

    @Override
    public int hashCode() {
        return Objects.hash(jobClusterDefinition, lastJobCount, disabled);
    }

    @Override
    public String toString() {
        return "JobClusterMetadataImpl [jobClusterDefinition=" + jobClusterDefinition + ", lastJobCount="
            + lastJobCount + ", disabled=" + disabled + "]";
    }

    /** Fluent builder for {@link JobClusterMetadataImpl}. */
    public static class Builder {
        private JobClusterDefinitionImpl jobClusterDefinition;
        private long lastJobCount = 0;
        private boolean disabled;

        public Builder() {}

        public Builder withJobClusterDefinition(JobClusterDefinitionImpl jobClusterDef) {
            this.jobClusterDefinition = jobClusterDef;
            return this;
        }

        public Builder withLastJobCount(long lastJobCnt) {
            this.lastJobCount = lastJobCnt;
            return this;
        }

        public Builder withIsDisabled(boolean disabled) {
            this.disabled = disabled;
            return this;
        }

        public IJobClusterMetadata build() {
            return new JobClusterMetadataImpl(this.jobClusterDefinition, this.lastJobCount, this.disabled);
        }

        /**
         * Builds directly from the supplied values, ignoring the builder's own state.
         * BUG FIX: previously passed the builder field {@code lastJobCount} instead of the
         * {@code lastJobCnt} argument, silently dropping the caller-supplied count.
         */
        public IJobClusterMetadata build(JobClusterDefinitionImpl def, long lastJobCnt, boolean isDisabled) {
            return new JobClusterMetadataImpl(def, lastJobCnt, isDisabled);
        }
    }
}
4,372
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/JobDefinitionResolver.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.jobcluster;

import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.server.master.domain.JobClusterConfig;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.scheduler.ScheduleRequest;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;

import static java.util.Optional.empty;
import static java.util.Optional.of;

/**
 * This class is responsible for 'filling the blanks' in the provided JobDefinition during a Job Submit.
 * Missing parameters/labels are inherited from the cluster definition; artifact, version and scheduling
 * info are resolved against the cluster's stored {@link JobClusterConfig}s per the matrix below.
 */
public class JobDefinitionResolver {

    private final Logger logger = LoggerFactory.getLogger(JobDefinitionResolver.class);

    /**
     *
     * Encodes the logic of how to resolve the relevant fields of the submitted JobDefinition.
     * Artifact | Version | SchedulingInfo | Resolution
     * -------------------------------------------------
     *    Y     |    Y    |       Y        | Use given scheduling info
     * ------------------------------------------------------------
     *    Y     |    Y    |       N        | INVALID (new artifact with no sched info)
     * -------------------------------------------------------------
     *    Y     |    N    |       Y        | Generate version and use given sched info
     * --------------------------------------------------------------
     *    Y     |    N    |       N        | INVALID (new artifact with no sched info)
     * ---------------------------------------------------------------
     *    N     |    Y    |       Y        | Lookup Cluster Config for given Version, get the SchedInfo from it and ensure given SchedInfo is compatible
     * ----------------------------------------------------------------
     *    N     |    Y    |       N        | Lookup Cluster config for given version and use it
     * -----------------------------------------------------------------
     *    N     |    N    |       Y        | Get latest cluster config, get the SchedInfo from it and ensure given SchedInfo is compatible
     * -----------------------------------------------------------------
     *    N     |    N    |       N        | Get latest cluster config, get the SchedInfo from it
     * -------------------------------------------------------------------
     * @param user user submitting the job; stamped onto the resolved JobDefinition
     * @param givenJobDefnOp the (possibly partial) JobDefinition supplied at submit time; must not be null
     * @param jobClusterMetadata metadata of the owning job cluster, source of defaults; must not be null
     * @return a fully resolved JobDefinition (parameters, labels, artifact, version, schedulingInfo all set)
     * @throws Exception if the combination of inputs is invalid per the matrix above, or resolution fails
     */
    JobDefinition getResolvedJobDefinition(final String user, final JobDefinition givenJobDefnOp, final IJobClusterMetadata jobClusterMetadata) throws Exception {
        Preconditions.checkNotNull(givenJobDefnOp, "JobDefinition cannot be null");
        Preconditions.checkNotNull(jobClusterMetadata, "JobClusterMetadata cannot be null");
        JobDefinition resolvedJobDefn = givenJobDefnOp;
        logger.info("Given JobDefn {}", resolvedJobDefn);

        // inherit params from cluster if not specified
        List<Parameter> parameters = (resolvedJobDefn.getParameters() != null && !resolvedJobDefn.getParameters().isEmpty()) ? resolvedJobDefn.getParameters() : jobClusterMetadata.getJobClusterDefinition().getParameters();

        // inherit labels from cluster if not specified
        List<Label> labels = (resolvedJobDefn.getLabels() != null && !resolvedJobDefn.getLabels().isEmpty()) ? resolvedJobDefn.getLabels() : jobClusterMetadata.getJobClusterDefinition().getLabels();

        String artifactName = resolvedJobDefn.getArtifactName();
        SchedulingInfo schedulingInfo = resolvedJobDefn.getSchedulingInfo();
        String version = resolvedJobDefn.getVersion();
        JobClusterConfig jobClusterConfig = null;

        // Case: artifact Y | version Y | schedInfo Y -> everything given, use as-is.
        if(!isNull(artifactName) && !isNull(version) && !schedulingInfoNotValid(schedulingInfo)) {
            // update cluster ?
        } else if(!isNull(artifactName) && !isNull(version) && schedulingInfoNotValid(schedulingInfo)) {
            // Case: artifact Y | version Y | schedInfo N
            // scheduling Info is not given while new artifact is specified
            // exception
            String msg = String.format("Scheduling info is not specified during Job Submit for cluster %s while new artifact is specified %s. Job Submit fails", jobClusterMetadata.getJobClusterDefinition().getName(), artifactName);
            logger.warn(msg);
            throw new Exception(msg);
        } else if(!isNull(artifactName) && isNull(version) && !schedulingInfoNotValid(schedulingInfo)) {
            // Case: artifact Y | version N | schedInfo Y
            // artifact & schedulingInfo are given
            // generate new version (current epoch millis) and update cluster
            version = String.valueOf(System.currentTimeMillis());
            // update cluster ?
        } else if(!isNull(artifactName) && isNull(version) && schedulingInfoNotValid(schedulingInfo)) {
            // Case: artifact Y | version N | schedInfo N
            // scheduling info not given while new artifact is specified
            // exception
            String msg = String.format("Scheduling info is not specified during Job Submit for cluster %s while new artifact %s is specified. Job Submit fails", jobClusterMetadata.getJobClusterDefinition().getName(), artifactName);
            logger.warn(msg);
            throw new Exception(msg);
        } else if(isNull(artifactName) && !isNull(version) && !schedulingInfoNotValid(schedulingInfo)) {
            // Case: artifact N | version Y | schedInfo Y
            // version is given & scheduling info is given
            // fetch JobCluster config for version and validate the given schedulingInfo is compatible
            Optional<JobClusterConfig> clusterConfigForVersion = getJobClusterConfigForVersion(jobClusterMetadata, version);
            if(!clusterConfigForVersion.isPresent()) {
                String msg = String.format("No Job Cluster config could be found for version %s in JobCluster %s. Job Submit fails", version, jobClusterMetadata.getJobClusterDefinition().getName());
                logger.warn(msg);
                throw new Exception(msg);
            }
            jobClusterConfig = clusterConfigForVersion.get();
            if(!validateSchedulingInfo(schedulingInfo, jobClusterConfig.getSchedulingInfo(), jobClusterMetadata)) {
                String msg = String.format("Given SchedulingInfo %s is incompatible with that associated with the given version %s in JobCluster %s. Job Submit fails", schedulingInfo, version, jobClusterMetadata.getJobClusterDefinition().getName());
                logger.warn(msg);
                throw new Exception(msg);
            }
            artifactName = jobClusterConfig.getArtifactName();
        } else if(isNull(artifactName) && !isNull(version) && schedulingInfoNotValid(schedulingInfo)) {
            // Case: artifact N | version Y | schedInfo N
            // Only version is given
            // fetch JobCluster config for version and take artifact + schedInfo from it
            Optional<JobClusterConfig> clusterConfigForVersion = getJobClusterConfigForVersion(jobClusterMetadata, version);
            if(!clusterConfigForVersion.isPresent()) {
                String msg = String.format("No Job Cluster config could be found for version %s in JobCluster %s. Job Submit fails", version, jobClusterMetadata.getJobClusterDefinition().getName());
                logger.warn(msg);
                throw new Exception(msg);
            }
            jobClusterConfig = clusterConfigForVersion.get();
            schedulingInfo = jobClusterConfig.getSchedulingInfo();
            artifactName = jobClusterConfig.getArtifactName();
        } else if(isNull(artifactName) && isNull(version) && !schedulingInfoNotValid(schedulingInfo)) {
            // Case: artifact N | version N | schedInfo Y
            // only scheduling info is given
            // fetch latest Job Cluster config
            jobClusterConfig = jobClusterMetadata.getJobClusterDefinition().getJobClusterConfig();
            version = jobClusterConfig.getVersion();
            artifactName = jobClusterConfig.getArtifactName();
            // set version to it
            // validate given scheduling info is compatible
            if(!validateSchedulingInfo(schedulingInfo, jobClusterConfig.getSchedulingInfo(), jobClusterMetadata)) {
                String msg = String.format("Given SchedulingInfo %s is incompatible with that associated with the given version %s in JobCluster %s which is %s. Job Submit fails", schedulingInfo, version, jobClusterMetadata.getJobClusterDefinition().getName(), jobClusterMetadata.getJobClusterDefinition().getJobClusterConfig().getSchedulingInfo());
                logger.warn(msg);
                throw new Exception(msg);
            }
        } else if(isNull(artifactName) && isNull(version) && schedulingInfoNotValid(schedulingInfo)){
            // Case: artifact N | version N | schedInfo N
            // Nothing is given. Use the latest on the cluster
            // fetch latest job cluster config
            jobClusterConfig = jobClusterMetadata.getJobClusterDefinition().getJobClusterConfig();
            // set version to it
            version = jobClusterConfig.getVersion();
            // use scheduling info from that.
            schedulingInfo = jobClusterConfig.getSchedulingInfo();
            artifactName = jobClusterConfig.getArtifactName();
        } else {
            // exception should never get here.
            throw new Exception(String.format("Invalid case for resolveJobDefinition artifactName %s version %s schedulingInfo %s", artifactName, version, schedulingInfo));
        }

        logger.info("Resolved version {}, schedulingInfo {}, artifactName {}", version, schedulingInfo, artifactName);

        // Defensive final check: all three must have been resolved by one of the branches above.
        if(isNull(artifactName) || isNull(version) || schedulingInfoNotValid(schedulingInfo)) {
            String msg = String.format(" SchedulingInfo %s or artifact %s or version %s could not be resolved in JobCluster %s. Job Submit fails", schedulingInfo, artifactName, version, jobClusterMetadata.getJobClusterDefinition().getName());
            logger.warn(msg);
            throw new Exception(msg);
        }

        return new JobDefinition.Builder()
                .from(resolvedJobDefn)
                .withParameters(parameters)
                .withLabels(labels)
                .withSchedulingInfo(schedulingInfo)
                .withUser(user)
                .withVersion(version)
                .withArtifactName(artifactName)
                .build();
    }

    /**
     * A scheduling info is considered invalid (absent) when it is null or defines no stages.
     */
    private static boolean schedulingInfoNotValid(SchedulingInfo schedulingInfo) {
        if(schedulingInfo == null || schedulingInfo.getStages().isEmpty()) {
            return true;
        }
        return false;
    }

    /**
     * Treats null, empty, and the literal string "null" as absent.
     * NOTE(review): the "null" literal check presumably guards against clients serializing null as a
     * string — confirm against the API layer.
     */
    private static boolean isNull(String key) {
        return (key == null || key.equals("null") || key.isEmpty()) ? true : false;
    }

    /**
     * Lookup the job cluster config for the given version in the list of job cluster configs.
     * Returns the first config whose version matches, or empty (with a warn log) if none match.
     * @param jobClusterMetadata cluster metadata holding the list of configs; must not be null
     * @param version version string to look up; must not be null
     * @return the matching config, or {@link Optional#empty()} if the version is unknown
     */
    Optional<JobClusterConfig> getJobClusterConfigForVersion(final IJobClusterMetadata jobClusterMetadata, final String version) {
        Preconditions.checkNotNull(jobClusterMetadata, "JobClusterMetadata cannot be null");
        Preconditions.checkNotNull(version, "Version cannot be null");
        final String versionToFind = version;
        List<JobClusterConfig> configList = jobClusterMetadata.getJobClusterDefinition()
                .getJobClusterConfigs()
                .stream()
                .filter((cfg) -> cfg.getVersion().equals(versionToFind))
                .collect(Collectors.toList());
        if(!configList.isEmpty()) {
            return of(configList.get(0));
        } else {
            // unknown version
            String msg = String.format("No config with version %s found for Job Cluster %s. Job Submit fails", versionToFind, jobClusterMetadata.getJobClusterDefinition().getName());
            logger.warn(msg);
            return empty();
        }
    }

    /**
     * Compare given scheduling info with that configured for this artifact to make sure it is compatible
     * - Ensure number of stages match (stage 0, when present, is excluded from the count on both sides)
     * @param givenSchedulingInfo scheduling info supplied at submit time
     * @param configuredSchedulingInfo scheduling info stored with the cluster config
     * @param jobClusterMetadata owning cluster metadata (currently unused; see commented-out check below)
     * @return true if the stage counts are compatible, false otherwise (a warn is logged)
     * @throws Exception declared for historical reasons; not thrown by the current implementation
     */
    private boolean validateSchedulingInfo(final SchedulingInfo givenSchedulingInfo,
                                           final SchedulingInfo configuredSchedulingInfo,
                                           final IJobClusterMetadata jobClusterMetadata) throws Exception {
        int givenNumStages = givenSchedulingInfo.getStages().size();
        int existingNumStages = configuredSchedulingInfo.getStages().size();
        // isReadyForJobMaster is not reliable, just check if stage 0 is defined and decrement overall count
        //if (jobClusterMetadata.getJobClusterDefinition().getIsReadyForJobMaster()) {
            if (givenSchedulingInfo.forStage(0) != null) givenNumStages--; // decrement to get net numStages without job master
            if (configuredSchedulingInfo.forStage(0) != null) existingNumStages--;
        //}
        if(givenNumStages != existingNumStages) {
            logger.warn("Mismatched scheduling info: expecting #stages=" + existingNumStages + " for given jar version [" + " " + "], where as, given scheduling info has #stages=" + givenNumStages);
            return false;
        }
        return true;
    }

//    private SchedulingInfo getSchedulingInfoForArtifact(String artifactName, final IJobClusterMetadata jobClusterMetadata) throws Exception{
//        logger.info("Entering getSchedulingInfoForArtifact {}", artifactName);
//        SchedulingInfo resolvedSchedulingInfo = null;
//        List<JobClusterConfig> configList = jobClusterMetadata.getJobClusterDefinition().getJobClusterConfigs().stream().filter((cfg) -> cfg.getArtifactName().equals(artifactName)).collect(Collectors.toList());
//        if (configList.isEmpty()) { // new artifact
//            throw new Exception("Scheduling info must be provided along with new artifact");
//        } else {
//
//            JobClusterConfig preconfiguredConfigForArtifact = null;
//            for(JobClusterConfig config : configList) {
//                if(artifactName.equals(config.getArtifactName())) {
//                    preconfiguredConfigForArtifact = config;
//                    break;
//                }
//            }
//            if(preconfiguredConfigForArtifact != null) {
//
//                logger.info("Found schedulingInfo {} for artifact {}", preconfiguredConfigForArtifact.getSchedulingInfo(), artifactName);
//                resolvedSchedulingInfo = preconfiguredConfigForArtifact.getSchedulingInfo();
//            } else {
//
//                logger.warn("No Config found for artifact {} using default", artifactName);
//                JobClusterConfig config = configList.get(0);
//                resolvedSchedulingInfo = config.getSchedulingInfo();
//            }
//        }
//        logger.info("Exiting getSchedulingInfoForArtifact {} -> with resolved config {}", artifactName, resolvedSchedulingInfo);
//        return resolvedSchedulingInfo;
//
//    }
}
4,373
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/IJobClusterMetadata.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.jobcluster;

import java.util.List;

import io.mantisrx.server.master.domain.IJobClusterDefinition;
import io.mantisrx.server.master.domain.JobId;

/**
 * Read-only view over a job cluster's persisted metadata: its definition plus
 * the pieces of runtime state that are stored alongside it.
 */
public interface IJobClusterMetadata {

    /** @return the cluster's definition (configs, SLA, owner, parameters, etc.). */
    IJobClusterDefinition getJobClusterDefinition();

    /**
     * @return the last job counter recorded for this cluster
     *         (presumably the job number of the most recently submitted job — confirm with callers)
     */
    long getLastJobCount();

    /** @return true if this cluster is currently disabled. */
    boolean isDisabled();
}
4,374
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/PersistException.java
/**
 * Exception signaling a failure to persist state (e.g. a write to the job store failed).
 *
 * <p>Improvement over the original: adds a {@code (String, Throwable)} constructor so callers
 * can attach context to the root cause instead of choosing between message-only and cause-only
 * (standard exception-chaining practice). Existing constructors are unchanged.
 */
public class PersistException extends Exception {

    /** @param msg description of the persistence failure */
    public PersistException(String msg) {
        super(msg);
    }

    /** @param e the underlying cause of the persistence failure */
    public PersistException(Throwable e) {
        super(e);
    }

    /**
     * @param msg description of the persistence failure
     * @param cause the underlying cause; preserved for stack-trace chaining
     */
    public PersistException(String msg, Throwable cause) {
        super(msg, cause);
    }
}
4,375
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/SLAEnforcer.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.jobcluster; import java.util.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import io.mantisrx.shaded.com.google.common.base.Preconditions; import io.mantisrx.shaded.com.google.common.collect.Lists; import io.mantisrx.master.jobcluster.JobClusterActor.JobInfo; import io.mantisrx.master.jobcluster.job.JobState; import io.mantisrx.server.master.domain.JobId; import io.mantisrx.server.master.domain.SLA; public class SLAEnforcer { private static final Logger logger = LoggerFactory.getLogger(SLAEnforcer.class); private final Optional<SLA> sla; private final Comparator<JobInfo> comparator = (o1, o2) -> { if (o2 == null) return -1; if (o1 == null) return 1; return Long.compare(o1.jobId.getJobNum(), o2.jobId.getJobNum()); }; public SLAEnforcer(SLA sla) { this.sla = Optional.ofNullable(sla); } /** * * @param activeJobsCount * @param acceptedJobsCount * @return */ public int enforceSLAMin(int activeJobsCount, int acceptedJobsCount) { Preconditions.checkArgument(activeJobsCount >=0, "Invalid activeJobsCount " + activeJobsCount); Preconditions.checkArgument(acceptedJobsCount >=0, "Invalid acceptedJobsCount " + activeJobsCount); // if no min sla defined if(!sla.isPresent() || sla.get().getMin() == 0) { logger.debug("SLA min not set nothing to enforce"); return 0; } int jobsInActiveOrSubmittedState = activeJobsCount + acceptedJobsCount; if(jobsInActiveOrSubmittedState < 
sla.get().getMin()) { int jobsToLaunch = sla.get().getMin()-jobsInActiveOrSubmittedState; logger.info("Submit {} jobs per sla min of {}", jobsToLaunch, sla.get().getMin()); return jobsToLaunch; } logger.debug("SLA min already satisfied"); return 0; } /** * Walk the set of jobs in descending order (newest jobs first) track no. of running jobs. Once this * count equals slamax mark the rest of them for deletion. * * @param list A sorted (by job number) set of jobs in either running or accepted state * @return */ public List<JobId> enforceSLAMax(List<JobInfo> list) { Preconditions.checkNotNull(list, "runningOrAcceptedJobSet is null"); List<JobId> jobsToDelete = Lists.newArrayList(); // if no max sla defined; if(!sla.isPresent() || sla.get().getMax() ==0 ) { return jobsToDelete; } SortedSet<JobInfo> sortedJobSet = new TreeSet<>(comparator); sortedJobSet.addAll(list); JobInfo [] jobIdArray = sortedJobSet.toArray(new JobInfo[list.size()]); int activeJobCount = 0; int slaMax = sla.get().getMax(); boolean addToDeleteList = false; for(int i=jobIdArray.length-1; i>=0; i--) { JobInfo jInfo = jobIdArray[i]; if(addToDeleteList) { jobsToDelete.add(jInfo.jobId); } else { if (jInfo.state.equals(JobState.Launched)) { activeJobCount++; if (activeJobCount == slaMax) { addToDeleteList = true; } } } } return jobsToDelete; } public boolean hasSLA() { if(!sla.isPresent() || sla == null || (sla.get().getMin() == 0 && sla.get().getMax() == 0)) { // No SLA == NO OP return false; } return true; } /** * For Testing * @param list * @return */ Set<JobInfo> sortJobsByIdDesc(List<JobInfo> list) { SortedSet<JobInfo> sortedJobSet = new TreeSet<>(comparator); sortedJobSet.addAll(list); return sortedJobSet; } }
4,376
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/MantisJobClusterMetadataView.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.jobcluster;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonFilter;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.server.master.domain.DataFormatAdapter;
import io.mantisrx.server.master.domain.JobClusterConfig;
import io.mantisrx.server.master.domain.SLA;
import io.mantisrx.server.master.store.NamedJob;

import java.util.List;
import java.util.Objects;

/**
 * Immutable Jackson-serializable view of a job cluster's metadata, expressed in the legacy
 * {@link NamedJob} vocabulary (jars / NamedJob.SLA) — the Builder converts from the newer domain
 * types via {@link DataFormatAdapter}.
 *
 * <p>NOTE(review): field names and Jackson annotations here define a wire/storage format —
 * the {@code @JsonFilter("topLevelFilter")} name presumably matches a filter registered on the
 * ObjectMapper where this view is serialized; confirm before renaming anything.
 */
@JsonFilter("topLevelFilter")
public class MantisJobClusterMetadataView {
    private final String name;
    private final List<NamedJob.Jar> jars;
    private final NamedJob.SLA sla;
    private final List<Parameter> parameters;
    private final JobOwner owner;
    private final long lastJobCount;
    private final boolean disabled;
    private final boolean isReadyForJobMaster;
    private final WorkerMigrationConfig migrationConfig;
    private final List<Label> labels;
    private final boolean cronActive;
    // Not serialized (JsonIgnore); carried for in-process consumers only.
    @JsonIgnore
    private final String latestVersion;

    /** Jackson creator; all fields are bound by property name. */
    @JsonCreator
    public MantisJobClusterMetadataView(@JsonProperty("name") String name,
                                        @JsonProperty("jars") List<NamedJob.Jar> jars,
                                        @JsonProperty("sla") NamedJob.SLA sla,
                                        @JsonProperty("parameters") List<Parameter> parameters,
                                        @JsonProperty("owner") JobOwner owner,
                                        @JsonProperty("lastJobCount") long lastJobCount,
                                        @JsonProperty("disabled") boolean disabled,
                                        @JsonProperty("isReadyForJobMaster") boolean isReadyForJobMaster,
                                        @JsonProperty("migrationConfig") WorkerMigrationConfig migrationConfig,
                                        @JsonProperty("labels") List<Label> labels,
                                        @JsonProperty("cronActive") boolean cronActive,
                                        @JsonProperty("latestVersion") String latestVersion) {
        this.name = name;
        this.jars = jars;
        this.sla = sla;
        this.parameters = parameters;
        this.owner = owner;
        this.lastJobCount = lastJobCount;
        this.disabled = disabled;
        this.isReadyForJobMaster = isReadyForJobMaster;
        this.migrationConfig = migrationConfig;
        this.labels = labels;
        this.cronActive = cronActive;
        this.latestVersion = latestVersion;
    }

    public String getName() {
        return name;
    }

    public List<NamedJob.Jar> getJars() {
        return jars;
    }

    public NamedJob.SLA getSla() {
        return sla;
    }

    public List<Parameter> getParameters() {
        return parameters;
    }

    public JobOwner getOwner() {
        return owner;
    }

    public long getLastJobCount() {
        return lastJobCount;
    }

    public boolean isDisabled() {
        return disabled;
    }

    public boolean getIsReadyForJobMaster() {
        return isReadyForJobMaster;
    }

    public WorkerMigrationConfig getMigrationConfig() {
        return migrationConfig;
    }

    public List<Label> getLabels() {
        return labels;
    }

    public boolean isCronActive() {
        return cronActive;
    }

    public String getLatestVersion() {
        return latestVersion;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        MantisJobClusterMetadataView that = (MantisJobClusterMetadataView) o;
        return lastJobCount == that.lastJobCount &&
                disabled == that.disabled &&
                isReadyForJobMaster == that.isReadyForJobMaster &&
                cronActive == that.cronActive &&
                Objects.equals(name, that.name) &&
                Objects.equals(jars, that.jars) &&
                Objects.equals(sla, that.sla) &&
                Objects.equals(parameters, that.parameters) &&
                Objects.equals(owner, that.owner) &&
                Objects.equals(migrationConfig, that.migrationConfig) &&
                Objects.equals(labels, that.labels) &&
                Objects.equals(latestVersion, that.latestVersion);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, jars, sla, parameters, owner, lastJobCount, disabled, isReadyForJobMaster, migrationConfig, labels, cronActive, latestVersion);
    }

    @Override
    public String toString() {
        return "MantisJobClusterMetadataView{" +
                "name='" + name + '\'' +
                ", jars=" + jars +
                ", sla=" + sla +
                ", parameters=" + parameters +
                ", owner=" + owner +
                ", lastJobCount=" + lastJobCount +
                ", disabled=" + disabled +
                ", isReadyForJobMaster=" + isReadyForJobMaster +
                ", migrationConfig=" + migrationConfig +
                ", labels=" + labels +
                ", cronActive=" + cronActive +
                ", latestVersion='" + latestVersion + '\'' +
                '}';
    }

    /**
     * Builder accepting the newer domain types (JobClusterConfig, SLA) and converting them to the
     * legacy NamedJob representation via DataFormatAdapter. name, jars (non-empty) and latestVersion
     * are required; other fields default as initialized below.
     */
    public static class Builder {
        private String name;
        private List<NamedJob.Jar> jars = Lists.newArrayList();
        private NamedJob.SLA sla;
        private List<Parameter> parameters = Lists.newArrayList();
        private JobOwner owner;
        private long lastJobCount;
        private boolean disabled = false;
        private boolean isReadyForJobMaster = true;
        private WorkerMigrationConfig migrationConfig;
        private List<Label> labels = Lists.newArrayList();
        private boolean cronActive = false;
        private String latestVersion;

        public Builder() {
        }

        public Builder withName(String name) {
            this.name = name;
            return this;
        }

        // Converts domain JobClusterConfigs to the legacy NamedJob.Jar form.
        public Builder withJars(List<JobClusterConfig> jars) {
            this.jars = DataFormatAdapter.convertJobClusterConfigsToJars(jars);
            return this;
        }

        // Converts domain SLA to the legacy NamedJob.SLA form.
        public Builder withSla(SLA sla) {
            this.sla = DataFormatAdapter.convertSLAToNamedJobSLA(sla);
            return this;
        }

        public Builder withParameters(List<Parameter> params) {
            this.parameters = params;
            return this;
        }

        public Builder withJobOwner(JobOwner owner) {
            this.owner = owner;
            return this;
        }

        public Builder withLastJobCount(long cnt) {
            this.lastJobCount = cnt;
            return this;
        }

        public Builder withDisabled(boolean disabled) {
            this.disabled = disabled;
            return this;
        }

        public Builder withIsReadyForJobMaster(boolean isReadyForJobMaster) {
            this.isReadyForJobMaster = isReadyForJobMaster;
            return this;
        }

        public Builder withMigrationConfig(WorkerMigrationConfig config) {
            this.migrationConfig = config;
            return this;
        }

        public Builder withLabels(List<Label> labels) {
            this.labels = labels;
            return this;
        }

        public Builder isCronActive(boolean cronActive) {
            this.cronActive = cronActive;
            return this;
        }

        public Builder withLatestVersion(String version) {
            this.latestVersion = version;
            return this;
        }

        /**
         * @return the built view
         * @throws RuntimeException (via Preconditions) if name/jars/latestVersion are missing or jars is empty
         */
        public MantisJobClusterMetadataView build() {
            Preconditions.checkNotNull(name, "name cannot be null");
            Preconditions.checkNotNull(jars, "Jars cannot be null");
            Preconditions.checkArg(!jars.isEmpty(),"Jars cannot be empty");
            Preconditions.checkNotNull(latestVersion, "version cannot be null");
            return new MantisJobClusterMetadataView(name,jars,sla,parameters,owner,lastJobCount,disabled,isReadyForJobMaster,migrationConfig,labels,cronActive,latestVersion);
        }
    }
}
4,377
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/WorkerInfoListHolder.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.jobcluster;

import java.util.List;

import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.server.master.domain.JobId;

/**
 * Immutable pairing of a job id with the metadata of that job's workers.
 */
public class WorkerInfoListHolder {

    private final JobId id;
    private final List<IMantisWorkerMetadata> workers;

    /**
     * @param jobId the job the workers belong to
     * @param workerMetadataList metadata for each of the job's workers
     */
    public WorkerInfoListHolder(JobId jobId, List<IMantisWorkerMetadata> workerMetadataList) {
        this.id = jobId;
        this.workers = workerMetadataList;
    }

    /** @return the job id this holder refers to */
    public JobId getJobId() {
        return id;
    }

    /** @return the worker metadata list (as supplied at construction) */
    public List<IMantisWorkerMetadata> getWorkerMetadataList() {
        return workers;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("WorkerInfoListHolder{");
        sb.append(" jobId=").append(id);
        sb.append(", workerMetadataList=").append(workers);
        sb.append('}');
        return sb.toString();
    }
}
4,378
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/JobListHelper.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.jobcluster; import java.io.IOException; import java.util.*; import java.util.stream.Collectors; import io.mantisrx.master.jobcluster.job.IMantisJobMetadata; import io.mantisrx.server.master.domain.JobDefinition; import io.mantisrx.server.master.domain.JobId; import io.mantisrx.server.master.persistence.MantisJobStore; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import io.mantisrx.shaded.com.google.common.collect.Lists; import io.mantisrx.master.jobcluster.JobClusterActor.JobInfo; import io.mantisrx.master.jobcluster.job.JobState; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobCriteria; import io.mantisrx.server.master.domain.JobClusterDefinitionImpl.CompletedJob; import rx.Observable; import static java.util.Optional.empty; import static java.util.Optional.ofNullable; public class JobListHelper { private static final Logger logger = LoggerFactory.getLogger(JobListHelper.class); // /** // * Note: rawResults are already filtered by jobstate and limit has been already applied to reduce unnecessary work // * @param rawResultList // * @param request // * @return // */ // public static List<JobInfo> getPreFilteredNonTerminalJobList(final List<JobInfo> rawResultList, // ListJobCriteria request) { // if(logger.isDebugEnabled()) { logger.debug("Entering getPreFilteredNonTerminalJobList with raw list size {} and criteria {}", 
rawResultList.size(), request); } // // Collections.sort(rawResultList,Comparator.comparingLong(jinfo -> jinfo.submittedAt)); // // if(request.getLimit().isPresent()) { // return rawResultList.subList(0, Math.min(rawResultList.size(), request.getLimit().get())); // } // if(logger.isDebugEnabled()) { logger.debug("Returning {} jobs in nonterminalstate ", rawResultList.size()); } // return rawResultList; // } // // public static List<CompletedJob> getPreFilteredTerminalJobList(final List<CompletedJob> rawResultList, ListJobCriteria request) { // List<CompletedJob> resultList = Lists.newArrayList(); // Observable.from(rawResultList) // .filter((completedJob) -> { // if(request.getActiveOnly().isPresent()) { // return false; // } // return true; // }) // .toSortedList((c1, c2) -> Long.compare(c1.getSubmittedAt(), c2.getSubmittedAt()) ) // .subscribe((cList) -> { // resultList.addAll(cList); // }); // if(request.getLimit().isPresent()) { // return resultList.subList(0, Math.min(resultList.size(), request.getLimit().get())); // } // return resultList; // } public static Optional<JobId> getLastSubmittedJobId(final List<JobInfo> existingJobsList, final List<CompletedJob> completedJobs) { if(logger.isTraceEnabled()) { logger.trace("Entering getLastSubmittedJobDefinition existing jobs {} completedJobs {}",existingJobsList.size(),completedJobs.size() ); } long highestJobNumber = -1; JobInfo jInfoWithHighestJobNumber = null; CompletedJob completedJobWithHighestJobNumber = null; if(logger.isDebugEnabled()) { logger.debug("No of active jobs: {}", existingJobsList.size()); } for (JobInfo jInfo : existingJobsList) { if (jInfo.jobId.getJobNum() > highestJobNumber) { highestJobNumber = jInfo.jobId.getJobNum(); jInfoWithHighestJobNumber = jInfo; } } if(logger.isDebugEnabled()) { logger.debug("Highest Active job number: {}", highestJobNumber); } if(highestJobNumber != -1) { return ofNullable(jInfoWithHighestJobNumber.jobId); } else { // search in completed Jobs for (CompletedJob cJob 
: completedJobs) { Optional<JobId> completedJobId = JobId.fromId(cJob.getJobId()); if (completedJobId.isPresent() && completedJobId.get().getJobNum() > highestJobNumber) { highestJobNumber = completedJobId.get().getJobNum(); completedJobWithHighestJobNumber = cJob; } } if(highestJobNumber != -1) { if(logger.isDebugEnabled()) { logger.debug("Highest completed job number: {}", highestJobNumber); } return (JobId.fromId(completedJobWithHighestJobNumber.getJobId())); } } return empty(); } }
4,379
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/proto/JobClusterProto.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.jobcluster.proto; import akka.actor.ActorRef; import io.mantisrx.shaded.com.google.common.collect.Lists; import com.netflix.spectator.impl.Preconditions; import io.mantisrx.master.jobcluster.job.IMantisJobMetadata; import io.mantisrx.master.jobcluster.job.JobState; import io.mantisrx.server.core.JobCompletedReason; import io.mantisrx.server.master.domain.JobClusterDefinitionImpl; import io.mantisrx.server.master.domain.JobClusterDefinitionImpl.CompletedJob; import io.mantisrx.server.master.domain.JobDefinition; import io.mantisrx.server.master.domain.JobId; import java.time.Instant; import java.util.List; import java.util.Optional; import static java.util.Optional.ofNullable; public class JobClusterProto { /** * This message is sent to a JobCluster Actor from * 1. JobClustersManagerActor during bootstrap - in which case the Job Cluster Actor will create and initialize the list of jobs passed in this message * 2. 
JobClustersManagerActor on receiving a CreateJobClusterRequest from the user -
 * in which case the job cluster actor will persist to storage.
 *
 * @author njoshi
 */
public static final class InitializeJobClusterRequest extends BaseRequest {

    public final JobClusterDefinitionImpl jobClusterDefinition;
    public final ActorRef requestor;
    public final String user;
    public final boolean isDisabled;
    public final long lastJobNumber;
    public final boolean createInStore;
    public final List<IMantisJobMetadata> jobList;
    public final List<CompletedJob> completedJobsList;

    /**
     * Invoked directly during bootstrap, when existing clusters are rehydrated.
     *
     * @param jobClusterDefinition cluster definition to initialize with (must not be null)
     * @param isDisabled whether the cluster is currently disabled
     * @param lastJobNumber highest job number used so far for this cluster
     * @param jobList currently known jobs of this cluster
     * @param completedJobsList previously completed jobs of this cluster
     * @param user user on whose behalf the request is made
     * @param requestor actor to which the response should be sent
     * @param createInStore true if the cluster must also be persisted to storage
     */
    public InitializeJobClusterRequest(final JobClusterDefinitionImpl jobClusterDefinition,
                                       boolean isDisabled,
                                       long lastJobNumber,
                                       List<IMantisJobMetadata> jobList,
                                       List<CompletedJob> completedJobsList,
                                       String user,
                                       ActorRef requestor,
                                       boolean createInStore) {
        super();
        Preconditions.checkNotNull(jobClusterDefinition, "JobClusterDefn cannot be null");
        this.jobClusterDefinition = jobClusterDefinition;
        this.user = user;
        this.requestor = requestor;
        this.createInStore = createInStore;
        this.isDisabled = isDisabled;
        this.lastJobNumber = lastJobNumber;
        this.jobList = jobList;
        this.completedJobsList = completedJobsList;
    }

    /**
     * Invoked during job cluster creation: a brand new, enabled cluster with no jobs
     * that must be written to the store.
     *
     * @param jobClusterDefinition cluster definition to initialize with (must not be null)
     * @param user user on whose behalf the request is made
     * @param requestor actor to which the response should be sent
     */
    public InitializeJobClusterRequest(final JobClusterDefinitionImpl jobClusterDefinition,
                                       String user,
                                       ActorRef requestor) {
        this(jobClusterDefinition, false, 0, Lists.newArrayList(), Lists.newArrayList(), user, requestor, true);
    }

    @Override
    public String toString() {
        return "InitializeJobClusterRequest{" + "jobClusterDefinition=" + jobClusterDefinition + ", requestor="
                + requestor + ", user='" + user + '\'' + ", isDisabled=" + isDisabled + ", lastJobNumber="
                + lastJobNumber + ", createInStore=" + createInStore + ", jobList=" + jobList
                + ", completedJobsList=" + completedJobsList + '}';
    }
}

/**
 * Indicates whether a job cluster was initialized successfully.
 * Typical failures include being unable to write to the store.
 *
 * @author njoshi
 */
public static final class InitializeJobClusterResponse extends BaseResponse {

    public final ActorRef requestor;
    public final String jobClusterName;

    public InitializeJobClusterResponse(final long requestId,
                                        final ResponseCode responseCode,
                                        final String message,
                                        final String jobClusterName,
                                        final ActorRef requestor) {
        super(requestId, responseCode, message);
        this.requestor = requestor;
        this.jobClusterName = jobClusterName;
    }
}

/**
 * Deletes all records associated with this job cluster in store and terminates the
 * cluster actor. Only allowed if there are no jobs currently running.
 *
 * @author njoshi
 */
public static final class DeleteJobClusterRequest extends BaseRequest {

    public final String jobClusterName;
    public final String user;
    public final ActorRef requestingActor;

    public DeleteJobClusterRequest(final String user, final String name, final ActorRef requestor) {
        super();
        this.jobClusterName = name;
        this.user = user;
        this.requestingActor = requestor;
    }
}

/**
 * Whether the delete was successful.
 *
 * @author njoshi
 */
public static final class DeleteJobClusterResponse extends BaseResponse {

    public final ActorRef requestingActor;
    public final String clusterName;

    public DeleteJobClusterResponse(long requestId,
                                    ResponseCode responseCode,
                                    String message,
                                    ActorRef requestingActor,
                                    String clusterName) {
        super(requestId, responseCode, message);
        this.requestingActor = requestingActor;
        this.clusterName = clusterName;
    }

    public ActorRef getRequestingActor() {
        return requestingActor;
    }

    public String getClusterName() {
        return clusterName;
    }
}

/** Request to kill a running job, carrying the reason and the requesting actor. */
public static final class KillJobRequest extends BaseRequest {

    public final JobId jobId;
    public final String reason;
    public final JobCompletedReason jobCompletedReason;
    public final String user;
    public final ActorRef requestor;

    public KillJobRequest(final JobId jobId,
                          final String reason,
                          final JobCompletedReason jobCompletedReason,
                          final String user,
                          final ActorRef requestor) {
        super();
        this.jobId = jobId;
        this.reason = reason;
        this.jobCompletedReason = jobCompletedReason;
        this.user = user;
        this.requestor = requestor;
    }

    @Override
    public String toString() {
        return "KillJobRequest [jobId=" + jobId + ", reason=" + reason + ", user=" + user
                + ", requestor=" + requestor + "]";
    }
}

/** Response to a {@link KillJobRequest}; carries the final job state and optional metadata. */
public static final class KillJobResponse extends BaseResponse {

    public final JobId jobId;
    public final ActorRef requestor;
    public final JobState state;
    public final String user;
    public final Optional<IMantisJobMetadata> jobMetadata;

    public KillJobResponse(long requestId,
                           ResponseCode responseCode,
                           JobState state,
                           String message,
                           JobId jobId,
                           IMantisJobMetadata jobMeta,
                           String user,
                           final ActorRef requestor) {
        super(requestId, responseCode, message);
        this.jobId = jobId;
        this.requestor = requestor;
        this.state = state;
        this.user = user;
        // jobMeta may legitimately be null (e.g. the job was never found); exposed as Optional.
        this.jobMetadata = ofNullable(jobMeta);
    }

    @Override
    public String toString() {
        return "KillJobResponse{" + "jobId=" + jobId + ", requestor=" + requestor + ", state=" + state
                + ", user='" + user + '\'' + ", jobMetadata=" + jobMetadata + '}';
    }
}

/** Signals that the given job has started. */
public static final class JobStartedEvent {

    public final JobId jobid;

    public JobStartedEvent(JobId jobId) {
        this.jobid = jobId;
    }

    @Override
    public String toString() {
        return "JobStartedEvent [jobid=" + jobid + "]";
    }
}

/** Asks the cluster actor to enforce its SLA, optionally against an explicit job definition. */
public static final class EnforceSLARequest {

    public final Instant timeOfEnforcement;
    public final Optional<JobDefinition> jobDefinitionOp;

    public EnforceSLARequest() {
        this(Instant.now(), Optional.empty());
    }

    public EnforceSLARequest(Instant now) {
        this(now, Optional.empty());
    }

    public EnforceSLARequest(Instant now, Optional<JobDefinition> jobDefnOp) {
        this.timeOfEnforcement = now;
        this.jobDefinitionOp = jobDefnOp;
    }
}

/** Asks the cluster actor to expire (archive) old completed jobs. */
public static final class ExpireOldJobsRequest {

    public final Instant timeOfEnforcement;

    public ExpireOldJobsRequest() {
        this(Instant.now());
    }

    public ExpireOldJobsRequest(Instant now) {
        this.timeOfEnforcement = now;
    }
}

/** Periodic bookkeeping trigger. */
public static final class BookkeepingRequest {

    public final Instant time;

    public BookkeepingRequest(Instant time) {
        this.time = time; // was "time;;" - stray second semicolon removed
    }

    public BookkeepingRequest() {
        this(Instant.now());
    }
}

/** Trigger for cron-based handling by the cluster actor. */
public static final class TriggerCronRequest {

    public final Instant time;

    public TriggerCronRequest(Instant time) {
        this.time = time; // was "time;;" - stray second semicolon removed
    }

    public TriggerCronRequest() {
        this(Instant.now());
    }
}
}
4,380
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/proto/JobProto.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.jobcluster.proto; import akka.actor.ActorRef; import io.mantisrx.server.master.domain.JobId; import java.time.Instant; public class JobProto { public interface JobEvent { public String getName(); } public static final class InitJob extends BaseRequest { public final ActorRef requstor; public final boolean isSubmit; public InitJob(ActorRef requestor) { this(requestor, true); } public InitJob(ActorRef requestor, boolean isSubmit) { this.requstor = requestor; this.isSubmit = isSubmit; } @Override public String toString() { return "InitJob{" + "requstor=" + requstor + ", isSubmit=" + isSubmit + ", requestId=" + requestId + '}'; } } public static final class JobInitialized extends BaseResponse { public final JobId jobId; public final ActorRef requestor; public JobInitialized(final long requestId, final ResponseCode responseCode, final String message, JobId jobId, ActorRef requestor) { super(requestId, responseCode, message); this.jobId = jobId; this.requestor = requestor; } @Override public String toString() { return "JobInitialized{" + "jobId=" + jobId + ", requestor=" + requestor + ", requestId=" + requestId + ", responseCode=" + responseCode + ", message='" + message + '\'' + '}'; } } /////////////////////////////////// JOB Related Messages /////////////////////////////////////////////// public static final class RuntimeLimitReached { } public static final class 
CheckHeartBeat { Instant n = null; public CheckHeartBeat() { } public CheckHeartBeat(Instant now) { n = now; } public Instant getTime() { if(n == null) { return Instant.now(); } else { return n; } } } public static final class SendWorkerAssignementsIfChanged { } public static final class MigrateDisabledVmWorkersRequest { public final Instant time; public MigrateDisabledVmWorkersRequest(Instant time) { this.time = time;; } public MigrateDisabledVmWorkersRequest() { this(Instant.now()); } } public static class SelfDestructRequest { } }
4,381
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/proto/BaseResponse.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.jobcluster.proto; import com.netflix.spectator.impl.Preconditions; public class BaseResponse { public enum ResponseCode { SUCCESS(200), //200 SUCCESS_CREATED(201), //201 CLIENT_ERROR(400), //400 CLIENT_ERROR_NOT_FOUND(404), //404 OPERATION_NOT_ALLOWED(405), //405 CLIENT_ERROR_CONFLICT(409), //409 SERVER_ERROR(500); //500 private final int value; ResponseCode(int val) { this.value = val; } public int getValue() { return this.value; } } public final long requestId; public final ResponseCode responseCode; public final String message; public BaseResponse( final long requestId, final ResponseCode responseCode, final String message) { Preconditions.checkNotNull(responseCode, "Response code cannot be null"); // Preconditions.checkArg(message != null, "message cannot be null"); this.requestId = requestId; this.responseCode = responseCode; this.message = message; } }
4,382
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/proto/JobClusterManagerProto.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.master.jobcluster.proto;

import akka.actor.ActorRef;
import akka.http.javadsl.model.Uri;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.google.common.base.Strings;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import com.mantisrx.common.utils.LabelUtils;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.common.Label;
import io.mantisrx.master.api.akka.route.pagination.ListObject;
import io.mantisrx.master.api.akka.route.proto.JobClusterProtoAdapter.JobIdInfo;
import io.mantisrx.master.jobcluster.MantisJobClusterMetadataView;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.master.jobcluster.job.MantisJobMetadataView;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.master.domain.IJobClusterDefinition;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl.CompletedJob;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import rx.subjects.BehaviorSubject;

import java.time.Instant;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Function;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

/**
 * Message protocol (requests/responses) understood by the JobClustersManager actor
 * and the individual job cluster actors: CRUD on job clusters, SLA/label/artifact
 * updates, enable/disable, and job listing queries.
 */
public class JobClusterManagerProto {

    /** Request to create (register) a new job cluster on behalf of {@code user}. */
    public static final class CreateJobClusterRequest extends BaseRequest {

        private final JobClusterDefinitionImpl jobClusterDefinition;
        private final String user;

        public CreateJobClusterRequest(
                final JobClusterDefinitionImpl jobClusterDefinition,
                String user) {
            super();
            Preconditions.checkNotNull(jobClusterDefinition, "JobClusterDefn cannot be null");
            this.jobClusterDefinition = jobClusterDefinition;
            this.user = user;
        }

        public JobClusterDefinitionImpl getJobClusterDefinition() {
            return jobClusterDefinition;
        }

        public String getUser() {
            return user;
        }
    }

    /** Periodic trigger asking a cluster actor to reconcile its state. */
    public static final class ReconcileJobCluster {

        public final Instant timeOfEnforcement;

        public ReconcileJobCluster(Instant now) {
            timeOfEnforcement = now;
        }

        public ReconcileJobCluster() {
            timeOfEnforcement = Instant.now();
        }
    }

    /** Outcome of a {@link CreateJobClusterRequest}. */
    public static final class CreateJobClusterResponse extends BaseResponse {

        private final String jobClusterName;

        public CreateJobClusterResponse(
                final long requestId,
                final ResponseCode responseCode,
                final String message,
                final String jobClusterName) {
            super(requestId, responseCode, message);
            this.jobClusterName = jobClusterName;
        }

        public String getJobClusterName() {
            return jobClusterName;
        }

        @Override
        public String toString() {
            return "CreateJobClusterResponse{" + "jobClusterName='" + jobClusterName + '\''
                    + ", requestId=" + requestId + ", responseCode=" + responseCode
                    + ", message='" + message + '\'' + '}';
        }
    }

    /** User request to delete a job cluster. */
    public static final class DeleteJobClusterRequest extends BaseRequest {

        private final String name;
        private final String user;

        @JsonCreator
        @JsonIgnoreProperties(ignoreUnknown = true)
        public DeleteJobClusterRequest(
                @JsonProperty("user") final String user,
                @JsonProperty("name") final String name) {
            super();
            // && (not &) so the empty-check is skipped when the value is null, surfacing the
            // intended IllegalArgumentException instead of an NPE from isEmpty().
            Preconditions.checkArg(user != null && !user.isEmpty(), "Must provide user in request");
            Preconditions.checkArg(
                    name != null && !name.isEmpty(),
                    "Must provide job cluster name in request");
            this.user = user;
            this.name = name;
        }

        public String getName() {
            return name;
        }

        public String getUser() {
            return user;
        }
    }

    /** Outcome of a {@link DeleteJobClusterRequest}. */
    public static final class DeleteJobClusterResponse extends BaseResponse {

        public DeleteJobClusterResponse(long requestId, ResponseCode responseCode, String message) {
            super(requestId, responseCode, message);
        }
    }

    /** Bootstraps the JobClustersManager with a scheduler, optionally reloading jobs from the store. */
    public static final class JobClustersManagerInitialize extends BaseRequest {

        private final MantisScheduler scheduler;
        private final boolean loadJobsFromStore;

        public JobClustersManagerInitialize(
                final MantisScheduler scheduler,
                final boolean loadJobsFromStore) {
            Preconditions.checkNotNull(scheduler, "MantisScheduler cannot be null");
            this.scheduler = scheduler;
            this.loadJobsFromStore = loadJobsFromStore;
        }

        public MantisScheduler getScheduler() {
            return scheduler;
        }

        public boolean isLoadJobsFromStore() {
            return loadJobsFromStore;
        }
    }

    /** Outcome of {@link JobClustersManagerInitialize}. */
    public static final class JobClustersManagerInitializeResponse extends BaseResponse {

        public JobClustersManagerInitializeResponse(
                long requestId,
                ResponseCode responseCode,
                String message) {
            super(requestId, responseCode, message);
        }

        @Override
        public String toString() {
            return "JobClustersManagerInitializeResponse{" + "requestId=" + requestId
                    + ", responseCode=" + responseCode + ", message='" + message + '\'' + '}';
        }
    }

    /**
     * Get a list of all job clusters in the system
     *
     * @author njoshi
     */
    public static final class ListJobClustersRequest extends BaseRequest {

        public ListJobClustersRequest() {
            super();
        }
    }

    /** Carries job cluster metadata views, with optional regex/paging/sort filtering. */
    public static final class ListJobClustersResponse extends BaseResponse {

        private final List<MantisJobClusterMetadataView> jobClusters;

        public ListJobClustersResponse(
                long requestId,
                ResponseCode responseCode,
                String message,
                List<MantisJobClusterMetadataView> jobClusters) {
            super(requestId, responseCode, message);
            this.jobClusters = jobClusters;
        }

        public List<MantisJobClusterMetadataView> getJobClusters() {
            return jobClusters;
        }

        /**
         * Filters the clusters by a case-insensitive name regex (substring match via find())
         * and applies optional paging/sorting; any null parameter is simply skipped.
         */
        public ListObject<MantisJobClusterMetadataView> getJobClusters(
                String regexMatcher,
                Integer limit,
                Integer offset,
                String sortField,
                Boolean sortAscending,
                Uri uri) {
            List<MantisJobClusterMetadataView> targetJobClusters = jobClusters;
            if (!Strings.isNullOrEmpty(regexMatcher)) {
                Pattern matcher = Pattern.compile(regexMatcher, Pattern.CASE_INSENSITIVE);
                targetJobClusters = targetJobClusters.stream()
                        .filter(jobCluster -> matcher.matcher(jobCluster.getName()).find())
                        .collect(Collectors.toList());
            }
            ListObject.Builder<MantisJobClusterMetadataView> builder =
                    new ListObject.Builder<MantisJobClusterMetadataView>()
                            .withObjects(targetJobClusters, MantisJobClusterMetadataView.class);
            if (limit != null) {
                builder = builder.withLimit(limit);
            }
            if (offset != null) {
                builder = builder.withOffset(offset);
            }
            if (sortField != null) {
                builder = builder.withSortField(sortField);
            }
            if (sortAscending != null) {
                builder = builder.withSortAscending(sortAscending);
            }
            if (uri != null) {
                builder = builder.withUri(uri);
            }
            return builder.build();
        }

        @Override
        public String toString() {
            return "ListJobClustersResponse{" + "jobClusters=" + jobClusters
                    + ", requestId=" + requestId + ", responseCode=" + responseCode
                    + ", message='" + message + '\'' + '}';
        }
    }

    /**
     * Invoked by user to update a job cluster
     *
     * @author njoshi
     */
    public static final class UpdateJobClusterRequest extends BaseRequest {

        private final JobClusterDefinitionImpl jobClusterDefinition;
        private final String user;

        public UpdateJobClusterRequest(
                final JobClusterDefinitionImpl jobClusterDefinition,
                String user) {
            Preconditions.checkNotNull(jobClusterDefinition, "JobClusterDefinition cannot be null");
            Preconditions.checkArg(user != null && !user.isEmpty(), "Must provide user in request");
            this.jobClusterDefinition = jobClusterDefinition;
            this.user = user;
        }

        public JobClusterDefinitionImpl getJobClusterDefinition() {
            return jobClusterDefinition;
        }

        public String getUser() {
            return user;
        }

        @Override
        public String toString() {
            return "UpdateJobClusterRequest{" + "jobClusterDefinition=" + jobClusterDefinition
                    + ", user='" + user + '\'' + ", requestId=" + requestId + '}';
        }
    }

    /**
     * Indicates whether an update was successful
     *
     * @author njoshi
     */
    public static final class UpdateJobClusterResponse extends BaseResponse {

        public UpdateJobClusterResponse(
                final long requestId,
                final ResponseCode responseCode,
                final String message) {
            super(requestId, responseCode, message);
        }

        @Override
        public String toString() {
            return "UpdateJobClusterResponse{" + "requestId=" + requestId
                    + ", responseCode=" + responseCode + ", message='" + message + '\'' + '}';
        }
    }

    /**
     * Updates the SLA for the job cluster with an optional force enable option if cluster is disabled
     *
     * @author njoshi
     */
    public static final class UpdateJobClusterSLARequest extends BaseRequest {

        private final String clusterName;
        private final int min;
        private final int max;
        private final String cronSpec;
        private final IJobClusterDefinition.CronPolicy cronPolicy;
        private final boolean forceEnable;
        private final String user;

        @JsonCreator
        @JsonIgnoreProperties(ignoreUnknown = true)
        public UpdateJobClusterSLARequest(
                @JsonProperty("name") final String name,
                @JsonProperty("min") final Integer min,
                @JsonProperty("max") final Integer max,
                @JsonProperty("cronspec") final String cronSpec,
                @JsonProperty("cronpolicy") final IJobClusterDefinition.CronPolicy cronPolicy,
                @JsonProperty(value = "forceenable", defaultValue = "false") boolean forceEnable,
                @JsonProperty("user") final String user) {
            Preconditions.checkNotNull(min, "min");
            Preconditions.checkNotNull(max, "max");
            Preconditions.checkArg(user != null && !user.isEmpty(), "Must provide user in request");
            Preconditions.checkArg(
                    name != null && !name.isEmpty(),
                    "Must provide job cluster name in request");
            this.clusterName = name;
            this.max = max;
            this.min = min;
            this.cronSpec = cronSpec;
            this.cronPolicy = cronPolicy;
            this.forceEnable = forceEnable;
            this.user = user;
        }

        /** Convenience overload with no cron settings and forceEnable=false. */
        public UpdateJobClusterSLARequest(
                final String name,
                final int min,
                final int max,
                final String user) {
            this(name, min, max, null, null, false, user);
        }

        public String getClusterName() {
            return clusterName;
        }

        public int getMin() {
            return min;
        }

        public int getMax() {
            return max;
        }

        public String getCronSpec() {
            return cronSpec;
        }

        public IJobClusterDefinition.CronPolicy getCronPolicy() {
            return cronPolicy;
        }

        public boolean isForceEnable() {
            return forceEnable;
        }

        public String getUser() {
            return user;
        }

        @Override
        public String toString() {
            return "UpdateJobClusterSLARequest{" + "clusterName='" + clusterName + '\''
                    + ", min=" + min + ", max=" + max + ", cronSpec='" + cronSpec + '\''
                    + ", cronPolicy=" + cronPolicy + ", forceEnable=" + forceEnable
                    + ", user='" + user + '\'' + ", requestId=" + requestId + '}';
        }
    }

    /** Outcome of {@link UpdateJobClusterSLARequest}. */
    public static final class UpdateJobClusterSLAResponse extends BaseResponse {

        public UpdateJobClusterSLAResponse(
                final long requestId,
                final ResponseCode responseCode,
                final String message) {
            super(requestId, responseCode, message);
        }

        @Override
        public String toString() {
            return "UpdateJobClusterSLAResponse{" + "requestId=" + requestId
                    + ", responseCode=" + responseCode + ", message='" + message + '\'' + '}';
        }
    }

    /** Replaces the labels of a job cluster. */
    public static final class UpdateJobClusterLabelsRequest extends BaseRequest {

        private final List<Label> labels;
        private final String user;
        private final String clusterName;

        @JsonCreator
        @JsonIgnoreProperties(ignoreUnknown = true)
        public UpdateJobClusterLabelsRequest(
                @JsonProperty("name") final String clusterName,
                @JsonProperty("labels") final List<Label> labels,
                @JsonProperty("user") final String user) {
            Preconditions.checkNotNull(labels, "labels");
            Preconditions.checkArg(user != null && !user.isEmpty(), "Must provide user in request");
            Preconditions.checkArg(
                    clusterName != null && !clusterName.isEmpty(),
                    "Must provide job cluster name in request");
            this.labels = labels;
            this.user = user;
            this.clusterName = clusterName;
        }

        public List<Label> getLabels() {
            return labels;
        }

        public String getUser() {
            return user;
        }

        public String getClusterName() {
            return clusterName;
        }

        @Override
        public String toString() {
            return "UpdateJobClusterLabelsRequest{" + "labels=" + labels + ", user='" + user + '\''
                    + ", clusterName='" + clusterName + '\'' + ", requestId=" + requestId + '}';
        }
    }

    /** Outcome of {@link UpdateJobClusterLabelsRequest}. */
    public static final class UpdateJobClusterLabelsResponse extends BaseResponse {

        public UpdateJobClusterLabelsResponse(
                final long requestId,
                final ResponseCode responseCode,
                final String message) {
            super(requestId, responseCode, message);
        }

        @Override
        public String toString() {
            return "UpdateJobClusterLabelsResponse{" + "requestId=" + requestId
                    + ", responseCode=" + responseCode + ", message='" + message + '\'' + '}';
        }
    }

    /** Updates the cluster's artifact/version, optionally skipping an automatic resubmit. */
    public static final class UpdateJobClusterArtifactRequest extends BaseRequest {

        private final String artifactName;
        private final String version;
        private final boolean skipSubmit;
        private final String user;
        private final String clusterName;

        @JsonCreator
        @JsonIgnoreProperties(ignoreUnknown = true)
        public UpdateJobClusterArtifactRequest(
                @JsonProperty("name") final String clusterName,
                @JsonProperty("url") final String artifact,
                @JsonProperty("version") final String version,
                @JsonProperty("skipsubmit") final boolean skipSubmit,
                @JsonProperty("user") final String user) {
            Preconditions.checkArg(user != null && !user.isEmpty(), "Must provide user in request");
            Preconditions.checkArg(
                    clusterName != null && !clusterName.isEmpty(),
                    "Must provide job cluster name in request");
            Preconditions.checkArg(
                    artifact != null && !artifact.isEmpty(),
                    "Artifact cannot be null or empty");
            Preconditions.checkArg(
                    version != null && !version.isEmpty(),
                    "version cannot be null or empty");
            this.clusterName = clusterName;
            this.artifactName = artifact;
            this.version = version;
            this.skipSubmit = skipSubmit;
            this.user = user;
        }

        public String getArtifactName() {
            return artifactName;
        }

        public String getVersion() {
            return version;
        }

        public boolean isSkipSubmit() {
            return skipSubmit;
        }

        public String getUser() {
            return user;
        }

        public String getClusterName() {
            return clusterName;
        }

        @Override
        public String toString() {
            return "UpdateJobClusterArtifactRequest{" + "artifactName='" + artifactName + '\''
                    + ", version='" + version + '\'' + ", skipSubmit=" + skipSubmit
                    + ", user='" + user + '\'' + ", clusterName='" + clusterName + '\''
                    + ", requestId=" + requestId + '}';
        }
    }

    /** Outcome of {@link UpdateJobClusterArtifactRequest}. */
    public static final class UpdateJobClusterArtifactResponse extends BaseResponse {

        public UpdateJobClusterArtifactResponse(
                final long requestId,
                final ResponseCode responseCode,
                final String message) {
            super(requestId, responseCode, message);
        }

        @Override
        public String toString() {
            return "UpdateJobClusterArtifactResponse{" + "requestId=" + requestId
                    + ", responseCode=" + responseCode + ", message='" + message + '\'' + '}';
        }
    }

    /** Updates the worker migration strategy of a job cluster. */
    public static final class UpdateJobClusterWorkerMigrationStrategyRequest extends BaseRequest {

        private final WorkerMigrationConfig migrationConfig;
        private final String clusterName;
        private final String user;

        @JsonCreator
        @JsonIgnoreProperties(ignoreUnknown = true)
        public UpdateJobClusterWorkerMigrationStrategyRequest(
                @JsonProperty("name") final String clusterName,
                @JsonProperty("migrationConfig") final WorkerMigrationConfig config,
                @JsonProperty("user") final String user) {
            Preconditions.checkArg(user != null && !user.isEmpty(), "Must provide user in request");
            Preconditions.checkArg(
                    clusterName != null && !clusterName.isEmpty(),
                    "Must provide job cluster name in request");
            Preconditions.checkNotNull(config, "migrationConfig");
            this.migrationConfig = config;
            this.clusterName = clusterName;
            this.user = user;
        }

        public WorkerMigrationConfig getMigrationConfig() {
            return migrationConfig;
        }

        public String getClusterName() {
            return clusterName;
        }

        public String getUser() {
            return user;
        }

        @Override
        public String toString() {
            return "UpdateJobClusterWorkerMigrationStrategyRequest{"
                    + "migrationConfig=" + migrationConfig + ", clusterName='" + clusterName + '\''
                    + ", user='" + user + '\'' + ", requestId=" + requestId + '}';
        }
    }

    /** Outcome of {@link UpdateJobClusterWorkerMigrationStrategyRequest}. */
    public static final class UpdateJobClusterWorkerMigrationStrategyResponse extends BaseResponse {

        public UpdateJobClusterWorkerMigrationStrategyResponse(
                final long requestId,
                final ResponseCode responseCode,
                final String message) {
            super(requestId, responseCode, message);
        }

        @Override
        public String toString() {
            return "UpdateJobClusterWorkerMigrationStrategyResponse{" + "requestId=" + requestId
                    + ", responseCode=" + responseCode + ", message='" + message + '\'' + '}';
        }
    }

    /**
     * Invoked by user.
     * Kills all currently running jobs and puts itself in disabled state (also updates store)
     * Any SLA enforcement is disabled
     *
     * @author njoshi
     */
    public static final class DisableJobClusterRequest extends BaseRequest {

        private final String user;
        private final String clusterName;

        @JsonCreator
        @JsonIgnoreProperties(ignoreUnknown = true)
        public DisableJobClusterRequest(
                @JsonProperty("name") String clusterName,
                @JsonProperty("user") String user) {
            Preconditions.checkArg(user != null && !user.isEmpty(), "Must provide user in request");
            Preconditions.checkArg(
                    clusterName != null && !clusterName.isEmpty(),
                    "Must provide job cluster name in request");
            this.user = user;
            this.clusterName = clusterName;
        }

        public String getUser() {
            return user;
        }

        public String getClusterName() {
            return clusterName;
        }

        @Override
        public String toString() {
            return "DisableJobClusterRequest{" + "user='" + user + '\''
                    + ", clusterName='" + clusterName + '\'' + ", requestId=" + requestId + '}';
        }
    }

    /**
     * Whether a disable request was successful
     *
     * @author njoshi
     */
    public static final class DisableJobClusterResponse extends BaseResponse {

        public DisableJobClusterResponse(
                final long requestId,
                final ResponseCode responseCode,
                final String message) {
            super(requestId, responseCode, message);
        }

        @Override
        public String toString() {
            return "DisableJobClusterResponse{" + "requestId=" + requestId
                    + ", responseCode=" + responseCode + ", message='" + message + '\'' + '}';
        }
    }

    /**
     * Enables the job cluster. Restarts SLA enforcement logic and updates store.
     *
     * @author njoshi
     */
    public static final class EnableJobClusterRequest extends BaseRequest {

        private final String user;
        private final String clusterName;

        @JsonCreator
        @JsonIgnoreProperties(ignoreUnknown = true)
        public EnableJobClusterRequest(
                @JsonProperty("name") final String clusterName,
                @JsonProperty("user") final String user) {
            Preconditions.checkArg(user != null && !user.isEmpty(), "Must provide user in request");
            Preconditions.checkArg(
                    clusterName != null && !clusterName.isEmpty(),
                    "Must provide job cluster name in request");
            this.user = user;
            this.clusterName = clusterName;
        }

        public String getUser() {
            return user;
        }

        public String getClusterName() {
            return clusterName;
        }

        @Override
        public String toString() {
            return "EnableJobClusterRequest{" + "user='" + user + '\''
                    + ", clusterName='" + clusterName + '\'' + ", requestId=" + requestId + '}';
        }
    }

    /**
     * Whether enable was successful
     *
     * @author njoshi
     */
    public static final class EnableJobClusterResponse extends BaseResponse {

        public EnableJobClusterResponse(
                final long requestId,
                final ResponseCode responseCode,
                final String message) {
            super(requestId, responseCode, message);
        }

        @Override
        public String toString() {
            return "EnableJobClusterResponse{" + "requestId=" + requestId
                    + ", responseCode=" + responseCode + ", message='" + message + '\'' + '}';
        }
    }

    /**
     * Request the job cluster definition
     *
     * @author njoshi
     */
    public static final class GetJobClusterRequest extends BaseRequest {

        private final String jobClusterName;

        public GetJobClusterRequest(final String name) {
            super();
            Preconditions.checkArg(
                    name != null && !name.isEmpty(),
                    "Jobcluster name cannot be null or empty");
            this.jobClusterName = name;
        }

        public String getJobClusterName() {
            return jobClusterName;
        }

        @Override
        public String toString() {
            return "GetJobClusterRequest{" + "jobClusterName='" + jobClusterName + '\''
                    + ", requestId=" + requestId + '}';
        }
    }

    /**
     * Response to the getJobClusterRequest with the actual job cluster definition.
     *
     * @author njoshi
     */
    public static final class GetJobClusterResponse extends BaseResponse {

        private final Optional<MantisJobClusterMetadataView> jobClusterOp;

        public GetJobClusterResponse(
                long requestId,
                ResponseCode responseCode,
                String message,
                Optional<MantisJobClusterMetadataView> jobClusterOp) {
            super(requestId, responseCode, message);
            Preconditions.checkNotNull(jobClusterOp, "Job cluster cannot be null");
            this.jobClusterOp = jobClusterOp;
        }

        public Optional<MantisJobClusterMetadataView> getJobCluster() {
            return jobClusterOp;
        }

        @Override
        public String toString() {
            return "GetJobClusterResponse{" + "jobClusterOp=" + jobClusterOp
                    + ", requestId=" + requestId + ", responseCode=" + responseCode
                    + ", message='" + message + '\'' + '}';
        }
    }

    /** Filter criteria for job listing queries; all filters are optional. */
    public static final class ListJobCriteria {

        private final Optional<Integer> limit;
        private final Optional<JobState.MetaState> jobState;
        private final List<Integer> stageNumberList;
        private final List<Integer> workerIndexList;
        private final List<Integer> workerNumberList;
        private final List<WorkerState.MetaState> workerStateList;
        private final Optional<Boolean> activeOnly;
        private final Optional<String> matchingRegex;
        private final List<Label> matchingLabels;
        private final Optional<String> labelsOperand;

        public ListJobCriteria(
                final Optional<Integer> limit,
                final Optional<JobState.MetaState> jobState,
                final List<Integer> stageNumber,
                final List<Integer> workerIndex,
                final List<Integer> workerNumber,
                final List<WorkerState.MetaState> workerState,
                final Optional<Boolean> activeOnly,
                final Optional<String> matchingRegex,
                final Optional<String> matchingLabels,
                final Optional<String> labelsOperand) {
            this.limit = limit;
            this.jobState = jobState;
            this.stageNumberList = stageNumber;
            this.workerIndexList = workerIndex;
            this.workerNumberList = workerNumber;
            this.workerStateList = workerState;
            this.activeOnly = activeOnly;
            this.matchingRegex = matchingRegex;
            // The raw label query string is parsed into Label pairs up front.
            this.matchingLabels = matchingLabels.map(query -> LabelUtils.generatePairs(query))
                    .orElse(Collections.emptyList());
            this.labelsOperand = labelsOperand;
        }

        /** No-filter criteria (matches everything). */
        public ListJobCriteria() {
            this(
                    Optional.empty(),
                    Optional.empty(),
                    Lists.newArrayList(),
                    Lists.newArrayList(),
                    Lists.newArrayList(),
                    Lists.newArrayList(),
                    Optional.empty(),
                    Optional.empty(),
                    Optional.empty(),
                    Optional.empty());
        }

        public Optional<Integer> getLimit() {
            return limit;
        }

        public Optional<JobState.MetaState> getJobState() {
            return jobState;
        }

        public List<Integer> getStageNumberList() {
            return stageNumberList;
        }

        public List<Integer> getWorkerIndexList() {
            return workerIndexList;
        }

        public List<Integer> getWorkerNumberList() {
            return workerNumberList;
        }

        public List<WorkerState.MetaState> getWorkerStateList() {
            return workerStateList;
        }

        public Optional<Boolean> getActiveOnly() {
            return activeOnly;
        }

        public Optional<String> getMatchingRegex() {
            return matchingRegex;
        }

        public List<Label> getMatchingLabels() {
            return matchingLabels;
        }

        public Optional<String> getLabelsOperand() {
            return labelsOperand;
        }

        @Override
        public boolean equals(final Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            final ListJobCriteria that = (ListJobCriteria) o;
            return Objects.equals(limit, that.limit)
                    && Objects.equals(jobState, that.jobState)
                    && Objects.equals(stageNumberList, that.stageNumberList)
                    && Objects.equals(workerIndexList, that.workerIndexList)
                    && Objects.equals(workerNumberList, that.workerNumberList)
                    && Objects.equals(workerStateList, that.workerStateList)
                    && Objects.equals(activeOnly, that.activeOnly)
                    && Objects.equals(matchingRegex, that.matchingRegex)
                    && Objects.equals(matchingLabels, that.matchingLabels)
                    && Objects.equals(labelsOperand, that.labelsOperand);
        }

        @Override
        public int hashCode() {
            return Objects.hash(
                    limit,
                    jobState,
                    stageNumberList,
                    workerIndexList,
                    workerNumberList,
                    workerStateList,
                    activeOnly,
                    matchingRegex,
                    matchingLabels,
                    labelsOperand);
        }

        @Override
        public String toString() {
            return "ListJobCriteria{" + "limit=" + limit + ", jobState=" + jobState
                    + ", stageNumberList=" + stageNumberList + ", workerIndexList=" + workerIndexList
                    + ", workerNumberList=" + workerNumberList + ", workerStateList=" + workerStateList
                    + ", activeOnly=" + activeOnly + ", matchingRegex=" + matchingRegex
                    + ", matchingLabels=" + matchingLabels + ", labelsOperand=" + labelsOperand + '}';
        }
    }

    /**
     * Request a list of job metadata based on different criteria
     */
    public static class ListJobsRequest extends BaseRequest {

        private final ListJobCriteria filters;

        public ListJobsRequest(final ListJobCriteria filters) {
            this.filters = filters;
        }

        public ListJobsRequest() {
            this(new ListJobCriteria());
        }

        /** Lists jobs of the given cluster: the cluster name is used as the matching regex. */
        public ListJobsRequest(final String clusterName) {
            this(new ListJobCriteria(
                    Optional.empty(),
                    Optional.empty(),
                    Lists.newArrayList(),
                    Lists.newArrayList(),
                    Lists.newArrayList(),
                    Lists.newArrayList(),
                    Optional.empty(),
                    Optional.ofNullable(clusterName),
                    Optional.empty(),
                    Optional.empty()));
        }

        public ListJobCriteria getCriteria() {
            return filters;
        }

        @Override
        public boolean equals(final Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            final ListJobsRequest that = (ListJobsRequest) o;
            return Objects.equals(filters, that.filters);
        }

        @Override
        public int hashCode() {
            return Objects.hash(filters);
        }

        @Override
        public String toString() {
            return "ListJobsRequest{" + "filters=" + filters + ", requestId=" + requestId + '}';
        }
    }

    /** Carries matching job metadata views, with optional mapping/paging/sort helpers. */
    public static final class ListJobsResponse extends BaseResponse {

        private final List<MantisJobMetadataView> jobs;

        public ListJobsResponse(
                long requestId,
                ResponseCode responseCode,
                String message,
                List<MantisJobMetadataView> list) {
            super(requestId, responseCode, message);
            Preconditions.checkNotNull(list, "job ids list cannot be null");
            this.jobs = list;
        }

        public List<MantisJobMetadataView> getJobList() {
            return jobs;
        }

        /** Maps each view through {@code func} before applying paging/sorting. */
        public <R> ListObject<R> getJobList(Function<MantisJobMetadataView, R> func,
                                            Class<R> classType,
                                            Integer pageSize,
                                            Integer offset,
                                            String sortField,
                                            Boolean sortAscending,
                                            Uri uri) {
            List<R> mappedList = jobs.stream().map(func).collect(Collectors.toList());
            return getTransformedJobList(mappedList, classType, pageSize, offset,
                    sortField, sortAscending, uri);
        }

        public ListObject<MantisJobMetadataView> getJobList(
                Integer pageSize,
                Integer offset,
                String sortField,
                Boolean sortAscending,
                Uri uri) {
            return getTransformedJobList(jobs, MantisJobMetadataView.class, pageSize, offset,
                    sortField, sortAscending, uri);
        }

        // Applies optional paging/sort/uri settings; null parameters are skipped.
        private <T> ListObject<T> getTransformedJobList(
                List<T> list,
                Class<T> classType,
                Integer pageSize,
                Integer offset,
                String sortField,
                Boolean sortAscending,
                Uri uri) {
            ListObject.Builder<T> builder = new ListObject.Builder<T>().withObjects(list, classType);
            if (uri != null) {
                builder = builder.withUri(uri);
            }
            if (pageSize != null) {
                builder = builder.withLimit(pageSize);
            }
            if (offset != null) {
                builder = builder.withOffset(offset);
            }
            if (sortAscending != null) {
                builder = builder.withSortAscending(sortAscending);
            }
            if (sortField != null) {
                builder = builder.withSortField(sortField);
            }
            return builder.build();
        }

        @Override
        public String toString() {
            return "ListJobsResponse{" + "jobs=" + jobs + ", requestId=" + requestId
                    + ", responseCode=" + responseCode + ", message='" + message + '\'' + '}';
        }
    }

    /**
     * Request a list of job IDs based on different criteria
     */
    public static final class ListJobIdsRequest extends BaseRequest {

        public final ListJobCriteria filters;

        public ListJobIdsRequest(
                final Optional<Integer> limit,
                final Optional<JobState.MetaState> jobState,
                final Optional<Boolean> activeOnly,
final Optional<String> matchingRegex, final Optional<String> matchingLabels, final Optional<String> labelsOperand) { super(); filters = new ListJobCriteria( limit, jobState, Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), activeOnly, matchingRegex, matchingLabels, labelsOperand); } public ListJobIdsRequest() { this( Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty()); } public ListJobCriteria getCriteria() { return this.filters; } @Override public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } final ListJobIdsRequest that = (ListJobIdsRequest) o; return Objects.equals(filters, that.filters); } @Override public int hashCode() { return Objects.hash(filters); } @Override public String toString() { return "ListJobIdsRequest{" + "filters=" + filters + ", requestId=" + requestId + '}'; } } public static final class ListJobIdsResponse extends BaseResponse { private final List<JobIdInfo> jobIds; public ListJobIdsResponse( long requestId, ResponseCode responseCode, String message, List<JobIdInfo> list) { super(requestId, responseCode, message); Preconditions.checkNotNull(list, "job ids list cannot be null"); this.jobIds = list; } public List<JobIdInfo> getJobIds() { return jobIds; } @Override public String toString() { return "ListJobIdsResponse{" + "jobIds=" + jobIds + ", requestId=" + requestId + ", responseCode=" + responseCode + ", message='" + message + '\'' + '}'; } } /** * Request a list of archived workers for the given job ID */ public static final class ListArchivedWorkersRequest extends BaseRequest { public static final int DEFAULT_LIST_ARCHIVED_WORKERS_LIMIT = 100; private final JobId jobId; private final int limit; public ListArchivedWorkersRequest(final JobId jobId) { this(jobId, DEFAULT_LIST_ARCHIVED_WORKERS_LIMIT); } public ListArchivedWorkersRequest(final JobId jobId, int 
limit) { Preconditions.checkNotNull(jobId, "JobId"); this.jobId = jobId; this.limit = limit; } public JobId getJobId() { return jobId; } public int getLimit() { return limit; } @Override public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } final ListArchivedWorkersRequest that = (ListArchivedWorkersRequest) o; return limit == that.limit && Objects.equals(jobId, that.jobId); } @Override public int hashCode() { return Objects.hash(jobId, limit); } @Override public String toString() { return "ListArchivedWorkersRequest{" + "jobId=" + jobId + ", limit=" + limit + ", requestId=" + requestId + '}'; } } public static final class ListArchivedWorkersResponse extends BaseResponse { private final List<IMantisWorkerMetadata> mantisWorkerMetadata; public ListArchivedWorkersResponse( long requestId, ResponseCode responseCode, String message, List<IMantisWorkerMetadata> list) { super(requestId, responseCode, message); Preconditions.checkNotNull(list, "worker metadata list cannot be null"); this.mantisWorkerMetadata = list; } public List<IMantisWorkerMetadata> getWorkerMetadata() { return mantisWorkerMetadata; } public ListObject<IMantisWorkerMetadata> getWorkerMetadata( Integer pageSize, Integer offset, String sortField, Boolean sortAscending, Uri uri) { return getTransformedWorkerMetadata(mantisWorkerMetadata, IMantisWorkerMetadata.class, pageSize, offset, sortField, sortAscending, uri); } public <R> ListObject<R> getWorkerMetadata( Function<IMantisWorkerMetadata, R> func, Class<R> classType, Integer pageSize, Integer offset, String sortField, Boolean sortAscending, Uri uri) { List<R> mappedList = mantisWorkerMetadata.stream().map(func).collect(Collectors.toList()); return getTransformedWorkerMetadata(mappedList, classType, pageSize, offset, sortField, sortAscending, uri); } private <T> ListObject<T> getTransformedWorkerMetadata( List<T> list, Class<T> classType, Integer pageSize, Integer offset, 
String sortField, Boolean sortAscending, Uri uri) { ListObject.Builder<T> builder = new ListObject.Builder<T>() .withObjects(list, classType); if (pageSize != null) { builder = builder.withLimit(pageSize); } if (offset != null) { builder = builder.withOffset(offset); } if (sortField != null) { builder = builder.withSortField(sortField); } if (sortAscending != null) { builder = builder.withSortAscending(sortAscending); } if (uri != null) { builder = builder.withUri(uri); } return builder.build(); } @Override public String toString() { return "ListArchivedWorkersResponse{" + "mantisWorkerMetadata=" + mantisWorkerMetadata + ", requestId=" + requestId + ", responseCode=" + responseCode + ", message='" + message + '\'' + '}'; } } public static final class ListWorkersRequest extends BaseRequest { public static final int DEFAULT_LIST_WORKERS_LIMIT = 100; private final JobId jobId; private final int limit; public ListWorkersRequest(final JobId jobId) { this(jobId, DEFAULT_LIST_WORKERS_LIMIT); } public ListWorkersRequest(final JobId jobId, int limit) { this.jobId = jobId; this.limit = limit; } public JobId getJobId() { return jobId; } public int getLimit() { return limit; } @Override public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } final ListWorkersRequest that = (ListWorkersRequest) o; return limit == that.limit && Objects.equals(jobId, that.jobId); } @Override public int hashCode() { return Objects.hash(jobId, limit); } @Override public String toString() { return "ListWorkersRequest{" + "jobId=" + jobId + ", limit=" + limit + ", requestId=" + requestId + '}'; } } public static final class ListWorkersResponse extends BaseResponse { private final List<IMantisWorkerMetadata> mantisWorkerMetadata; public ListWorkersResponse( long requestId, ResponseCode responseCode, String message, List<IMantisWorkerMetadata> list) { super(requestId, responseCode, message); Preconditions.checkNotNull(list, 
"worker metadata list cannot be null"); this.mantisWorkerMetadata = list; } public List<IMantisWorkerMetadata> getWorkerMetadata() { return mantisWorkerMetadata; } @Override public String toString() { return "ListWorkersResponse{" + "mantisWorkerMetadata=" + mantisWorkerMetadata + ", requestId=" + requestId + ", responseCode=" + responseCode + ", message='" + message + '\'' + '}'; } } /** * Request a list of completed job Ids in this cluster * * @author njoshi */ public static final class ListCompletedJobsInClusterRequest extends BaseRequest { private final String clusterName; private final int limit; public ListCompletedJobsInClusterRequest(final String name) { this(name, 100); } public ListCompletedJobsInClusterRequest(final String name, final int limit) { super(); Preconditions.checkArg( name != null && !name.isEmpty(), "Jobcluster name cannot be null or empty"); this.clusterName = name; this.limit = limit; } public int getLimit() { return this.limit; } public String getClusterName() { return clusterName; } @Override public String toString() { return "ListCompletedJobsInClusterRequest{" + "clusterName='" + clusterName + '\'' + ", limit=" + limit + ", requestId=" + requestId + '}'; } } public static final class ListCompletedJobsInClusterResponse extends BaseResponse { private final List<CompletedJob> completedJobs; public ListCompletedJobsInClusterResponse( long requestId, ResponseCode responseCode, String message, List<CompletedJob> completedJobs) { super(requestId, responseCode, message); this.completedJobs = completedJobs; } public List<CompletedJob> getCompletedJobs() { return completedJobs; } @Override public String toString() { return "ListCompletedJobsInClusterResponse [completedJobs=" + completedJobs + "]"; } } public static final class SubmitJobRequest extends BaseRequest { private final Optional<JobDefinition> jobDefinition; private final String submitter; private final String clusterName; private final boolean isAutoResubmit; @JsonCreator 
@JsonIgnoreProperties(ignoreUnknown = true) public SubmitJobRequest( @JsonProperty("name") final String clusterName, @JsonProperty("user") final String user, @JsonProperty(value = "jobDefinition") final Optional<JobDefinition> jobDefinition) { super(); Preconditions.checkArg(user != null & !user.isEmpty(), "Must provide user in request"); Preconditions.checkArg( clusterName != null & !clusterName.isEmpty(), "Must provide job cluster name in request"); Preconditions.checkNotNull(jobDefinition, "jobDefinition"); this.jobDefinition = jobDefinition; this.submitter = user; this.clusterName = clusterName; isAutoResubmit = false; } //quick submit public SubmitJobRequest(final String clusterName, final String user) { super(); Preconditions.checkArg(user != null & !user.isEmpty(), "Must provide user in request"); Preconditions.checkArg( clusterName != null & !clusterName.isEmpty(), "Must provide job cluster name in request"); this.jobDefinition = Optional.empty(); this.submitter = user; this.clusterName = clusterName; isAutoResubmit = false; } // used to during sla enforcement public SubmitJobRequest( final String clusterName, final String user, boolean isAutoResubmit, final Optional<JobDefinition> jobDefinition) { super(); Preconditions.checkArg(user != null & !user.isEmpty(), "Must provide user in request"); Preconditions.checkArg( clusterName != null & !clusterName.isEmpty(), "Must provide job cluster name in request"); this.jobDefinition = jobDefinition; this.submitter = user; this.clusterName = clusterName; this.isAutoResubmit = isAutoResubmit; } public Optional<JobDefinition> getJobDefinition() { return jobDefinition; } public String getSubmitter() { return submitter; } public String getClusterName() { return clusterName; } public boolean isAutoResubmit() { return isAutoResubmit; } @Override public String toString() { return "SubmitJobRequest{" + "jobDefinition=" + jobDefinition + ", submitter='" + submitter + '\'' + ", clusterName='" + clusterName + '\'' + ", 
isAutoResubmit=" + isAutoResubmit + '}'; } } public static final class SubmitJobResponse extends BaseResponse { private final Optional<JobId> jobId; public SubmitJobResponse( final long requestId, final ResponseCode responseCode, final String message, final Optional<JobId> jobId) { super(requestId, responseCode, message); this.jobId = jobId; } public Optional<JobId> getJobId() { return jobId; } @Override public String toString() { return "SubmitJobResponse{" + "jobId=" + jobId + ", requestId=" + requestId + ", responseCode=" + responseCode + ", message='" + message + '\'' + '}'; } } public static final class GetJobDetailsRequest extends BaseRequest { private final String user; private final JobId jobId; public GetJobDetailsRequest(final String user, final JobId jobId) { super(); this.jobId = jobId; this.user = user; } public GetJobDetailsRequest(final String user, final String jobId) { super(); Preconditions.checkNotNull(user, "user"); Preconditions.checkArg( jobId != null & !jobId.isEmpty(), "Must provide job ID in request"); Optional<JobId> jOp = JobId.fromId(jobId); if (jOp.isPresent()) { this.jobId = jOp.get(); } else { throw new IllegalArgumentException( String.format("Invalid jobId %s. 
JobId must be in the format [JobCLusterName-NumericID]", jobId)); } this.user = user; } public JobId getJobId() { return jobId; } public String getUser() { return user; } @Override public String toString() { return "GetJobDetailsRequest [jobId=" + jobId + ", user=" + user + "]"; } } public static final class GetJobDetailsResponse extends BaseResponse { private final Optional<IMantisJobMetadata> jobMetadata; public GetJobDetailsResponse( final long requestId, final ResponseCode responseCode, final String message, final Optional<IMantisJobMetadata> jobMetadata) { super(requestId, responseCode, message); this.jobMetadata = jobMetadata; } public Optional<IMantisJobMetadata> getJobMetadata() { return jobMetadata; } @Override public String toString() { return "GetJobDetailsResponse [jobMetadata=" + jobMetadata + "]"; } } public static final class GetLatestJobDiscoveryInfoRequest extends BaseRequest { private final String jobCluster; public GetLatestJobDiscoveryInfoRequest(final String jobCluster) { Preconditions.checkNotNull(jobCluster, "jobCluster"); this.jobCluster = jobCluster; } public String getJobCluster() { return jobCluster; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; GetLatestJobDiscoveryInfoRequest that = (GetLatestJobDiscoveryInfoRequest) o; return Objects.equals(jobCluster, that.jobCluster); } @Override public int hashCode() { return Objects.hash(jobCluster); } @Override public String toString() { return "GetLatestJobDiscoveryInfoRequest{" + "jobCluster='" + jobCluster + '\'' + '}'; } } public static final class GetLatestJobDiscoveryInfoResponse extends BaseResponse { private final Optional<JobSchedulingInfo> jobSchedulingInfo; public GetLatestJobDiscoveryInfoResponse( final long requestId, final ResponseCode code, final String msg, final Optional<JobSchedulingInfo> jobSchedulingInfo) { super(requestId, code, msg); this.jobSchedulingInfo = jobSchedulingInfo; } public 
Optional<JobSchedulingInfo> getDiscoveryInfo() { return jobSchedulingInfo; } } public static final class GetJobSchedInfoRequest extends BaseRequest { private final JobId jobId; public GetJobSchedInfoRequest(final JobId jobId) { this.jobId = jobId; } public JobId getJobId() { return jobId; } @Override public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } final GetJobSchedInfoRequest that = (GetJobSchedInfoRequest) o; return Objects.equals(jobId, that.jobId); } @Override public int hashCode() { return Objects.hash(jobId); } @Override public String toString() { return "GetJobStatusSubjectRequest{" + "jobId=" + jobId + '}'; } } public static final class GetJobSchedInfoResponse extends BaseResponse { private final Optional<BehaviorSubject<JobSchedulingInfo>> jobStatusSubject; public GetJobSchedInfoResponse( final long requestId, final ResponseCode code, final String msg, final Optional<BehaviorSubject<JobSchedulingInfo>> statusSubject) { super(requestId, code, msg); this.jobStatusSubject = statusSubject; } public Optional<BehaviorSubject<JobSchedulingInfo>> getJobSchedInfoSubject() { return jobStatusSubject; } } /** * Stream of JobId submissions for a cluster */ public static final class GetLastSubmittedJobIdStreamRequest extends BaseRequest { private final String clusterName; public GetLastSubmittedJobIdStreamRequest(final String clusterName) { Preconditions.checkArg( clusterName != null & !clusterName.isEmpty(), "Must provide job cluster name in request"); this.clusterName = clusterName; } public String getClusterName() { return clusterName; } @Override public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } final GetLastSubmittedJobIdStreamRequest that = (GetLastSubmittedJobIdStreamRequest) o; return Objects.equals(clusterName, that.clusterName); } @Override public int hashCode() { return 
Objects.hash(clusterName); } @Override public String toString() { return "GetLastSubmittedJobIdStreamRequest{" + "clusterName='" + clusterName + '\'' + '}'; } } public static final class GetLastSubmittedJobIdStreamResponse extends BaseResponse { private final Optional<BehaviorSubject<JobId>> jobIdBehaviorSubject; public GetLastSubmittedJobIdStreamResponse( final long requestId, final ResponseCode code, final String msg, final Optional<BehaviorSubject<JobId>> jobIdBehaviorSubject) { super(requestId, code, msg); this.jobIdBehaviorSubject = jobIdBehaviorSubject; } public Optional<BehaviorSubject<JobId>> getjobIdBehaviorSubject() { return this.jobIdBehaviorSubject; } } public static final class KillJobRequest extends BaseRequest { private final JobId jobId; private final String reason; private final String user; @JsonCreator @JsonIgnoreProperties(ignoreUnknown = true) public KillJobRequest( @JsonProperty("JobId") final String jobId, @JsonProperty("reason") final String reason, @JsonProperty("user") final String user) { super(); Preconditions.checkArg(user != null & !user.isEmpty(), "Must provide user in request"); Preconditions.checkArg( jobId != null & !jobId.isEmpty(), "Must provide job ID in request"); this.jobId = JobId.fromId(jobId).get(); this.reason = Optional.ofNullable(reason).orElse(""); this.user = user; } public JobId getJobId() { return jobId; } public String getReason() { return reason; } public String getUser() { return user; } @Override public String toString() { return "KillJobRequest [jobId=" + jobId + ", reason=" + reason + ", user=" + user + "]"; } } public static final class KillJobResponse extends BaseResponse { private final JobId jobId; private final JobState state; private final String user; public KillJobResponse( long requestId, ResponseCode responseCode, JobState state, String message, JobId jobId, String user) { super(requestId, responseCode, message); this.jobId = jobId; this.state = state; this.user = user; } public JobId getJobId() { 
return jobId; } public JobState getState() { return state; } public String getUser() { return user; } @Override public String toString() { return "KillJobResponse [jobId=" + jobId + ", state=" + state + ", user=" + user + "]"; } } public static final class ScaleStageRequest extends BaseRequest { private final int stageNum; private final int numWorkers; private final String user; private final String reason; private final JobId jobId; @JsonCreator @JsonIgnoreProperties(ignoreUnknown = true) public ScaleStageRequest( @JsonProperty("JobId") final String jobId, @JsonProperty("StageNumber") final Integer stageNo, @JsonProperty("NumWorkers") final Integer numWorkers, @JsonProperty("User") final String user, @JsonProperty("Reason") final String reason) { super(); Preconditions.checkArg( jobId != null & !jobId.isEmpty(), "Must provide job ID in request"); Preconditions.checkArg(stageNo > 0, "Invalid stage Number " + stageNo); Preconditions.checkArg( numWorkers != null && numWorkers > 0, "NumWorkers must be greater than 0"); this.stageNum = stageNo; this.numWorkers = numWorkers; this.user = Optional.ofNullable(user).orElse("UserNotKnown"); this.reason = Optional.ofNullable(reason).orElse(""); this.jobId = JobId.fromId(jobId).get(); } public int getStageNum() { return stageNum; } public int getNumWorkers() { return numWorkers; } public String getUser() { return user; } public String getReason() { return reason; } public JobId getJobId() { return jobId; } @Override public String toString() { return "ScaleStageRequest{" + "stageNum=" + stageNum + ", numWorkers=" + numWorkers + ", user='" + user + '\'' + ", reason='" + reason + '\'' + ", jobId=" + jobId + '}'; } } public static final class ScaleStageResponse extends BaseResponse { private final int actualNumWorkers; public ScaleStageResponse( final long requestId, final ResponseCode responseCode, final String message, final int actualNumWorkers) { super(requestId, responseCode, message); this.actualNumWorkers = 
actualNumWorkers; } public int getActualNumWorkers() { return actualNumWorkers; } @Override public String toString() { return "ScaleStageResponse{" + "actualNumWorkers=" + actualNumWorkers + '}'; } } public static final class ResubmitWorkerRequest extends BaseRequest { private final String user; private final JobId jobId; private final int workerNum; private final Optional<String> reason; @JsonCreator @JsonIgnoreProperties(ignoreUnknown = true) public ResubmitWorkerRequest( @JsonProperty("JobId") final String jobIdStr, @JsonProperty("workerNumber") final Integer workerNum, @JsonProperty("user") final String user, @JsonProperty("reason") final Optional<String> reason) { super(); Preconditions.checkArg( jobIdStr != null & !jobIdStr.isEmpty(), "Must provide job ID in request"); Preconditions.checkNotNull(workerNum, "workerNumber"); Preconditions.checkArg(workerNum > 0, "Worker number must be greater than 0"); this.jobId = JobId.fromId(jobIdStr) .orElseThrow(() -> new IllegalArgumentException( "invalid JobID in resubmit worker request " + jobIdStr)); this.workerNum = workerNum; this.user = user; this.reason = reason; } public JobId getJobId() { return jobId; } public int getWorkerNum() { return workerNum; } public String getUser() { return user; } public Optional<String> getReason() { return reason; } @Override public String toString() { return "ResubmitWorkerRequest{" + "user='" + user + '\'' + ", jobId=" + jobId + ", workerNum=" + workerNum + ", reason=" + reason + '}'; } } public static final class V1ResubmitWorkerRequest extends BaseRequest { private final String user; private final int workerNum; private final Optional<String> reason; @JsonCreator @JsonIgnoreProperties(ignoreUnknown = true) public V1ResubmitWorkerRequest( @JsonProperty("workerNumber") final Integer workerNum, @JsonProperty("user") final String user, @JsonProperty("reason") final Optional<String> reason) { super(); Preconditions.checkNotNull(workerNum, "workerNumber"); 
Preconditions.checkArg(workerNum > 0, "Worker number must be greater than 0"); this.workerNum = workerNum; this.user = user; this.reason = reason; } public int getWorkerNum() { return workerNum; } public String getUser() { return user; } public Optional<String> getReason() { return reason; } @Override public String toString() { return "ResubmitWorkerRequest{" + "user='" + user + '\'' + ", workerNum=" + workerNum + ", reason=" + reason + '}'; } } public static final class ResubmitWorkerResponse extends BaseResponse { public ResubmitWorkerResponse( final long requestId, final ResponseCode responseCode, final String message) { super(requestId, responseCode, message); } @Override public String toString() { return "ResubmitWorkerResponse [requestId=" + requestId + ", respCode=" + responseCode + ", message=" + message + "]"; } } }
4,383
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/proto/BaseRequest.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.jobcluster.proto;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Base type for job-cluster actor protocol request messages. Every instance carries a
 * {@link #requestId} that is echoed back in the matching response, allowing callers to
 * correlate responses with the requests that produced them.
 */
public class BaseRequest {

    // JVM-wide source of auto-generated request ids; shared by all subclasses.
    @JsonIgnore
    private static final AtomicLong counter = new AtomicLong(0);

    // Correlation id for this request; excluded from JSON serialization.
    @JsonIgnore
    public final long requestId;

    /**
     * Creates a request carrying an explicitly supplied id.
     *
     * @param requestId the correlation id to assign to this request
     */
    public BaseRequest(long requestId) {
        this.requestId = requestId;
    }

    /** Creates a request with the next auto-generated correlation id. */
    public BaseRequest() {
        this.requestId = counter.getAndIncrement();
    }
}
4,384
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/IMantisWorkerEventProcessor.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.jobcluster.job;

import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.scheduler.WorkerEvent;

/**
 * Declares the behavior of the Mantis worker event processor, which is responsible for
 * managing worker state.
 */
public interface IMantisWorkerEventProcessor {

    /**
     * Handles a state transition for a worker.
     *
     * @param event the worker event describing the state transition to process
     * @param jobStore the persistence store used to record the resulting worker state
     * @throws Exception if processing the event or persisting the transition fails
     */
    // Interface methods are implicitly public; the redundant modifier was dropped.
    void processEvent(WorkerEvent event, MantisJobStore jobStore) throws Exception;
}
4,385
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/WorkerResubmitRateLimiter.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.jobcluster.job;

import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.stream.Collectors;

import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.config.ConfigurationProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tracks resubmit history per (stage, worker index) slot and computes an
 * escalating delay before each successive resubmission of the same slot.
 * The delay ladder is configured as a colon-separated list of seconds
 * (e.g. {@code "5:10:20"}); each resubmit of the same slot moves one step up
 * the ladder, clamping at the largest value. Records older than
 * {@code expireResubmitDelaySecs} are purged so a long-healthy worker starts
 * over at the bottom of the ladder.
 *
 * <p>This class is not ThreadSafe. It is expected to be invoked by the JobActor
 * (which should guarantee no concurrent invocations).
 */
/* package */class WorkerResubmitRateLimiter {

    private static final Logger LOGGER = LoggerFactory.getLogger(WorkerResubmitRateLimiter.class);

    /** Default delay ladder (seconds), colon separated. */
    private static final String DEFAULT_WORKER_RESUBMIT_INTERVAL_SECS_STR = "5:10:20";

    /** Resubmit history keyed by {@code "<stageNum>_<workerIndex>"}. */
    private final Map<String, ResubmitRecord> resubmitRecords = new HashMap<>();

    // NOTE(review): the two constants below are private and unreferenced anywhere in this
    // class; confirm they are not read via reflection before deleting them.
    private static final long DEFAULT_EXPIRE_RESUBMIT_DELAY_SECS = 300;
    private static final long DEFAULT_EXPIRE_RESUBMIT_DELAY_EXECUTION_INTERVAL_SECS = 120;

    /** Fallback delay (seconds) used when a configured token cannot be parsed. */
    private static final long DEFAULT_RESUBMISSION_INTERVAL_SECS = 10;

    /** Age (seconds) beyond which a resubmit record is eligible for purging. */
    private final long expireResubmitDelaySecs;

    /** Delay ladder in seconds; index 0 is always 0 so the first resubmit is immediate. */
    private final long[] resubmitIntervalSecs;

    /**
     * Constructor for this class.
     *
     * @param workerResubmitIntervalSecs colon-separated delay ladder in seconds
     *                                   (e.g. {@code "5:10:20"}); null or empty falls back
     *                                   to {@link #DEFAULT_WORKER_RESUBMIT_INTERVAL_SECS_STR}.
     * @param expireResubmitDelaySecs    age in seconds after which a resubmit record is
     *                                   purged; must be greater than 0.
     */
    WorkerResubmitRateLimiter(String workerResubmitIntervalSecs, long expireResubmitDelaySecs) {
        Preconditions.checkArg(expireResubmitDelaySecs > 0, "Expire "
                + "Resubmit Delay cannot be 0 or less");
        if (workerResubmitIntervalSecs == null || workerResubmitIntervalSecs.isEmpty()) {
            workerResubmitIntervalSecs = DEFAULT_WORKER_RESUBMIT_INTERVAL_SECS_STR;
        }
        StringTokenizer tokenizer = new StringTokenizer(workerResubmitIntervalSecs, ":");
        if (tokenizer.countTokens() == 0) {
            // Input contained only delimiters (e.g. ":::"): fall back to a single default step.
            this.resubmitIntervalSecs = new long[2];
            this.resubmitIntervalSecs[0] = 0L;
            this.resubmitIntervalSecs[1] = DEFAULT_RESUBMISSION_INTERVAL_SECS;
        } else {
            // Slot 0 stays 0 so a first-time resubmission is not delayed.
            this.resubmitIntervalSecs = new long[tokenizer.countTokens() + 1];
            this.resubmitIntervalSecs[0] = 0L;
            for (int i = 1; i < this.resubmitIntervalSecs.length; i++) {
                final String s = tokenizer.nextToken();
                try {
                    this.resubmitIntervalSecs[i] = Long.parseLong(s);
                } catch (NumberFormatException e) {
                    LOGGER.warn("Invalid number for resubmit interval " + s + ": using default "
                            + DEFAULT_RESUBMISSION_INTERVAL_SECS);
                    this.resubmitIntervalSecs[i] = DEFAULT_RESUBMISSION_INTERVAL_SECS;
                }
            }
        }
        this.expireResubmitDelaySecs = expireResubmitDelaySecs;
    }

    /**
     * Default constructor; pulls the delay ladder and the expiry from master configuration.
     */
    WorkerResubmitRateLimiter() {
        this(ConfigurationProvider.getConfig().getWorkerResubmitIntervalSecs(),
                ConfigurationProvider.getConfig().getExpireWorkerResubmitDelaySecs());
    }

    /**
     * Called periodically by Job Actor to purge old records. A record is purged once its
     * creation time (resubmitAt minus the delay applied) is older than
     * {@code expireResubmitDelaySecs} relative to {@code currentTime}.
     *
     * @param currentTime current wall-clock time in milliseconds.
     */
    public void expireResubmitRecords(long currentTime) {
        Iterator<ResubmitRecord> it = resubmitRecords.values().iterator();
        while (it.hasNext()) {
            ResubmitRecord record = it.next();
            // resubmitAt - delayedBy approximates the time the record was created.
            // NOTE(review): delayedBy is stored in seconds while resubmitAt is millis;
            // the subtraction mixes units — confirm whether delayedBy*1000 was intended.
            if (record.getResubmitAt() - record.getDelayedBy()
                    < (currentTime - this.expireResubmitDelaySecs * 1000)) {
                it.remove();
            }
        }
    }

    /**
     * Given a resubmit record pick the next delay in the array of delays (seconds) configured.
     * A null record (first resubmit) yields the bottom of the ladder (0).
     *
     * @param resubmitRecord the previous resubmit record for the slot, or null.
     * @return the next delay in seconds.
     */
    long evalDelay(final ResubmitRecord resubmitRecord) {
        long delay = resubmitIntervalSecs[0];
        if (resubmitRecord != null) {
            long prevDelay = resubmitRecord.getDelayedBy();
            // Locate the slot of the previous delay, then step to the next (larger) one,
            // clamping at the largest configured interval.
            int index = 0;
            for (; index < resubmitIntervalSecs.length; index++) {
                if (prevDelay <= resubmitIntervalSecs[index]) {
                    break;
                }
            }
            index++;
            if (index >= resubmitIntervalSecs.length) {
                index = resubmitIntervalSecs.length - 1;
            }
            delay = resubmitIntervalSecs[index];
        }
        return delay;
    }

    /**
     * Computes (and records) the absolute time at which the given worker may be resubmitted.
     * Used for testing with an explicit clock value.
     *
     * @param workerId    id of the worker being resubmitted.
     * @param stageNum    stage the worker belongs to.
     * @param currentTime current wall-clock time in milliseconds.
     * @return absolute resubmit time in milliseconds.
     */
    long getWorkerResubmitTime(final WorkerId workerId, final int stageNum, final long currentTime) {
        String workerKey = generateWorkerIndexStageKey(workerId, stageNum);
        final ResubmitRecord prevResubmitRecord = resubmitRecords.get(workerKey);
        long delay = evalDelay(prevResubmitRecord);
        long resubmitAt = currentTime + delay * 1000;
        final ResubmitRecord currResubmitRecord = new ResubmitRecord(workerKey, resubmitAt, delay);
        resubmitRecords.put(workerKey, currResubmitRecord);
        return resubmitAt;
    }

    /**
     * Get the worker resubmit time for the given worker using the system clock.
     *
     * @param workerId id of the worker being resubmitted.
     * @param stageNum stage the worker belongs to.
     * @return absolute resubmit time in milliseconds.
     */
    public long getWorkerResubmitTime(final WorkerId workerId, final int stageNum) {
        return getWorkerResubmitTime(workerId, stageNum, System.currentTimeMillis());
    }

    /**
     * Builds the history key: stage number and worker index. The worker *number* is
     * deliberately excluded so a replacement worker in the same slot shares history.
     *
     * @param workerId worker whose index is used.
     * @param stageNum stage number.
     * @return key of the form {@code "<stageNum>_<workerIndex>"}.
     */
    String generateWorkerIndexStageKey(WorkerId workerId, int stageNum) {
        return stageNum + "_" + workerId.getWorkerIndex();
    }

    /**
     * Clears the resubmit cache.
     */
    void shutdown() {
        resubmitRecords.clear();
    }

    /**
     * Returns a snapshot list of the current resubmit records.
     * (Fix: the previous implementation allocated an unused defensive HashMap copy here.)
     *
     * @return list of resubmit records.
     */
    List<ResubmitRecord> getResubmitRecords() {
        return resubmitRecords.values().stream().collect(Collectors.toList());
    }

    long getExpireResubmitDelaySecs() {
        return expireResubmitDelaySecs;
    }

    public long[] getResubmitIntervalSecs() {
        return resubmitIntervalSecs;
    }

    /**
     * Tracks information about a worker resubmit: the slot key, the absolute time
     * (millis) the resubmit is scheduled for, and the delay (seconds) that was applied.
     */
    static final class ResubmitRecord {

        private final String workerKey;
        private final long resubmitAt;
        private final long delayedBy;

        private ResubmitRecord(String workerKey, long resubmitAt, long delayedBy) {
            this.workerKey = workerKey;
            this.resubmitAt = resubmitAt;
            this.delayedBy = delayedBy;
        }

        public long getDelayedBy() {
            return delayedBy;
        }

        public String getWorkerKey() {
            return this.workerKey;
        }

        public long getResubmitAt() {
            return resubmitAt;
        }
    }
}
4,386
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/FilterableMantisStageMetadataWritable.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.jobcluster.job;

import java.util.List;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonFilter;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.server.master.store.MantisStageMetadataWritable;

/**
 * A {@link MantisStageMetadataWritable} whose JSON serialization can be trimmed via the
 * "stageMetadataList" {@link JsonFilter}. Adds no state or behavior of its own; it exists
 * purely so callers can apply a Jackson property filter when rendering stage metadata
 * (see its use from MantisJobMetadataView).
 */
@JsonFilter("stageMetadataList")
public class FilterableMantisStageMetadataWritable extends MantisStageMetadataWritable {

    /**
     * Jackson creator; delegates every property straight to the parent constructor.
     * NOTE(review): {@code @JsonIgnoreProperties} is normally a class-level annotation —
     * confirm its placement on the creator has the intended effect with this Jackson version.
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown=true)
    public FilterableMantisStageMetadataWritable(@JsonProperty("jobId") String jobId,
                                                 @JsonProperty("stageNum") int stageNum,
                                                 @JsonProperty("numStages") int numStages,
                                                 @JsonProperty("machineDefinition") MachineDefinition machineDefinition,
                                                 @JsonProperty("numWorkers") int numWorkers,
                                                 @JsonProperty("hardConstraints") List<JobConstraints> hardConstraints,
                                                 @JsonProperty("softConstraints") List<JobConstraints> softConstraints,
                                                 @JsonProperty("scalingPolicy") StageScalingPolicy scalingPolicy,
                                                 @JsonProperty("scalable") boolean scalable) {
        super(jobId, stageNum, numStages, machineDefinition, numWorkers, hardConstraints,
                softConstraints, scalingPolicy, scalable);
    }
}
4,387
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/MantisJobMetadataView.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.jobcluster.job;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonFilter;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.server.master.domain.DataFormatAdapter;
import io.mantisrx.server.master.store.MantisJobMetadata;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.List;
import java.util.stream.Collectors;

/**
 * A serialization-friendly, filterable view of a job's metadata: the job record itself
 * plus (optionally) its stage and worker metadata, trimmed by the stage-number,
 * worker-index, worker-number and worker-state filter lists supplied by the caller.
 * An empty filter list means "no filtering on that dimension".
 */
@JsonFilter("topLevelFilter")
public class MantisJobMetadataView {

    @JsonIgnore
    private static final ObjectMapper mapper = new ObjectMapper();
    private static final Logger logger = LoggerFactory.getLogger(MantisJobMetadataView.class);

    private FilterableMantisJobMetadataWritable jobMetadata;
    // -1 means "not terminated"; surfaced as an empty string by getTerminatedAt().
    @JsonIgnore
    private long terminatedAt = -1;
    private List<FilterableMantisStageMetadataWritable> stageMetadataList = Lists.newArrayList();
    private List<FilterableMantisWorkerMetadataWritable> workerMetadataList = Lists.newArrayList();
    private String version = "";

    /** No-arg constructor for Jackson. */
    public MantisJobMetadataView() {}

    /**
     * Jackson creator used when deserializing a previously serialized view.
     *
     * @param jobMeta        the job-level metadata.
     * @param stageMetadata  stage metadata list (possibly pre-filtered).
     * @param workerMetadata worker metadata list (possibly pre-filtered).
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown=true)
    public MantisJobMetadataView(@JsonDeserialize(as=FilterableMantisJobMetadataWritable.class)
                                 @JsonProperty("jobMetadata") FilterableMantisJobMetadataWritable jobMeta,
                                 @JsonProperty("stageMetadataList") List<FilterableMantisStageMetadataWritable> stageMetadata,
                                 @JsonProperty("workerMetadataList") List<FilterableMantisWorkerMetadataWritable> workerMetadata) {
        this.jobMetadata = jobMeta;
        this.stageMetadataList = stageMetadata;
        this.workerMetadataList = workerMetadata;
    }

    /**
     * Builds a view from live job metadata, applying the given filters.
     *
     * @param jobMeta          source job metadata.
     * @param terminatedAt     termination time in millis, or -1 if still running.
     * @param stageNumberList  stage numbers to include; empty means all.
     * @param workerIndexList  worker indexes to include; empty means all.
     * @param workerNumberList worker numbers to include; empty means all.
     * @param workerStateList  worker meta-states to include; empty means all.
     * @param jobIdOnly        when true, stage/worker lists are left empty.
     */
    public MantisJobMetadataView(IMantisJobMetadata jobMeta, long terminatedAt,
                                 List<Integer> stageNumberList,
                                 List<Integer> workerIndexList,
                                 List<Integer> workerNumberList,
                                 List<WorkerState.MetaState> workerStateList,
                                 boolean jobIdOnly) {
        if (logger.isTraceEnabled()) {
            logger.trace("Enter MantisJobMetadataView ctor jobMeta {} workerIndexList {} "
                            + "workerNumberList workerStateList {} jobIdOnly {}",
                    workerIndexList, workerNumberList, workerStateList, jobIdOnly);
        }
        this.jobMetadata =
                DataFormatAdapter.convertMantisJobMetadataToFilterableMantisJobMetadataWriteable(jobMeta);
        this.terminatedAt = terminatedAt;
        version = jobMeta.getJobDefinition().getVersion();
        if (logger.isDebugEnabled()) {
            logger.debug("MantisJobMetadataView.terminatedAt set to {}, version set to {}",
                    terminatedAt, version);
        }
        if (!jobIdOnly) {
            if (logger.isDebugEnabled()) {
                logger.debug("MantisJobMetadataView.jobIdOnly is {}", jobIdOnly);
            }
            this.stageMetadataList = jobMeta.getStageMetadata().values().stream()
                    .filter((IMantisStageMetadata mantisStageMetadata) ->
                            stageFilter(mantisStageMetadata, stageNumberList))
                    .map(DataFormatAdapter::convertFilterableMantisStageMetadataToMantisStageMetadataWriteable)
                    .collect(Collectors.toList());
            this.workerMetadataList = jobMeta.getStageMetadata().values().stream()
                    .map(IMantisStageMetadata::getAllWorkers)
                    .flatMap(jobWorkers -> jobWorkers.stream()
                            .map(jw -> jw.getMetadata())
                            .filter((IMantisWorkerMetadata workerMetadata) ->
                                    workerFilter(workerMetadata, workerIndexList,
                                            workerNumberList, workerStateList))
                            .map(DataFormatAdapter::convertMantisWorkerMetadataToFilterableMantisWorkerMetadataWritable))
                    .collect(Collectors.toList());
        }
        if (logger.isTraceEnabled()) {
            logger.trace("Exit MantisJobMetadataView ctor");
        }
    }

    /**
     * Convenience constructor for a job that has not terminated (terminatedAt = -1).
     */
    public MantisJobMetadataView(IMantisJobMetadata jobMeta,
                                 final List<Integer> stageNumberList,
                                 final List<Integer> workerIndexList,
                                 final List<Integer> workerNumberList,
                                 final List<WorkerState.MetaState> workerStateList,
                                 final boolean jobIdOnly) {
        this(jobMeta, -1, stageNumberList, workerIndexList, workerNumberList, workerStateList, jobIdOnly);
    }

    /**
     * Returns true if no stage filter was specified, or the stage's number appears in the list.
     */
    private boolean stageFilter(IMantisStageMetadata msmd, List<Integer> stageNumberList) {
        return stageNumberList.isEmpty() || stageNumberList.contains(msmd.getStageNum());
    }

    /**
     * Returns true if the worker passes every non-empty filter list (list membership on
     * index, number and meta-state; an empty list imposes no constraint).
     *
     * <p>Fix: the previous implementation (a) returned false from *inside* the
     * worker-index loop as soon as any single element failed to match, so a match later in
     * the list was never reached; (b) reassigned the match flag on every iteration of the
     * worker-number loop, so only a single-element list could ever match; and (c) only
     * honored the *last* element of the state list. All three dimensions now use proper
     * contains-semantics.
     */
    private boolean workerFilter(IMantisWorkerMetadata mwmd,
                                 final List<Integer> workerIndexList,
                                 final List<Integer> workerNumberList,
                                 final List<WorkerState.MetaState> workerStateList) {
        if (!workerIndexList.isEmpty() && !workerIndexList.contains(mwmd.getWorkerIndex())) {
            return false;
        }
        if (!workerNumberList.isEmpty() && !workerNumberList.contains(mwmd.getWorkerNumber())) {
            return false;
        }
        if (!workerStateList.isEmpty()) {
            try {
                if (!workerStateList.contains(WorkerState.toMetaState(mwmd.getState()))) {
                    return false;
                }
            } catch (IllegalArgumentException e) {
                // State cannot be mapped to a meta-state, so it cannot match any requested state.
                return false;
            }
        }
        return true;
    }

    public MantisJobMetadata getJobMetadata() {
        return jobMetadata;
    }

    public List<FilterableMantisStageMetadataWritable> getStageMetadataList() {
        return stageMetadataList;
    }

    public List<FilterableMantisWorkerMetadataWritable> getWorkerMetadataList() {
        return workerMetadataList;
    }

    /**
     * Returns the termination time in millis as a string, or "" if the job has not terminated.
     */
    public String getTerminatedAt() {
        if (terminatedAt == -1) {
            return "";
        } else {
            return String.valueOf(this.terminatedAt);
        }
    }

    public String getVersion() {
        return this.version;
    }

    @Override
    public String toString() {
        try {
            return mapper.writeValueAsString(this);
        } catch (JsonProcessingException e) {
            return "MantisJobMetadataView [jobMetadata=" + jobMetadata
                    + ", stageMetadataList=" + stageMetadataList
                    + ", workerMetadataList=" + workerMetadataList + "]";
        }
    }
}
4,388
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/MantisJobMetadataImpl.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.jobcluster.job;

import java.net.MalformedURLException;
import java.net.URL;
import java.time.Instant;
import java.util.*;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonFilter;
import io.mantisrx.master.jobcluster.job.worker.JobWorker;
import io.mantisrx.server.master.domain.DataFormatAdapter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.persistence.exceptions.InvalidJobException;
import io.mantisrx.server.master.persistence.exceptions.InvalidJobStateChangeException;

/**
 * Primary implementation of {@link IMantisJobMetadata}: the authoritative in-memory record
 * of a job — its definition, lifecycle state, timestamps, stage metadata and a
 * worker-number → stage-number index. Mutators that take a {@link MantisJobStore} persist
 * the change via {@code store.updateJob(this)} before returning.
 *
 * <p>NOTE(review): mutable state here is not synchronized — presumably the owning JobActor
 * serializes all access; confirm before using from any other thread.
 */
@JsonFilter("topLevelFilter")
public class MantisJobMetadataImpl implements IMantisJobMetadata {

    private static final Logger logger = LoggerFactory.getLogger(MantisJobMetadataImpl.class);

    private final JobId jobId;
    // Epoch millis. startedAt/endedAt use DEFAULT_STARTED_AT_EPOCH as the "unset" sentinel.
    private final long submittedAt;
    private long startedAt = DEFAULT_STARTED_AT_EPOCH;
    private long endedAt = DEFAULT_STARTED_AT_EPOCH;
    private JobState state;
    private int nextWorkerNumberToUse;
    private final JobDefinition jobDefinition;

    // Stage number -> stage metadata; rebuilt at runtime, not serialized.
    @JsonIgnore
    private final Map<Integer, IMantisStageMetadata> stageMetadataMap = new HashMap<>();
    // Worker number -> stage number index for O(1) worker lookup; not serialized.
    @JsonIgnore
    private final Map<Integer, Integer> workerNumberToStageMap = new HashMap<>();

    /**
     * Jackson creator. A null state defaults to {@link JobState#Accepted}.
     *
     * @param jobId                 the job's id.
     * @param submittedAt           submission time, epoch millis.
     * @param startedAt             start time, epoch millis (sentinel if not started).
     * @param jobDefinition         the immutable job definition.
     * @param state                 persisted lifecycle state, may be null.
     * @param nextWorkerNumberToUse next worker number to assign.
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown=true)
    public MantisJobMetadataImpl(@JsonProperty("jobId") JobId jobId,
                                 @JsonProperty("submittedAt") long submittedAt,
                                 @JsonProperty("startedAt") long startedAt,
                                 @JsonProperty("jobDefinition") JobDefinition jobDefinition,
                                 @JsonProperty("state") JobState state,
                                 @JsonProperty("nextWorkerNumberToUse") int nextWorkerNumberToUse
    ) {
        this.jobId = jobId;
        this.submittedAt = submittedAt;
        this.startedAt = startedAt;
        this.state = state==null? JobState.Accepted : state;
        this.nextWorkerNumberToUse = nextWorkerNumberToUse;
        this.jobDefinition = jobDefinition;
    }

    @Override
    public JobId getJobId() {
        return jobId;
    }

    @Override
    public String getClusterName() {
        // The cluster name is the job definition's name.
        return this.jobDefinition.getName();
    }

    @Override
    public Instant getSubmittedAtInstant() {
        return Instant.ofEpochMilli(submittedAt);
    }

    public long getSubmittedAt() {
        return submittedAt;
    }

    @Override
    public long getSubscriptionTimeoutSecs() {
        return this.jobDefinition.getSubscriptionTimeoutSecs();
    }

    @Override
    public int getNextWorkerNumberToUse() {
        return nextWorkerNumberToUse;
    }

    /**
     * Updates the next worker number and persists the job.
     *
     * @param n     new next worker number.
     * @param store persistence store; updated before returning.
     * @throws Exception if persisting fails.
     */
    public void setNextWorkerNumberToUse(int n, MantisJobStore store) throws Exception{
        this.nextWorkerNumberToUse = n;
        store.updateJob(this);
    }

    @Override
    public JobState getState() {
        return state;
    }

    @Override
    public JobDefinition getJobDefinition() {
        return this.jobDefinition;
    }

    @Override
    public String getUser() {
        return this.jobDefinition.getUser();
    }

    @Override
    public Optional<JobSla> getSla() {
        return Optional.ofNullable(this.jobDefinition.getJobSla());
    }

    @Override
    public List<Parameter> getParameters() {
        return this.jobDefinition.getParameters();
    }

    @Override
    public List<Label> getLabels() {
        return this.jobDefinition.getLabels();
    }

    @JsonIgnore
    @Override
    public int getTotalStages() {
        return this.getJobDefinition().getNumberOfStages();
    }

    @JsonIgnore
    @Override
    public String getArtifactName() {
        return this.jobDefinition.getArtifactName();
    }

    /**
     * Transitions the job to the given state (validating the transition) and persists it.
     *
     * @param state target state.
     * @param store persistence store; updated before returning.
     * @throws InvalidJobStateChangeException if the transition is not allowed.
     * @throws Exception if persisting fails.
     */
    void setJobState(JobState state, MantisJobStore store) throws Exception {
        logger.info("Updating job State from {} to {} ", this.state, state);
        if(!this.state.isValidStateChgTo(state))
            throw new InvalidJobStateChangeException(jobId.getId(), this.state, state);
        this.state = state;
        store.updateJob(this);
    }

    /**
     * Records the job start time (epoch millis) and persists the job.
     *
     * @param startedAt start time, epoch millis.
     * @param store     persistence store; updated before returning.
     * @throws Exception if persisting fails.
     */
    void setStartedAt(long startedAt, MantisJobStore store) throws Exception {
        logger.info("Updating job start time to {} ", startedAt);
        this.startedAt = startedAt;
        store.updateJob(this);
    }

    /**
     * Add job stage if absent, returning true if it was actually added.
     * Also registers each of the stage's workers in the worker-number index.
     *
     * <p>NOTE(review): despite the name, {@code put} (not {@code putIfAbsent}) is used,
     * so an existing stage entry IS replaced even though false is returned — confirm
     * whether replacement is relied upon before changing this to putIfAbsent.
     *
     * @param msmd The stage's metadata object.
     * @return true if actually added, false otherwise.
     */
    public boolean addJobStageIfAbsent(IMantisStageMetadata msmd) {
        if(logger.isTraceEnabled()) { logger.trace("Adding stage {} ", msmd); }
        boolean result = stageMetadataMap.put(msmd.getStageNum(), msmd) == null;
        msmd.getAllWorkers().stream().forEach((worker) -> {
            workerNumberToStageMap.put(worker.getMetadata().getWorkerNumber(), msmd.getStageNum());
        });
        return result;
    }

    /** Returns an unmodifiable view of stage number -> stage metadata. */
    @JsonIgnore
    public Map<Integer,? extends IMantisStageMetadata> getStageMetadata() {
        return Collections.unmodifiableMap(stageMetadataMap);
    }

    /** Returns an unmodifiable view of worker number -> stage number. */
    public final Map<Integer, Integer> getWorkerNumberToStageMap() {
        return Collections.unmodifiableMap(this.workerNumberToStageMap);
    }

    /** Returns the metadata for the given stage number, if present. */
    @JsonIgnore
    public Optional<IMantisStageMetadata> getStageMetadata(int stageNum) {
        return Optional.ofNullable(stageMetadataMap.get(stageNum));
    }

    /**
     * Replace meta data for the given worker with a newly created worker that has not been dispatched yet.
     * Dispatch happens after this method returns.
     * Delegates the actual replacing to occur in the StageMetadata.
     *
     * @param stageNum  stage containing the worker.
     * @param newWorker replacement worker.
     * @param oldWorker worker being replaced.
     * @param jobStore  persistence store used by the stage-level replacement.
     * @return always true (failures surface as exceptions).
     * @throws Exception if the stage-level replacement or persistence fails.
     */
    boolean replaceWorkerMetaData(int stageNum,
                                  JobWorker newWorker,
                                  JobWorker oldWorker,
                                  MantisJobStore jobStore) throws Exception {
        boolean result=true;
        ((MantisStageMetadataImpl)stageMetadataMap.get(stageNum)).replaceWorkerIndex(newWorker, oldWorker, jobStore);
        // remove mapping for replaced worker
        removeWorkerMetadata(oldWorker.getMetadata().getWorkerNumber());
        Integer integer = workerNumberToStageMap.put(newWorker.getMetadata().getWorkerNumber(), stageNum);
        // A prior mapping to a *different* stage indicates index corruption; log loudly.
        if(integer != null && integer != stageNum) {
            logger.error(String.format("Unexpected to put worker number mapping from %d to stage %d for job %s, prev mapping to stage %d",
                    newWorker.getMetadata().getWorkerNumber(), stageNum, newWorker.getMetadata().getJobId(), integer));
        }
        return result;
    }

    /**
     * Adds the given worker to the given stage and registers it in the worker-number index.
     *
     * @param stageNum  stage to add to; a missing stage is logged (and will NPE below —
     *                  NOTE(review): the warn branch does not return, confirm intent).
     * @param newWorker worker to add.
     * @return true if the stage accepted the worker, false otherwise.
     * @throws InvalidJobException if the stage rejects the worker.
     */
    public boolean addWorkerMetadata(int stageNum, JobWorker newWorker) throws InvalidJobException {
        if(logger.isTraceEnabled()) { logger.trace("Adding workerMetadata {} for stage {}", stageNum, newWorker); }
        boolean result=true;
        if(!stageMetadataMap.containsKey(stageNum)) {
            logger.warn("No such stage {}", stageNum);
        }
        if(!((MantisStageMetadataImpl)stageMetadataMap.get(stageNum)).addWorkerIndex(newWorker))
            result=false;
        Integer integer = workerNumberToStageMap.put(newWorker.getMetadata().getWorkerNumber(), stageNum);
        if(integer != null && integer != stageNum) {
            logger.error(String.format("Unexpected to put worker number mapping from %d to stage %d for job %s, prev mapping to stage %d",
                    newWorker.getMetadata().getWorkerNumber(), stageNum, newWorker.getMetadata().getJobId(), integer));
        }
        if(logger.isTraceEnabled()) { logger.trace("Exit addworkerMeta {}", workerNumberToStageMap); }
        return result;
    }

    /**
     * Removes the worker-number index entry for the given worker.
     *
     * @param workerNumber worker number to unregister.
     * @return true if an entry was removed.
     */
    boolean removeWorkerMetadata(int workerNumber) {
        if(workerNumberToStageMap.containsKey(workerNumber)) {
            workerNumberToStageMap.remove(workerNumber);
            return true;
        }
        return false;
    }

    /**
     * Looks up a worker by (stage number, worker index).
     *
     * @return the worker, or empty if the stage or the worker index is unknown.
     */
    @JsonIgnore
    public Optional<JobWorker> getWorkerByIndex(int stageNumber, int workerIndex) throws InvalidJobException {
        Optional<IMantisStageMetadata> stage = getStageMetadata(stageNumber);
        if(stage.isPresent()) {
            return Optional.ofNullable(stage.get().getWorkerByIndex(workerIndex));
        }
        return Optional.empty();
        //throw new InvalidJobException(jobId, stageNumber, workerIndex);
    }

    /**
     * Looks up a worker by its unique worker number via the worker-number index.
     *
     * @return the worker, or empty if the number (or its stage) is unknown.
     */
    @JsonIgnore
    public Optional<JobWorker> getWorkerByNumber(int workerNumber) throws InvalidJobException {
        Integer stageNumber = workerNumberToStageMap.get(workerNumber);
        if(stageNumber == null) {
            return Optional.empty();
        }
        IMantisStageMetadata stage = stageMetadataMap.get(stageNumber);
        if(stage == null) {
            return Optional.empty();
        }
        return Optional.ofNullable(stage.getWorkerByWorkerNumber(workerNumber));
    }

    /**
     * Returns the largest known worker number, or -1 if there are none.
     */
    @JsonIgnore
    public int getMaxWorkerNumber() {
        // Expected to be called only during initialization, no need to synchronize/lock.
        // Resubmitted workers are expected to have a worker number greater than those they replace.
        int max=-1;
        for(int id: workerNumberToStageMap.keySet())
            if(max < id)
                max = id;
        return max;
    }

    @JsonIgnore
    @Override
    public SchedulingInfo getSchedulingInfo() {
        return this.jobDefinition.getSchedulingInfo();
    }

    @Override
    public long getMinRuntimeSecs() {
        return this.jobDefinition.getJobSla().getMinRuntimeSecs();
    }

    /**
     * Migrate to using the getArtifactName and getArtifactVersion
     */
    @Deprecated
    @Override
    public URL getJobJarUrl() {
        try {
            return DataFormatAdapter.generateURL(getArtifactName());
        } catch (MalformedURLException e) {
            // should not happen
            throw new RuntimeException(e);
        }
    }

    /** Returns the start time, or empty if the job has not started. */
    @Override
    public Optional<Instant> getStartedAtInstant() {
        if(this.startedAt == DEFAULT_STARTED_AT_EPOCH) {
            return Optional.empty();
        } else {
            return Optional.of(Instant.ofEpochMilli(startedAt));
        }
    }

    public long getStartedAt() {
        return this.startedAt;
    }

    /** Returns the end time, or empty if the job has not ended. */
    @Override
    public Optional<Instant> getEndedAtInstant() {
        if(this.endedAt == DEFAULT_STARTED_AT_EPOCH) {
            return Optional.empty();
        } else {
            return Optional.of(Instant.ofEpochMilli(endedAt));
        }
    }

    public long getEndedAt() {
        return this.endedAt;
    }

    /**
     * Builder for {@link MantisJobMetadataImpl}. Note endedAt is not settable here;
     * it starts at the "unset" sentinel.
     */
    public static class Builder {
        JobId jobId;
        String user;
        JobDefinition jobDefinition;
        long submittedAt;
        long startedAt;
        JobState state;
        int nextWorkerNumberToUse = 1;

        public Builder() {
        }

        public Builder withJobId(JobId jobId) {
            this.jobId = jobId;
            return this;
        }

        public Builder withJobDefinition(JobDefinition jD) {
            this.jobDefinition = jD;
            return this;
        }

        public Builder withSubmittedAt(long submittedAt) {
            this.submittedAt = submittedAt;
            return this;
        }

        public Builder withSubmittedAt(Instant submittedAt) {
            this.submittedAt = submittedAt.toEpochMilli();
            return this;
        }

        public Builder withStartedAt(Instant startedAt) {
            this.startedAt = startedAt.toEpochMilli();
            return this;
        }

        public Builder withJobState(JobState state) {
            this.state = state;
            return this;
        }

        public Builder withNextWorkerNumToUse(int workerNum) {
            this.nextWorkerNumberToUse = workerNum;
            return this;
        }

        /**
         * Copies all buildable fields from an existing instance.
         * NOTE(review): startedAt is NOT copied here — confirm whether that is intentional.
         */
        public Builder from(MantisJobMetadataImpl mJob) {
            this.jobId = mJob.getJobId();
            this.jobDefinition = mJob.getJobDefinition();
            this.submittedAt = mJob.getSubmittedAt();
            this.state = mJob.getState();
            this.nextWorkerNumberToUse = mJob.getNextWorkerNumberToUse();
            return this;
        }

        public MantisJobMetadataImpl build() {
            return new MantisJobMetadataImpl(jobId, submittedAt, startedAt, jobDefinition,
                    state, nextWorkerNumberToUse);
        }
    }

    @Override
    public String toString() {
        return "MantisJobMetadataImpl{"
                + "jobId=" + jobId
                + ", submittedAt=" + submittedAt
                + ", startedAt=" + startedAt
                + ", endedAt=" + endedAt
                + ", state=" + state
                + ", nextWorkerNumberToUse=" + nextWorkerNumberToUse
                + ", jobDefinition=" + jobDefinition
                + ", stageMetadataMap=" + stageMetadataMap
                + ", workerNumberToStageMap=" + workerNumberToStageMap
                + '}';
    }

    // Equality intentionally excludes the runtime stage/worker maps.
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        MantisJobMetadataImpl that = (MantisJobMetadataImpl) o;
        return submittedAt == that.submittedAt
                && startedAt == that.startedAt
                && endedAt == that.endedAt
                && nextWorkerNumberToUse == that.nextWorkerNumberToUse
                && Objects.equals(jobId, that.jobId)
                && state == that.state
                && Objects.equals(jobDefinition, that.jobDefinition);
    }

    @Override
    public int hashCode() {
        return Objects.hash(jobId, submittedAt, startedAt, endedAt, state,
                nextWorkerNumberToUse, jobDefinition);
    }
}
4,389
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/IWorkerManager.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.master.jobcluster.job;

import java.time.Instant;
import java.util.List;

import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import rx.subjects.BehaviorSubject;

/**
 * Declares the behavior of the WorkerManager which is embedded within a JobManager.
 */
public interface IWorkerManager {

    /**
     * Perform any cleanup during job shutdown.
     */
    void shutdown();

    /**
     * Handle worker related events.
     *
     * @param event    the worker event to process.
     * @param jobState the job's current lifecycle state, used to decide how to react.
     */
    void processEvent(WorkerEvent event, JobState jobState);

    /**
     * Iterate through all active workers and identify and restart workers that have not sent a heart beat
     * within a configured time.
     *
     * @param now the current time used to evaluate heartbeat staleness.
     */
    void checkHeartBeats(Instant now);

    /**
     * Invoked during Agent deploy. Resubmit workers that are currently running on old VMs.
     *
     * @param now the current time used for migration decisions.
     */
    void migrateDisabledVmWorkers(Instant now);

    /**
     * Increase or decrease the number of workers associated with the given stage.
     *
     * @param stageMetaData metadata of the stage being scaled.
     * @param numWorkers    target number of workers for the stage.
     * @param reason        human-readable reason recorded for the scaling action.
     * @return the resulting number of workers.
     */
    int scaleStage(MantisStageMetadataImpl stageMetaData, int numWorkers, String reason);

    /**
     * Explicitly kill and resubmit worker associated with the given workerNumber.
     *
     * @param workerNumber unique number of the worker to resubmit.
     * @throws Exception if the worker cannot be resubmitted.
     */
    void resubmitWorker(int workerNumber) throws Exception;

    /**
     * Get a list of currently active workers {@link IMantisWorkerMetadata}.
     *
     * @param limit maximum number of workers to return.
     * @return the active workers, at most {@code limit}.
     */
    List<IMantisWorkerMetadata> getActiveWorkers(int limit);

    /**
     * Returns a {@link BehaviorSubject} where job status updates are published.
     *
     * @return the subject carrying {@link JobSchedulingInfo} updates.
     */
    BehaviorSubject<JobSchedulingInfo> getJobStatusSubject();

    /**
     * Force sending any updates in worker data.
     */
    void refreshAndSendWorkerAssignments();
}
4,390
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/MantisStageMetadataImpl.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.jobcluster.job; import static java.util.Optional.of; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty; import com.netflix.spectator.impl.Preconditions; import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata; import io.mantisrx.master.jobcluster.job.worker.JobWorker; import io.mantisrx.master.jobcluster.job.worker.WorkerState; import io.mantisrx.master.jobcluster.job.worker.WorkerTerminate; import io.mantisrx.runtime.JobConstraints; import io.mantisrx.runtime.MachineDefinition; import io.mantisrx.runtime.descriptor.StageScalingPolicy; import io.mantisrx.server.core.JobCompletedReason; import io.mantisrx.server.master.WorkerRequest; import io.mantisrx.server.master.domain.JobId; import io.mantisrx.server.master.persistence.MantisJobStore; import 
io.mantisrx.server.master.persistence.exceptions.InvalidJobException;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Implements the {@link IMantisStageMetadata} interface. Represents information related to a Job stage.
 */
public class MantisStageMetadataImpl implements IMantisStageMetadata {

    private final JobId jobId;
    private final int stageNum;
    private final int numStages;
    private final MachineDefinition machineDefinition;
    // Mutable: updated in place when this stage is scaled (see unsafeSetNumWorkers).
    private int numWorkers;
    @JsonIgnore
    private boolean isSubscribed = false;
    private final List<JobConstraints> hardConstraints;
    private final List<JobConstraints> softConstraints;
    // Scaling policy may be null (stage not autoscaled).
    private StageScalingPolicy scalingPolicy;
    private boolean scalable;
    // Workers keyed by worker index (the stable slot within the stage).
    @JsonIgnore
    private final ConcurrentMap<Integer, JobWorker> workerByIndexMetadataSet;
    // Workers keyed by worker number (unique across worker resubmits).
    @JsonIgnore
    private final ConcurrentMap<Integer, JobWorker> workerByNumberMetadataSet;

    private static final Logger LOGGER = LoggerFactory.getLogger(MantisStageMetadataImpl.class);

    /**
     * Default constructor, also used by Jackson during deserialization.
     *
     * @param jobId             id of the job this stage belongs to
     * @param stageNum          this stage's number within the job
     * @param numStages         total number of stages in the job
     * @param machineDefinition resources each worker of this stage runs with
     * @param numWorkers        number of workers in this stage
     * @param hardConstraints   mandatory scheduling constraints
     * @param softConstraints   best-effort scheduling constraints
     * @param scalingPolicy     autoscaling policy; may be null
     * @param scalable          whether this stage can be scaled
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public MantisStageMetadataImpl(@JsonProperty("jobId") JobId jobId,
                                   @JsonProperty("stageNum") int stageNum,
                                   @JsonProperty("numStages") int numStages,
                                   @JsonProperty("machineDefinition") MachineDefinition machineDefinition,
                                   @JsonProperty("numWorkers") int numWorkers,
                                   @JsonProperty("hardConstraints") List<JobConstraints> hardConstraints,
                                   @JsonProperty("softConstraints") List<JobConstraints> softConstraints,
                                   @JsonProperty("scalingPolicy") StageScalingPolicy scalingPolicy,
                                   @JsonProperty("scalable") boolean scalable) {
        this.jobId = jobId;
        this.stageNum = stageNum;
        this.numStages = numStages;
        this.machineDefinition = machineDefinition;
        this.numWorkers = numWorkers;
        this.hardConstraints = hardConstraints;
        this.softConstraints = softConstraints;
        this.scalingPolicy = scalingPolicy;
        this.scalable = scalable;
        workerByIndexMetadataSet = new ConcurrentHashMap<>();
        workerByNumberMetadataSet = new ConcurrentHashMap<>();
    }

    @Override
    public JobId getJobId() {
        return jobId;
    }

    @Override
    public int getStageNum() {
        return stageNum;
    }

    @Override
    public int getNumStages() {
        return numStages;
    }

    @Override
    public int getNumWorkers() {
        return numWorkers;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        MantisStageMetadataImpl that = (MantisStageMetadataImpl) o;
        return stageNum == that.stageNum
                && numStages == that.numStages
                && numWorkers == that.numWorkers
                && scalable == that.scalable
                && Objects.equals(jobId, that.jobId)
                && Objects.equals(machineDefinition, that.machineDefinition)
                && Objects.equals(hardConstraints, that.hardConstraints)
                && Objects.equals(softConstraints, that.softConstraints)
                && Objects.equals(scalingPolicy, that.scalingPolicy);
    }

    @Override
    public int hashCode() {
        return Objects.hash(jobId, stageNum, numStages, machineDefinition, numWorkers, hardConstraints,
                softConstraints, scalingPolicy, scalable);
    }

    /**
     * Builder to create an instance of {@link MantisStageMetadataImpl}.
     */
    public static class Builder {

        private JobId jobId;
        private int stageNum = -1;
        private int numStages = 0;
        private MachineDefinition machineDefinition;
        private int numWorkers = 0;
        private List<JobConstraints> hardConstraints = Collections.emptyList();
        private List<JobConstraints> softConstraints = Collections.emptyList();
        private StageScalingPolicy scalingPolicy;
        private boolean scalable;

        /**
         * Ctor.
         */
        public Builder() {
        }

        /**
         * Sets the {@link JobId}.
         * @param jId id of the job owning this stage
         * @return this builder
         */
        public Builder withJobId(JobId jId) {
            this.jobId = jId;
            return this;
        }

        /**
         * Sets the stage number.
         * @param stageNum this stage's number
         * @return this builder
         */
        public Builder withStageNum(int stageNum) {
            this.stageNum = stageNum;
            return this;
        }

        /**
         * Sets the total number of stages.
         * @param numStages total stage count for the job
         * @return this builder
         */
        public Builder withNumStages(int numStages) {
            this.numStages = numStages;
            return this;
        }

        /**
         * Sets the {@link MachineDefinition} to be used by the workers of this stage.
         * @param md worker resource definition
         * @return this builder
         */
        public Builder withMachineDefinition(MachineDefinition md) {
            this.machineDefinition = md;
            return this;
        }

        /**
         * The total number of workers in this stage.
         * @param numWorkers worker count
         * @return this builder
         */
        public Builder withNumWorkers(int numWorkers) {
            this.numWorkers = numWorkers;
            return this;
        }

        /**
         * Sets the mandatory scheduling constraints associated with this stage. A null argument is ignored.
         * @param hardC hard constraints
         * @return this builder
         */
        public Builder withHardConstraints(List<JobConstraints> hardC) {
            if (hardC != null) {
                this.hardConstraints = hardC;
            }
            return this;
        }

        /**
         * Sets the best effort scheduling constraints associated with this stage. A null argument is ignored.
         * @param softC soft constraints
         * @return this builder
         */
        public Builder withSoftConstraints(List<JobConstraints> softC) {
            if (softC != null) {
                this.softConstraints = softC;
            }
            return this;
        }

        /**
         * The scaling policy associated with this stage.
         * @param pol scaling policy; may be null
         * @return this builder
         */
        public Builder withScalingPolicy(StageScalingPolicy pol) {
            this.scalingPolicy = pol;
            return this;
        }

        /**
         * Sets whether this stage is scalable.
         * @param s true if the stage may be scaled
         * @return this builder
         */
        public Builder isScalable(boolean s) {
            scalable = s;
            return this;
        }

        /**
         * Convenience method to clone data from an old worker of this stage.
         * @param workerRequest request whose stage-level data is copied into this builder
         * @return this builder
         */
        public Builder from(WorkerRequest workerRequest) {
            Objects.requireNonNull(workerRequest);
            this.jobId = (JobId.fromId(workerRequest.getJobId()).orElse(null));
            this.stageNum = (workerRequest.getWorkerStage());
            this.numStages = (workerRequest.getTotalStages());
            this.machineDefinition = (workerRequest.getDefinition());
            this.numWorkers = (workerRequest.getNumInstancesAtStage());
            this.hardConstraints = (workerRequest.getHardConstraints() != null
                    ? workerRequest.getHardConstraints() : new ArrayList<>());
            this.softConstraints = (workerRequest.getSoftConstraints() != null
                    ? workerRequest.getSoftConstraints() : new ArrayList<>());
            this.scalingPolicy = (workerRequest.getSchedulingInfo().forStage(
                    workerRequest.getWorkerStage()).getScalingPolicy());
            this.scalable = (workerRequest.getSchedulingInfo().forStage(workerRequest.getWorkerStage()).getScalable());
            return this;
        }

        /**
         * Builds and returns an instance of {@link MantisStageMetadataImpl}.
         * @return the built stage metadata
         * @throws NullPointerException if jobId was not set
         * @throws IllegalArgumentException if the stage number or stage count is invalid
         */
        public IMantisStageMetadata build() {
            Objects.requireNonNull(jobId, "JobId cannot be null");
            if (stageNum <= -1) {
                // Fix: original used SLF4J-style "{}" placeholders with String.format,
                // which silently dropped the offending value from the message.
                throw new IllegalArgumentException(String.format("Invalid stage number %d", stageNum));
            }
            if (numStages <= 0) {
                throw new IllegalArgumentException(String.format("Invalid no of stages %d", numStages));
            }
            return new MantisStageMetadataImpl(jobId, stageNum, numStages, machineDefinition, numWorkers,
                    hardConstraints, softConstraints, scalingPolicy, scalable);
        }
    }

    /**
     * Updates the total number of workers in this stage and persists the change.
     * "unsafe" because callers are expected to have ensured exclusive access.
     * @param numWorkers new worker count
     * @param store persistence store to write the updated stage to
     * @throws Exception if the store update fails
     */
    public void unsafeSetNumWorkers(int numWorkers, MantisJobStore store) throws Exception {
        this.numWorkers = numWorkers;
        store.updateStage(this);
    }

    /**
     * Removes the referenced worker from this stage and archives it. Both the index and number
     * maps must agree on the worker being removed for the removal to count as successful.
     * @param index worker index to remove
     * @param number worker number to remove
     * @param store persistence store used to archive the removed worker
     * @return true if a consistent (index, number) pair was removed, false otherwise
     */
    public boolean unsafeRemoveWorker(int index, int number, MantisJobStore store) {
        final JobWorker removedIdx = workerByIndexMetadataSet.remove(index);
        final JobWorker removedNum = workerByNumberMetadataSet.remove(number);
        if (removedIdx != null && removedNum != null
                && removedIdx.getMetadata().getWorkerNumber() == number
                && removedNum.getMetadata().getWorkerIndex() == index) {
            LOGGER.info("Worker index {} - number {} marked for deletion", index, number);
            try {
                archiveWorker(removedIdx.getMetadata(), store);
            } catch (IOException e) {
                // Fix: log with context instead of printStackTrace(); archival failure is
                // deliberately non-fatal to the removal itself.
                LOGGER.error("Failed to archive removed worker index {} - number {} of job {}",
                        index, number, jobId, e);
            }
            return true;
        }
        return false;
    }

    @Override
    public List<JobConstraints> getHardConstraints() {
        return Collections.unmodifiableList(hardConstraints);
    }

    @Override
    public List<JobConstraints> getSoftConstraints() {
        return Collections.unmodifiableList(softConstraints);
    }

    @Override
    public StageScalingPolicy getScalingPolicy() {
        return scalingPolicy;
    }

    @Override
    public boolean getScalable() {
        return scalable;
    }

    @Override
    public MachineDefinition getMachineDefinition() {
        return machineDefinition;
    }

    @Deprecated
    @JsonIgnore
    @Override
    public Collection<JobWorker> getWorkerByIndexMetadataSet() {
        return Collections.unmodifiableCollection(workerByIndexMetadataSet.values());
    }

    @JsonIgnore
    @Override
    public Collection<JobWorker> getAllWorkers() {
        return Collections.unmodifiableCollection(workerByNumberMetadataSet.values());
    }

    @JsonIgnore
    @Override
    public JobWorker getWorkerByIndex(int workerId) throws InvalidJobException {
        JobWorker worker = workerByIndexMetadataSet.get(workerId);
        // -1 is used as a stage-number placeholder in the exception.
        if (worker == null) throw new InvalidJobException(jobId, -1, workerId);
        return worker;
    }

    @JsonIgnore
    @Override
    public JobWorker getWorkerByWorkerNumber(int workerNumber) throws InvalidJobException {
        JobWorker worker = workerByNumberMetadataSet.get(workerNumber);
        if (worker == null) throw new InvalidJobException(jobId, -1, workerNumber);
        return worker;
    }

    /**
     * Remove the given worker from the stage's number map if it is in a terminal state.
     * @param workerNumber worker number to remove
     * @return the removed worker, or null if absent or not in a terminal state
     */
    JobWorker removeWorkerInFinalState(int workerNumber) {
        JobWorker worker = workerByNumberMetadataSet.get(workerNumber);
        if (worker != null && WorkerState.isTerminalState(worker.getMetadata().getState())) {
            workerByNumberMetadataSet.remove(workerNumber);
            return worker;
        }
        return null;
    }

    /**
     * Removes (and returns) workers whose number-map entry no longer matches the worker
     * occupying their index slot — i.e. workers that have been replaced and can be archived.
     * @return the workers removed from the number map
     */
    public Collection<JobWorker> removeArchiveableWorkers() {
        Collection<JobWorker> removedWorkers = new LinkedList<>();
        // Snapshot the keys to avoid iterating the live map while removing from it.
        Set<Integer> workerNumbers = new HashSet<>(workerByNumberMetadataSet.keySet());
        for (Integer w : workerNumbers) {
            JobWorker worker = workerByNumberMetadataSet.get(w);
            final JobWorker wi = workerByIndexMetadataSet.get(worker.getMetadata().getWorkerIndex());
            if (wi == null || wi.getMetadata().getWorkerNumber() != worker.getMetadata().getWorkerNumber()) {
                workerByNumberMetadataSet.remove(w);
                removedWorkers.add(worker);
            }
        }
        return removedWorkers;
    }

    /**
     * Replace the old worker with the new worker. New worker has not been scheduled yet so it is
     * just an in memory representation.
     * Invalid conditions:
     * 1. New worker is in error state
     * 2. Old worker Index != new worker Index
     * 3. Given Old worker is in fact the one associated with this index
     * <p>
     * Does the following:
     * 1. Marks old worker as terminated
     * 2. Associates new worker to this index
     * 3. Removes old worker from number -> worker set
     * 4. Saves the data to the store
     * 5. Associates new worker number -> new worker
     * 6. archives the worker
     *
     * @param newWorker replacement worker (must not be null or in an error state)
     * @param oldWorker worker currently occupying the index (must not be null)
     * @param jobStore persistence store for the replacement and archival
     * @throws Exception if persistence fails
     * @throws IllegalStateException if the new worker is in an error state
     * @throws IllegalArgumentException if the index/number bookkeeping does not match
     */
    public void replaceWorkerIndex(JobWorker newWorker, JobWorker oldWorker, MantisJobStore jobStore)
            throws Exception {
        Preconditions.checkNotNull(newWorker, "Replacement worker cannot be null");
        Preconditions.checkNotNull(oldWorker, "old worker cannot be null");
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("In MantisStageMetadataImpl:replaceWorkerIndex oldWorker {} new Worker {} for Job {}",
                    oldWorker, newWorker, this.getJobId());
        }
        IMantisWorkerMetadata newWorkerMetadata = newWorker.getMetadata();
        IMantisWorkerMetadata oldWorkerMetadata = oldWorker.getMetadata();
        int index = newWorkerMetadata.getWorkerIndex();
        // A replacement worker must not already be failed; should not get here.
        if (WorkerState.isErrorState(newWorkerMetadata.getState())) {
            String errMsg = String.format("New worker cannot be in error state %s", newWorkerMetadata.getState());
            LOGGER.error(errMsg);
            throw new IllegalStateException(errMsg);
        }
        // The index must already be occupied by some worker; otherwise abort.
        if (!workerByIndexMetadataSet.containsKey(index)) {
            String errMsg = String.format("Index %s does not exist in workerByIndexMetadataSet %s for job %s",
                    index, workerByIndexMetadataSet, this.jobId);
            throw new IllegalArgumentException(errMsg);
        } else {
            if (oldWorkerMetadata.getWorkerIndex() != index) {
                String errMsg = String.format("While replacing worker in Job %s , Old worker Index %s does not match "
                        + "the new worker index %s", this.jobId, oldWorkerMetadata.getWorkerIndex(), index);
                LOGGER.error(errMsg);
                throw new IllegalArgumentException(errMsg);
            }
            LOGGER.debug("workerByIndexMetadatSet {}", workerByIndexMetadataSet);
            // confirm old worker is present in the workerByIndexSet with the given worker number
            JobWorker worker = workerByIndexMetadataSet.get(index);
            if (worker.getMetadata().getWorkerNumber() != oldWorkerMetadata.getWorkerNumber()) {
                String errMsg = ("Did not replace worker " + oldWorkerMetadata.getWorkerNumber() + " with "
                        + newWorkerMetadata.getWorkerNumber() + " for index " + newWorkerMetadata.getWorkerIndex()
                        + " of job " + jobId + ", different worker " + worker.getMetadata().getWorkerNumber()
                        + " exists already");
                throw new IllegalArgumentException(errMsg);
            } else {
                // mark old worker as terminated
                processWorkerEvent(new WorkerTerminate(oldWorkerMetadata.getWorkerId(), WorkerState.Failed,
                        JobCompletedReason.Relaunched, System.currentTimeMillis()), jobStore);
                // insert new worker
                workerByIndexMetadataSet.put(index, newWorker);
                // remove old worker from workerNumberSet
                removeWorkerInFinalState(oldWorkerMetadata.getWorkerNumber());
                // persist changes
                jobStore.replaceTerminatedWorker(oldWorkerMetadata, newWorkerMetadata);
                workerByNumberMetadataSet.put(newWorkerMetadata.getWorkerNumber(), newWorker);
                // archive worker; failure here is non-fatal since the replacement is already persisted
                try {
                    archiveWorker(oldWorkerMetadata, jobStore);
                } catch (Exception e) {
                    LOGGER.error("Exception archiving worker", e);
                }
                LOGGER.info("Replaced worker " + oldWorkerMetadata.getWorkerNumber() + " with "
                        + newWorkerMetadata.getWorkerNumber() + " for index " + newWorkerMetadata.getWorkerIndex()
                        + " of job " + jobId);
            }
        }
    }

    // Archives the given worker's metadata in the job store.
    private void archiveWorker(IMantisWorkerMetadata worker, MantisJobStore jobStore) throws IOException {
        jobStore.archiveWorker(worker);
    }

    /**
     * Adds the given {@link JobWorker} to this stage, unless the index is already occupied.
     * @param newWorker worker to add
     * @return true if the worker was added, false if its index was already taken
     */
    public boolean addWorkerIndex(JobWorker newWorker) {
        IMantisWorkerMetadata newWorkerMetadata = newWorker.getMetadata();
        if (workerByIndexMetadataSet.putIfAbsent(newWorkerMetadata.getWorkerIndex(), newWorker) != null) {
            LOGGER.warn("WorkerIndex {} already exists. Existing worker={} ", newWorkerMetadata.getWorkerIndex(),
                    workerByIndexMetadataSet.get(newWorkerMetadata.getWorkerIndex()));
            return false;
        }
        workerByNumberMetadataSet.put(newWorkerMetadata.getWorkerNumber(), newWorker);
        return true;
    }

    /**
     * Updates the state of a worker based on the worker event. Exceptions from the
     * worker/state update are logged and swallowed, yielding an empty Optional.
     * @param event event that drives the worker's state transition
     * @param jobStore persistence store for the state update
     * @return the affected worker, or empty if lookup or update failed
     */
    public Optional<JobWorker> processWorkerEvent(WorkerEvent event, MantisJobStore jobStore) {
        try {
            JobWorker worker = getWorkerByIndex(event.getWorkerId().getWorkerIndex());
            worker.processEvent(event, jobStore);
            return of(worker);
        } catch (Exception e) {
            LOGGER.warn("Exception saving worker update", e);
        }
        return Optional.empty();
    }

    /**
     * Iterates through all workers of this stage and returns true if all workers are in started state.
     * @return true if every worker is Started
     */
    @JsonIgnore
    public boolean isAllWorkerStarted() {
        for (JobWorker w : workerByIndexMetadataSet.values()) {
            if (!w.getMetadata().getState().equals(WorkerState.Started)) return false;
        }
        return true;
    }

    /**
     * Iterates through all workers of this stage and returns true if all workers are in terminal state.
     * @return true if every worker is in a terminal state
     */
    @JsonIgnore
    public boolean isAllWorkerCompleted() {
        for (JobWorker w : workerByIndexMetadataSet.values()) {
            if (!WorkerState.isTerminalState(w.getMetadata().getState())) {
                LOGGER.debug("isAllWorkerCompleted returns false");
                return false;
            }
        }
        LOGGER.info("isAllWorkerCompleted returns true");
        return true;
    }

    /**
     * Returns the number of workers that are in started state.
     * @return count of workers currently Started
     */
    @JsonIgnore
    public int getNumStartedWorkers() {
        int startedCount = 0;
        for (JobWorker w : workerByIndexMetadataSet.values()) {
            if (w.getMetadata().getState().equals(WorkerState.Started)) startedCount++;
        }
        return startedCount;
    }

    @Override
    public String toString() {
        return "MantisStageMetadataImpl [jobId=" + jobId + ", stageNum=" + stageNum + ", numStages=" + numStages
                + ", machineDefinition=" + machineDefinition + ", numWorkers=" + numWorkers + ", hardConstraints="
                + hardConstraints + ", softConstraints=" + softConstraints + ", scalingPolicy=" + scalingPolicy
                + ", scalable=" + scalable + ", workerByIndexMetadataSet=" + workerByIndexMetadataSet
                + ", workerByNumberMetadataSet=" + workerByNumberMetadataSet + "]";
    }
}
4,391
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/JobActor.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.jobcluster.job; import static io.mantisrx.master.StringConstants.MANTIS_MASTER_USER; import static io.mantisrx.master.events.LifecycleEventsProto.StatusEvent.StatusEventType.ERROR; import static io.mantisrx.master.events.LifecycleEventsProto.StatusEvent.StatusEventType.INFO; import static io.mantisrx.master.events.LifecycleEventsProto.StatusEvent.StatusEventType.WARN; import static io.mantisrx.master.jobcluster.job.worker.MantisWorkerMetadataImpl.MANTIS_SYSTEM_ALLOCATED_NUM_PORTS; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SERVER_ERROR; import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS; import static java.util.Optional.empty; import static java.util.Optional.of; import static java.util.Optional.ofNullable; import java.io.IOException; import java.time.Duration; import java.time.Instant; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentSkipListSet; import java.util.stream.Collectors; import akka.actor.AbstractActorWithTimers; import akka.actor.ActorRef; import 
akka.actor.PoisonPill; import akka.actor.Props; import akka.actor.SupervisorStrategy; import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper; import io.mantisrx.shaded.com.google.common.base.Preconditions; import io.mantisrx.shaded.com.google.common.collect.Lists; import com.netflix.fenzo.ConstraintEvaluator; import com.netflix.fenzo.VMTaskFitnessCalculator; import com.netflix.spectator.api.BasicTag; import io.mantisrx.common.WorkerPorts; import io.mantisrx.common.metrics.Counter; import io.mantisrx.common.metrics.Metrics; import io.mantisrx.common.metrics.MetricsRegistry; import io.mantisrx.common.metrics.spectator.MetricGroupId; import io.mantisrx.master.akka.MantisActorSupervisorStrategy; import io.mantisrx.master.events.LifecycleEventPublisher; import io.mantisrx.master.events.LifecycleEventsProto; import io.mantisrx.master.jobcluster.WorkerInfoListHolder; import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata; import io.mantisrx.master.jobcluster.job.worker.JobWorker; import io.mantisrx.master.jobcluster.job.worker.WorkerHeartbeat; import io.mantisrx.master.jobcluster.job.worker.WorkerState; import io.mantisrx.master.jobcluster.job.worker.WorkerStatus; import io.mantisrx.master.jobcluster.job.worker.WorkerTerminate; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobSchedInfoRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobSchedInfoResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLatestJobDiscoveryInfoRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLatestJobDiscoveryInfoResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.KillJobResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListWorkersRequest; 
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListWorkersResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerResponse; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageRequest; import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageResponse; import io.mantisrx.master.jobcluster.proto.JobClusterProto; import io.mantisrx.master.jobcluster.proto.JobProto; import io.mantisrx.master.jobcluster.proto.JobProto.InitJob; import io.mantisrx.master.jobcluster.proto.JobProto.JobInitialized; import io.mantisrx.runtime.JobConstraints; import io.mantisrx.runtime.JobSla; import io.mantisrx.runtime.MachineDefinition; import io.mantisrx.runtime.MantisJobDurationType; import io.mantisrx.runtime.MantisJobState; import io.mantisrx.runtime.MigrationStrategy; import io.mantisrx.runtime.WorkerMigrationConfig; import io.mantisrx.runtime.descriptor.SchedulingInfo; import io.mantisrx.runtime.descriptor.StageScalingPolicy; import io.mantisrx.runtime.descriptor.StageSchedulingInfo; import io.mantisrx.server.core.JobCompletedReason; import io.mantisrx.server.core.JobSchedulingInfo; import io.mantisrx.server.core.Status; import io.mantisrx.server.core.WorkerAssignments; import io.mantisrx.server.core.WorkerHost; import io.mantisrx.server.core.domain.JobMetadata; import io.mantisrx.server.core.domain.WorkerId; import io.mantisrx.server.master.ConstraintsEvaluators; import io.mantisrx.server.master.InvalidJobRequest; import io.mantisrx.server.master.agentdeploy.MigrationStrategyFactory; import io.mantisrx.server.master.config.ConfigurationProvider; import io.mantisrx.server.master.config.MasterConfiguration; import io.mantisrx.server.master.domain.DataFormatAdapter; import io.mantisrx.server.master.domain.IJobClusterDefinition; import io.mantisrx.server.master.domain.JobDefinition; import 
io.mantisrx.server.master.domain.JobId; import io.mantisrx.server.master.persistence.MantisJobStore; import io.mantisrx.server.master.persistence.exceptions.InvalidJobException; import io.mantisrx.server.master.persistence.exceptions.InvalidWorkerStateChangeException; import io.mantisrx.server.master.scheduler.MantisScheduler; import io.mantisrx.server.master.scheduler.ScheduleRequest; import io.mantisrx.server.master.scheduler.WorkerEvent; import io.mantisrx.server.master.scheduler.WorkerOnDisabledVM; import io.mantisrx.server.master.scheduler.WorkerUnscheduleable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observable; import rx.schedulers.Schedulers; import rx.subjects.BehaviorSubject; /** * Actor responsible for handling all operations for a given JobID. * private static final String API_JOB_SUBMIT_PATH="/api/submit"; * private static final String API_JOB_KILL="/api/jobs/kill"; * private static final String API_JOB_STAGE_SCALE="/api/jobs/scaleStage"; * private static final String API_JOB_RESUBMIT_WORKER="/api/jobs/resubmitWorker"; * * @author njoshi */ public class JobActor extends AbstractActorWithTimers implements IMantisJobManager { private static final String CHECK_HB_TIMER_KEY = "CHECK_HB"; private static final String REFRESH_SEND_STAGE_ASSIGNEMNTS_KEY = "REFRESH_SEND_STAGE_ASSIGNMENTS"; private static final Logger LOGGER = LoggerFactory.getLogger(JobActor.class); private static final double DEFAULT_JOB_MASTER_CORES = 1; private static final double DEFAULT_JOB_MASTER_MEM = 1024; private static final double DEFAULT_JOB_MASTER_NW = 128; private static final double DEFAULT_JOB_MASTER_DISK = 1024; private final Metrics metrics; private final MetricGroupId metricsGroupId; private final Counter numWorkerResubmissions; private final Counter numWorkerResubmitLimitReached; private final Counter numWorkerTerminated; private final Counter numScaleStage; private final Counter numWorkersCompletedNotTerminal; private final Counter 
numSchedulingChangesRefreshed; private final Counter numMissingWorkerPorts; /** * Behavior after being initialized. */ private Receive initializedBehavior; /** * Behavior once active. */ private Receive activeBehavior; /** * Behavior during termination. */ private Receive terminatingBehavior; /** * Behavior after termination waiting for JCA to terminate actor. */ private Receive terminatedBehavior; private final String clusterName; private final JobId jobId; private final IJobClusterDefinition jobClusterDefinition; private volatile MantisJobMetadataImpl mantisJobMetaData; private final MantisJobStore jobStore; // load from config private int workerWritesBatchSize = 10; // Manages life cycle of worker private IWorkerManager workerManager = null; // Used to schedule and unschedule workers private final MantisScheduler mantisScheduler; private final LifecycleEventPublisher eventPublisher; private boolean hasJobMaster; private volatile boolean allWorkersCompleted = false; /** * Used by the JobCluster Actor to create this Job Actor. * * @param jobClusterDefinition The job cluster definition to be used while creating this job. * @param jobMetadata The job metadata provided by the user. * @param jobStore Reference to the persistence store {@link MantisJobStore}. * @param mantisScheduler Reference to the {@link MantisScheduler} to be used to schedule work * @param eventPublisher Reference to the event publisher {@link LifecycleEventPublisher} where * lifecycle events * are to be published. * * @return */ public static Props props(final IJobClusterDefinition jobClusterDefinition, final MantisJobMetadataImpl jobMetadata, final MantisJobStore jobStore, final MantisScheduler mantisScheduler, final LifecycleEventPublisher eventPublisher) { return Props.create(JobActor.class, jobClusterDefinition, jobMetadata, jobStore, mantisScheduler, eventPublisher); } /** * This is invoked indirectly via props method to create an instance of this class. 
* * @param jobClusterDefinition * @param jobMetadata * @param jobStore * @param scheduler * @param eventPublisher */ public JobActor(final IJobClusterDefinition jobClusterDefinition, final MantisJobMetadataImpl jobMetadata, MantisJobStore jobStore, final MantisScheduler scheduler, final LifecycleEventPublisher eventPublisher) { this.clusterName = jobMetadata.getClusterName(); this.jobId = jobMetadata.getJobId(); this.jobStore = jobStore; this.jobClusterDefinition = jobClusterDefinition; this.mantisScheduler = scheduler; this.eventPublisher = eventPublisher; this.mantisJobMetaData = jobMetadata; initializedBehavior = getInitializedBehavior(); activeBehavior = getActiveBehavior(); terminatingBehavior = getTerminatingBehavior(); terminatedBehavior = getTerminatedBehavior(); this.metricsGroupId = getMetricGroupId(jobId.getId()); Metrics m = new Metrics.Builder() .id(metricsGroupId) .addCounter("numWorkerResubmissions") .addCounter("numWorkerResubmitLimitReached") .addCounter("numWorkerTerminated") .addCounter("numScaleStage") .addCounter("numWorkersCompletedNotTerminal") .addCounter("numSchedulingChangesRefreshed") .addCounter("numMissingWorkerPorts") .build(); this.metrics = MetricsRegistry.getInstance().registerAndGet(m); this.numWorkerResubmissions = metrics.getCounter("numWorkerResubmissions"); this.numWorkerResubmitLimitReached = metrics.getCounter("numWorkerResubmitLimitReached"); this.numWorkerTerminated = metrics.getCounter("numWorkerTerminated"); this.numScaleStage = metrics.getCounter("numScaleStage"); this.numWorkersCompletedNotTerminal = metrics.getCounter("numWorkersCompletedNotTerminal"); this.numSchedulingChangesRefreshed = metrics.getCounter("numSchedulingChangesRefreshed"); this.numMissingWorkerPorts = metrics.getCounter("numMissingWorkerPorts"); } /** * Create a MetricGroupId using the given job Id. 
* * @param id * * @return */ MetricGroupId getMetricGroupId(String id) { return new MetricGroupId("JobActor", new BasicTag("jobId", id)); } /** * Validates the job definition, stores the job to persistence. * Instantiates the SubscriptionManager to keep track of subscription and runtime timeouts * Instantiates the WorkerManager which manages the worker life cycle * * @throws InvalidJobRequest * @throws InvalidJobException */ void initialize(boolean isSubmit) throws Exception { LOGGER.info("Initializing Job {}", jobId); if (isSubmit) { eventPublisher.publishStatusEvent(new LifecycleEventsProto.JobStatusEvent(INFO, "Job request received", getJobId(), getJobState())); // Ignore isReady flag, if the job is autoscaled it gets a Job Master // this.jobClusterDefinition.getIsReadyForJobMaster() && if (isAutoscaled(mantisJobMetaData.getSchedulingInfo())) { LOGGER.info("Job is autoscaled, setting up Job Master"); setupJobMasterStage(mantisJobMetaData.getSchedulingInfo()); } LOGGER.info("Storing job"); jobStore.storeNewJob(mantisJobMetaData); } LOGGER.info("Stored mantis job"); this.workerManager = new WorkerManager(this, jobClusterDefinition.getWorkerMigrationConfig(), this.mantisScheduler, isSubmit); long checkAgainInSeconds = ConfigurationProvider.getConfig().getWorkerTimeoutSecs(); long refreshStageAssignementsDurationMs = ConfigurationProvider.getConfig() .getStageAssignmentRefreshIntervalMs(); getTimers().startPeriodicTimer(CHECK_HB_TIMER_KEY, new JobProto.CheckHeartBeat(), Duration.ofSeconds(checkAgainInSeconds)); // -1 indicates disabled, which means all updates will be sent immediately if (refreshStageAssignementsDurationMs > 0) { getTimers().startPeriodicTimer(REFRESH_SEND_STAGE_ASSIGNEMNTS_KEY, new JobProto.SendWorkerAssignementsIfChanged(), Duration.ofMillis(refreshStageAssignementsDurationMs)); } mantisJobMetaData.getJobDefinition().getJobSla().getRuntimeLimitSecs(); LOGGER.info("Job {} initialized", this.jobId); } private void setupJobMasterStage(SchedulingInfo 
schedulingInfo) throws io.mantisrx.runtime.command.InvalidJobException { LOGGER.info("Job {} is autoscaled setting up Job Master", this.jobId); if (schedulingInfo.forStage(0) == null) { // create stage 0 schedulingInfo only if not already provided final StageSchedulingInfo stageSchedulingInfo = new StageSchedulingInfo(1, getJobMasterMachineDef(), null, null, // for now, there are no hard or soft constraints null, false); // jobMaster stage itself is not scaled schedulingInfo.addJobMasterStage(stageSchedulingInfo); // Update jobMetadata with the new stage added mantisJobMetaData = new MantisJobMetadataImpl.Builder() .from(mantisJobMetaData) .withJobDefinition(new JobDefinition.Builder() .from(mantisJobMetaData.getJobDefinition()) .withSchedulingInfo(schedulingInfo) .withNumberOfStages(schedulingInfo.getStages().size()) .build()) .build(); } hasJobMaster = true; } private MachineDefinition getJobMasterMachineDef() { MasterConfiguration config = ConfigurationProvider.getConfig(); if (config != null) { return new MachineDefinition( config.getJobMasterCores(), config.getJobMasterMemoryMB(), config.getJobMasterNetworkMbps(), config.getJobMasterDiskMB(), 1 ); } else { return new MachineDefinition( DEFAULT_JOB_MASTER_CORES, DEFAULT_JOB_MASTER_MEM, DEFAULT_JOB_MASTER_NW, DEFAULT_JOB_MASTER_DISK, 1); } } @Override public void preStart() throws Exception { LOGGER.info("Job Actor {}-{} started", clusterName, jobId); } @Override public void postStop() throws Exception { LOGGER.info("Job Actor {} stopped invoking cleanup logic", jobId); if (jobId != null) { MetricsRegistry.getInstance().remove(getMetricGroupId(jobId.getId())); } //shutdown(); } @Override public SupervisorStrategy supervisorStrategy() { // custom supervisor strategy to resume the child actors on Exception instead of the default restart return MantisActorSupervisorStrategy.getInstance().create(); } @Override public Receive createReceive() { return getInitializingBehavior(); } private String genUnexpectedMsg(String 
event, String cluster, String state) { return String.format("Unexpected message %s received by Job actor %s in %s State", event, cluster, state); } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// /* Job Actor behaviors 12 total * - Init * - GET * - LIST workers * - GET SCHED INFO * - SCALE * - KILL * - RESUBMIT WORKER * - WorkerEvent * * // SELF SENT * - HB enforcement * - Runtime enforcement * - Self Destruct * - Refresh Stage Assignments */ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// /** * A Terminating Job allows. * - GET * - LIST workers * - WorkerEvent * * @return */ private Receive getTerminatingBehavior() { String state = "terminating"; return receiveBuilder() // EXPECTED MESSAGES BEGIN// // get Job Details .match(GetJobDetailsRequest.class, this::onGetJobDetails) // list active workers request .match(ListWorkersRequest.class, this::onListActiveWorkers) // EXPECTED MESSAGES END// // UNEXPECTED MESSAGES BEGIN // // Worker related events .match(WorkerEvent.class, (x) -> LOGGER.warn("Job {} is Terminating, ignoring worker Events {}", this.jobId.getId(), x)) .match(InitJob.class, (x) -> getSender().tell(new JobInitialized(x.requestId, SUCCESS, genUnexpectedMsg(x.toString(), this.jobId.getId(), state), this.jobId, x.requstor), getSelf())) // explicit resubmit worker .match(ResubmitWorkerRequest.class, (x) -> getSender().tell(new ResubmitWorkerResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.jobId.getId(), state)), getSelf())) // Heart beat accounting timers .match(JobProto.CheckHeartBeat.class, (x) -> LOGGER.warn(genUnexpectedMsg(x.toString(), this.jobId.getId(), state))) // runtime limit reached .match(JobProto.RuntimeLimitReached.class, (x) -> LOGGER.warn(genUnexpectedMsg(x.toString(), this.jobId.getId(), state))) // Kill job request .match(JobClusterProto.KillJobRequest.class, (x) -> getSender().tell(new 
KillJobResponse(x.requestId, SUCCESS, JobState.Noop, genUnexpectedMsg(x.toString(), this.jobId.getId(), state), this.jobId, x.user), getSelf())) // scale stage request .match(ScaleStageRequest.class, (x) -> getSender().tell(new ScaleStageResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.jobId.getId(), state), 0), getSelf())) // scheduling Info observable .match(GetJobSchedInfoRequest.class, (x) -> getSender().tell( new GetJobSchedInfoResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.jobId.getId(), state), empty()), getSelf())) .match(GetLatestJobDiscoveryInfoRequest.class, (x) -> getSender().tell( new GetLatestJobDiscoveryInfoResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.jobId.getId(), state), empty()), getSelf())) .match(JobProto.SendWorkerAssignementsIfChanged.class, (x) -> LOGGER.warn(genUnexpectedMsg(x.toString(), this.jobId.getId(), state))) .match(KillJobResponse.class, (x) -> LOGGER.info("Received Kill Job Response in" + "Terminating State Ignoring")) .matchAny(x -> LOGGER.warn(genUnexpectedMsg(x.toString(), this.jobId.getId(), state))) // UNEXPECTED MESSAGES END .build(); } /** * A Terminated Job allows. 
 * - GET
 * - LIST workers
 *
 * All other messages are logged/answered as unexpected; this is the actor's final behavior
 * before the PoisonPill from performFinalShutdown takes effect.
 *
 * @return
 */
private Receive getTerminatedBehavior() {
    String state = "terminated";
    return receiveBuilder()
            // EXPECTED MESSAGES BEGIN//
            // get Job Details
            .match(GetJobDetailsRequest.class, this::onGetJobDetails)
            // list active workers request
            .match(ListWorkersRequest.class, this::onListActiveWorkers)
            // EXPECTED MESSAGES END//
            // UNEXPECTED MESSAGES BEGIN //
            .match(InitJob.class, (x) -> getSender().tell(
                    new JobInitialized(x.requestId, SUCCESS, genUnexpectedMsg(
                            x.toString(), this.jobId.getId(), state), this.jobId, x.requstor), getSelf()))
            // explicit resubmit worker
            .match(ResubmitWorkerRequest.class, (x) -> getSender().tell(
                    new ResubmitWorkerResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.jobId.getId(), state)), getSelf()))
            // Heart beat accounting timers
            .match(JobProto.CheckHeartBeat.class, (x) -> LOGGER.warn(
                    genUnexpectedMsg(x.toString(), this.jobId.getId(), state)))
            // Migrate worker request
            .match(JobProto.MigrateDisabledVmWorkersRequest.class, (x) -> LOGGER.warn(
                    genUnexpectedMsg(x.toString(), this.jobId.getId(), state)))
            // runtime limit reached
            .match(JobProto.RuntimeLimitReached.class, (x) -> LOGGER.warn(
                    genUnexpectedMsg(x.toString(), this.jobId.getId(), state)))
            // Kill job request
            .match(JobClusterProto.KillJobRequest.class, (x) -> getSender().tell(
                    new KillJobResponse(x.requestId, SUCCESS, JobState.Noop, genUnexpectedMsg(x.toString(), this.jobId.getId(), state), this.jobId, x.user), getSelf()))
            // scale stage request
            .match(ScaleStageRequest.class, (x) -> getSender().tell(
                    new ScaleStageResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(x.toString(), this.jobId.getId(), state), 0), getSelf()))
            // scheduling Info observable
            .match(GetJobSchedInfoRequest.class, (x) -> getSender().tell(
                    new GetJobSchedInfoResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(
                            x.toString(), this.jobId.getId(), state), empty()), getSelf()))
            .match(GetLatestJobDiscoveryInfoRequest.class, (x) -> getSender().tell(
                    new GetLatestJobDiscoveryInfoResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(
                            x.toString(), this.jobId.getId(), state), empty()), getSelf()))
            .match(KillJobResponse.class, (x) -> LOGGER.info("Received Kill Job Response in" + "Terminating State Ignoring"))
            .match(JobProto.SendWorkerAssignementsIfChanged.class, (x) -> LOGGER.warn(genUnexpectedMsg(
                    x.toString(), this.jobId.getId(), state)))
            // Worker related events
            .match(WorkerEvent.class, (x) -> LOGGER.info("Received worker event in Terminated State Ignoring"))
            .matchAny(x -> LOGGER.warn(genUnexpectedMsg(x.toString(), this.jobId.getId(), state)))
            // UNEXPECTED MESSAGES END
            .build();
}

/**
 * An active job allows.
 * - GET
 * - LIST workers
 * - GET SCHED INFO
 * - SCALE
 * - KILL
 * - RESUBMIT WORKER
 * - WorkerEvent
 * - HB enforcement
 * - Runtime enforcement
 * - Refresh Stage Assignments
 *
 * @return
 */
private Receive getActiveBehavior() {
    String state = "active";
    // get Job Details
    return receiveBuilder()
            // EXPECTED MESSAGES BEGIN//
            .match(GetJobDetailsRequest.class, this::onGetJobDetails)
            // Worker related events
            .match(WorkerEvent.class, r -> processWorkerEvent(r))
            // explicit resubmit worker
            .match(ResubmitWorkerRequest.class, this::onResubmitWorker)
            // Heart beat accounting timers
            .match(JobProto.CheckHeartBeat.class, this::onCheckHeartBeats)
            // Migrate workers from disabled VMs
            .match(JobProto.MigrateDisabledVmWorkersRequest.class, this::onMigrateWorkers)
            // runtime limit reached
            .match(JobProto.RuntimeLimitReached.class, this::onRuntimeLimitReached)
            // Kill job request
            .match(JobClusterProto.KillJobRequest.class, this::onJobKill)
            // scale stage request
            .match(ScaleStageRequest.class, this::onScaleStage)
            // list active workers request
            .match(ListWorkersRequest.class, this::onListActiveWorkers)
            // scheduling Info observable
            .match(GetJobSchedInfoRequest.class, this::onGetJobStatusSubject)
            .match(GetLatestJobDiscoveryInfoRequest.class, this::onGetLatestJobDiscoveryInfo)
            .match(JobProto.SendWorkerAssignementsIfChanged.class, this::onSendWorkerAssignments)
            // EXPECTED MESSAGES END//
            // UNEXPECTED MESSAGES BEGIN //
            .match(InitJob.class, (x) -> getSender().tell(new JobInitialized(x.requestId, SUCCESS, genUnexpectedMsg(x.toString(), this.jobId.getId(), state), this.jobId, x.requstor), getSelf()))
            .matchAny(x -> LOGGER.warn(genUnexpectedMsg(x.toString(), this.jobId.getId(), state)))
            // UNEXPECTED MESSAGES END //
            .build();
}

/**
 * INITIALIZED JOB allows.
 * - GET
 * - LIST workers
 * - GET SCHED INFO
 * - KILL
 * - WorkerEvent
 * - HB enforcement
 * - REFRESH STAGE scheduling info
 *
 * The job is waiting for all workers to start; scale/resubmit/runtime-limit messages are
 * premature here and are answered/logged as unexpected.
 *
 * @return
 */
private Receive getInitializedBehavior() {
    String state = "initialized";
    return receiveBuilder()
            // EXPECTED MESSAGES BEGIN//
            // get Job Details
            .match(GetJobDetailsRequest.class, this::onGetJobDetails)
            // Worker related events
            .match(WorkerEvent.class, r -> processWorkerEvent(r))
            // Heart beat accounting timers
            .match(JobProto.CheckHeartBeat.class, this::onCheckHeartBeats)
            // Migrate workers from disabled VMs
            .match(JobProto.MigrateDisabledVmWorkersRequest.class, this::onMigrateWorkers)
            // Kill job request
            .match(JobClusterProto.KillJobRequest.class, this::onJobKill)
            // list active workers request
            .match(ListWorkersRequest.class, this::onListActiveWorkers)
            .match(GetJobSchedInfoRequest.class, this::onGetJobStatusSubject)
            .match(GetLatestJobDiscoveryInfoRequest.class, this::onGetLatestJobDiscoveryInfo)
            .match(JobProto.SendWorkerAssignementsIfChanged.class, this::onSendWorkerAssignments)
            // EXPECTED MESSAGES END//
            // UNEXPECTED MESSAGES BEGIN //
            // explicit resubmit worker
            .match(ResubmitWorkerRequest.class, (x) -> getSender().tell(
                    new ResubmitWorkerResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(
                            x.toString(), this.jobId.getId(), state)), getSelf()))
            // runtime limit reached
            .match(JobProto.RuntimeLimitReached.class, (x) -> LOGGER.warn(genUnexpectedMsg(
                    x.toString(), this.jobId.getId(), state)))
            // scale stage request
            .match(ScaleStageRequest.class, (x) -> getSender().tell(
                    new ScaleStageResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(
                            x.toString(), this.jobId.getId(), state), 0), getSelf()))
            .match(InitJob.class, (x) -> getSender().tell(new JobInitialized(x.requestId, SUCCESS, genUnexpectedMsg(x.toString(), this.jobId.getId(), state), this.jobId, x.requstor), getSelf()))
            .matchAny(x -> LOGGER.warn(genUnexpectedMsg(x.toString(), this.jobId.getId(), state)))
            // UNEXPECTED MESSAGES END //
            .build();
}

/**
 * AN INITIALIZING JOB ALLOWS.
 * - Init Job
 *
 * Initial actor behavior; everything except InitJob is rejected/logged until onJobInitialize
 * switches the behavior to initialized or active.
 *
 * @return
 */
private Receive getInitializingBehavior() {
    String state = "initializing";
    return receiveBuilder()
            // EXPECTED MESSAGES BEING//
            .match(InitJob.class, this::onJobInitialize)
            // EXPECTED MESSAGES END//
            //UNEXPECTED MESSAGES BEGIN //
            // get Job Details
            .match(GetJobDetailsRequest.class, (x) -> getSender().tell(
                    new GetJobDetailsResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(
                            x.toString(), this.jobId.getId(), state), empty()), getSelf()))
            // Worker related events
            .match(WorkerEvent.class, (x) -> LOGGER.warn(genUnexpectedMsg(x.toString(), this.jobId.getId(), state)))
            // explicit resubmit worker
            .match(ResubmitWorkerRequest.class, (x) -> getSender().tell(
                    new ResubmitWorkerResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(
                            x.toString(), this.jobId.getId(), state)), getSelf()))
            // Heart beat accounting timers
            .match(JobProto.CheckHeartBeat.class, (x) -> LOGGER.warn(genUnexpectedMsg(
                    x.toString(), this.jobId.getId(), state)))
            // Migrate workers request
            .match(JobProto.MigrateDisabledVmWorkersRequest.class, (x) -> LOGGER.warn(genUnexpectedMsg(
                    x.toString(), this.jobId.getId(), state)))
            // runtime limit reached
            .match(JobProto.RuntimeLimitReached.class, (x) -> LOGGER.warn(genUnexpectedMsg(
                    x.toString(), this.jobId.getId(), state)))
            // Kill job request
            .match(JobClusterProto.KillJobRequest.class, (x) -> getSender().tell(
                    new KillJobResponse(x.requestId, CLIENT_ERROR, JobState.Noop, genUnexpectedMsg(
                            x.toString(),
                            this.jobId.getId(), state), this.jobId, x.user), getSelf()))
            // scale stage request
            .match(ScaleStageRequest.class, (x) -> getSender().tell(
                    new ScaleStageResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(
                            x.toString(), this.jobId.getId(), state), 0), getSelf()))
            // list active workers request
            .match(ListWorkersRequest.class, (x) -> getSender().tell(
                    new ListWorkersResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(
                            x.toString(), this.jobId.getId(), state), Lists.newArrayList()), getSelf()))
            // scheduling Info observable
            .match(GetJobSchedInfoRequest.class, (x) -> getSender().tell(
                    new GetJobSchedInfoResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(
                            x.toString(), this.jobId.getId(), state), empty()), getSelf()))
            // latest scheduling Info
            .match(GetLatestJobDiscoveryInfoRequest.class, (x) -> getSender().tell(
                    new GetLatestJobDiscoveryInfoResponse(x.requestId, CLIENT_ERROR, genUnexpectedMsg(
                            x.toString(), this.jobId.getId(), state), empty()), getSelf()))
            //UNEXPECTED MESSAGES END //
            .matchAny(x -> LOGGER.warn(genUnexpectedMsg(x.toString(), this.jobId.getId(), state)))
            .build();
}

//////////////////////////////////////////// Akka Messages sent to the Job Actor Begin/////////////////////

/**
 * Initializes the job (either a fresh submit or a recovery of a running job) and switches the
 * actor behavior to active or initialized accordingly. Replies with JobInitialized carrying
 * SUCCESS or SERVER_ERROR.
 */
@Override
public void onJobInitialize(InitJob i) {
    ActorRef sender = getSender();
    try {
        initialize(i.isSubmit);
        // A recovered job that is already running goes straight to active behavior and re-arms
        // its runtime-limit timer; a fresh submit waits for workers to start.
        if (JobState.isRunningState(mantisJobMetaData.getState())) {
            getContext().become(activeBehavior);
            setRuntimeLimitTimersIfRequired(Instant.now());
        } else {
            getContext().become(initializedBehavior);
        }
        sender.tell(new JobInitialized(i.requestId, SUCCESS, String.format("Job %s initialized successfully", jobId), jobId, i.requstor), getSelf());
    } catch (Exception e) {
        LOGGER.error("Exception initializing job ", e);
        sender.tell(new JobInitialized(i.requestId, SERVER_ERROR, "" + e.getMessage(), jobId, i.requstor), getSelf());
    }
}

/**
 * Return information related to this job.
 *
 * @param r
 */
@Override
public void onGetJobDetails(GetJobDetailsRequest r) {
    ActorRef sender = getSender();
    sender.tell(new GetJobDetailsResponse(r.requestId, SUCCESS, "", of(getJobDetails())), getSelf());
}

/**
 * Return a BehaviorSubject that streams worker lifecycle events to the user.
 *
 * @param r
 */
@Override
public void onGetJobStatusSubject(GetJobSchedInfoRequest r) {
    LOGGER.trace("Entering onGetJobStatusSubject {}", r);
    ActorRef sender = getSender();
    // Guard against a misrouted request for a different job.
    if (r.getJobId().equals(this.jobId)) {
        sender.tell(new GetJobSchedInfoResponse(r.requestId, SUCCESS, "", of(workerManager.getJobStatusSubject())), getSelf());
    } else {
        String msg = "JobId in the request " + r.getJobId() + " does not match Job Actors job Id " + this.jobId;
        LOGGER.warn(msg);
        sender.tell(new GetJobSchedInfoResponse(r.requestId, CLIENT_ERROR, msg, empty()), getSelf());
    }
}

/**
 * Replies with the most recent JobSchedulingInfo snapshot held by the worker manager's
 * BehaviorSubject, or SERVER_ERROR if no snapshot exists yet.
 */
@Override
public void onGetLatestJobDiscoveryInfo(GetLatestJobDiscoveryInfoRequest r) {
    LOGGER.trace("Entering onGetLatestJobDiscoveryInfo {}", r);
    ActorRef sender = getSender();
    if (r.getJobCluster().equals(this.jobId.getCluster())) {
        JobSchedulingInfo schedulingInfo = workerManager.getJobStatusSubject().getValue();
        if (schedulingInfo != null) {
            sender.tell(new GetLatestJobDiscoveryInfoResponse(r.requestId, SUCCESS, "", ofNullable(schedulingInfo)), getSelf());
        } else {
            LOGGER.info("discoveryInfo from BehaviorSubject is null {}", jobId);
            sender.tell(new GetLatestJobDiscoveryInfoResponse(r.requestId, SERVER_ERROR, "discoveryInfo from BehaviorSubject is null " + jobId, empty()), getSelf());
        }
    } else {
        String msg = "JobCluster in the request " + r.getJobCluster() + " does not match Job Actors job ID " + this.jobId;
        LOGGER.warn(msg);
        sender.tell(new GetLatestJobDiscoveryInfoResponse(r.requestId, SERVER_ERROR, msg, empty()), getSelf());
    }
}

/**
 * Worker Events sent by the worker itself of the Scheduling Service.
 */
@Override
public void processWorkerEvent(final WorkerEvent e) {
    // All worker lifecycle accounting is delegated to the worker manager.
    this.workerManager.processEvent(e, mantisJobMetaData.getState());
}

/**
 * Resubmit a specific worker Index.
 */
@Override
public void onResubmitWorker(final ResubmitWorkerRequest r) {
    if (LOGGER.isTraceEnabled()) LOGGER.trace("Enter Job {} onResubmitWorker {}", jobId, r);
    ActorRef sender = getSender();
    try {
        // Publish an audit trail of who requested the resubmit and why before acting on it.
        eventPublisher.publishStatusEvent(new LifecycleEventsProto.JobStatusEvent(INFO,
                r.getWorkerNum() + " workerNum resubmit requested by " + r.getUser() + " , reason: " + r.getReason(),
                getJobId(), getJobState()));
        this.workerManager.resubmitWorker(r.getWorkerNum());
        numWorkerResubmissions.increment();
        sender.tell(new ResubmitWorkerResponse(r.requestId, SUCCESS,
                String.format("Worker %d of job %s resubmitted", r.getWorkerNum(), r.getJobId())), getSelf());
    } catch (Exception e) {
        sender.tell(new ResubmitWorkerResponse(r.requestId, SERVER_ERROR, e.getMessage()), getSelf());
    }
    if (LOGGER.isTraceEnabled()) LOGGER.trace("Exit Job {} onResubmitWorker {}", jobId, r);
}

/**
 * Delegates migration of workers running on disabled VMs to the worker manager.
 */
@Override
public void onMigrateWorkers(final JobProto.MigrateDisabledVmWorkersRequest r) {
    LOGGER.trace("Enter JobActor::onMigrateWorkersRequest {}", jobId);
    workerManager.migrateDisabledVmWorkers(r.time);
}

/**
 * Invoked periodically to check heart beat status of the workers.
 *
 * @param r
 */
@Override
public void onCheckHeartBeats(final JobProto.CheckHeartBeat r) {
    LOGGER.trace("Enter JobActor::onCheckHearbeats {}", jobId);
    this.workerManager.checkHeartBeats(r.getTime());
}

/**
 * Fired by the single-shot RUNTIME_LIMIT timer; asks the parent job cluster actor to kill
 * this job because its configured max runtime has elapsed.
 */
@Override
public void onRuntimeLimitReached(final JobProto.RuntimeLimitReached r) {
    LOGGER.info("In onRuntimeLimitReached {} for Job {} ", Instant.now(), this.jobId);
    LOGGER.info("Job {} Started at {} and killed at {} due to Runtime limit reached",
            jobId, mantisJobMetaData.getStartedAtInstant().orElse(Instant.now()), Instant.now());
    // The kill is routed through the parent so cluster-level bookkeeping stays consistent.
    getContext().getParent().tell(new JobClusterProto.KillJobRequest(jobId,
            "runtime limit reached", JobCompletedReason.Killed, MANTIS_MASTER_USER, ActorRef.noSender()), getSelf());
}

/**
 * Periodic self-sent trigger: pushes refreshed worker assignments to subscribers if they changed.
 */
@Override
public void onSendWorkerAssignments(final JobProto.SendWorkerAssignementsIfChanged r) {
    LOGGER.trace("Enter JobActor::onSendWorkerAssignments {}", jobId);
    this.workerManager.refreshAndSendWorkerAssignments();
}

/**
 * Will update Job state to terminal.
 * Unschedule all workers
 * Update worker state as failed in DB
 * Archive job
 * Self destruct
 * <p>
 * Worker terminated events will get ignored.
 *
 * @param req
 */
@Override
public void onJobKill(JobClusterProto.KillJobRequest req) {
    LOGGER.trace("Enter JobActor::onJobKill {}", jobId);
    ActorRef sender = getSender();
    LOGGER.info("Shutting down job {} on request by {}", jobId, sender);
    if (LOGGER.isDebugEnabled()) {
        LOGGER.info("shutting down job with metadata {}", mantisJobMetaData);
    }
    eventPublisher.publishStatusEvent(new LifecycleEventsProto.JobStatusEvent(INFO,
            "Killing job, reason: " + req.reason, getJobId(), getJobState()));
    try {
        // Error/Lost kills record the job as Failed, everything else as Completed.
        JobState newState;
        if (req.jobCompletedReason.equals(JobCompletedReason.Error)
                || req.jobCompletedReason.equals(JobCompletedReason.Lost)) {
            newState = JobState.Failed;
        } else {
            newState = JobState.Completed;
        }
        // update job state
        updateStateAndPersist(newState);
        // inform caller
        sender.tell(new JobClusterProto.KillJobResponse(req.requestId, SUCCESS, getJobState(),
                getJobId() + " terminated", getJobId(), this.mantisJobMetaData, req.user, req.requestor), getSelf());
        // continue with rest of the shutdown
        getTimers().cancel(CHECK_HB_TIMER_KEY);
        getContext().become(terminatingBehavior);
        // shutdown workers
        shutdown(newState, req.reason);
        // take poison pill
        performFinalShutdown();
    } catch (Exception e) {
        sender.tell(new JobClusterProto.KillJobResponse(req.requestId, SERVER_ERROR, getJobState(),
                getJobId() + " Could not be terminated due to " + e.getMessage(),
                getJobId(), this.mantisJobMetaData, req.user, req.requestor), getSelf());
    }
    LOGGER.trace("Exit JobActor::onJobKill {}", jobId);
}

/**
 * Scales the given stage to the requested number of workers, validating that the stage
 * exists and is marked scalable before delegating to the worker manager.
 */
@Override
public void onScaleStage(ScaleStageRequest scaleStage) {
    LOGGER.info("In Scale stage {} for Job {}", scaleStage, this.jobId);
    ActorRef sender = getSender();
    Optional<IMantisStageMetadata> stageMeta = this.mantisJobMetaData.getStageMetadata(scaleStage.getStageNum());
    // Make sure stage is valid
    if (!stageMeta.isPresent()) {
        LOGGER.warn("Stage {} does not exist in Job {}", scaleStage.getStageNum(), this.jobId);
        sender.tell(new ScaleStageResponse(scaleStage.requestId, CLIENT_ERROR,
                "Non existent stage " + scaleStage.getStageNum(), 0), getSelf());
        return;
    }
    // Make sure stage is scalable
    MantisStageMetadataImpl stageMetaData = (MantisStageMetadataImpl) stageMeta.get();
    if (!stageMetaData.getScalable()) {
        LOGGER.warn("Stage {} is not scalable in Job {}", scaleStage.getStageNum(), this.jobId);
        eventPublisher.publishStatusEvent(new LifecycleEventsProto.JobStatusEvent(
                LifecycleEventsProto.StatusEvent.StatusEventType.WARN,
                "Can't change #workers to " + scaleStage.getNumWorkers() + ", stage " + scaleStage.getStageNum() + " is not scalable",
                getJobId(), getJobState()));
        sender.tell(new ScaleStageResponse(scaleStage.requestId, CLIENT_ERROR,
                "Stage " + scaleStage.getStageNum() + " is not scalable", 0), getSelf());
        return;
    }
    try {
        // The worker manager may clamp the request; reply with the actual worker count applied.
        int actualScaleup = this.workerManager.scaleStage(stageMetaData, scaleStage.getNumWorkers(), scaleStage.getReason());
        LOGGER.info("Scaled stage {} to {} workers for Job {}", scaleStage.getStageNum(), actualScaleup, this.jobId);
        numScaleStage.increment();
        sender.tell(new ScaleStageResponse(scaleStage.requestId, SUCCESS,
                String.format("Scaled stage %s to %s workers", scaleStage.getStageNum(), actualScaleup), actualScaleup), getSelf());
    } catch (Exception e) {
        String msg = String.format("Stage %d scale failed due to %s", scaleStage.getStageNum(), e.getMessage());
        LOGGER.error(msg, e);
        sender.tell(new ScaleStageResponse(scaleStage.requestId, SERVER_ERROR, msg, 0), getSelf());
    }
}

/**
 * Responds with {@link ListWorkersResponse} object containing data about all active workers.
 * @param listWorkersRequest
 */
public void onListActiveWorkers(ListWorkersRequest listWorkersRequest) {
    ActorRef sender = getSender();
    List<IMantisWorkerMetadata> activeWorkers = this.workerManager.getActiveWorkers(listWorkersRequest.getLimit());
    sender.tell(new ListWorkersResponse(listWorkersRequest.requestId, SUCCESS, "",
            Collections.unmodifiableList(activeWorkers)), getSelf());
}

//////////////////////////////////////////// Akka Messages sent to the Job Actor End/////////////////////////

/////////////////////////////////////////// Internal State change events Begin //////////////////////////////

/**
 * Archives the job metadata, switches to the terminated behavior and stops this actor
 * by sending itself a PoisonPill. Archiving failures are logged but do not block shutdown.
 */
private void performFinalShutdown() {
    if (LOGGER.isTraceEnabled()) {
        LOGGER.trace("Enter performFinalShutdown for Job {}", jobId);
    }
    try {
        LOGGER.info("Archiving Job {}", this.jobId);
        jobStore.archiveJob(mantisJobMetaData);
    } catch (IOException e) {
        LOGGER.warn("Exception archiving job " + mantisJobMetaData.getJobId(), e);
    }
    getContext().become(terminatedBehavior);
    // commit suicide
    getSelf().tell(PoisonPill.getInstance(), ActorRef.noSender());
    if (LOGGER.isTraceEnabled()) {
        LOGGER.trace("Exit performFinalShutdown for Job {}", jobId);
    }
}

/**
 * Invoked when all workers are in terminal state.
 * Should get called only during shutdown process
 */
@Override
public void onAllWorkersCompleted() {
    LOGGER.info("JobActor: onAllWorkersCompleted with current state {}", mantisJobMetaData.getState());
    // The allWorkersCompleted flag makes the kill request idempotent across repeated callbacks.
    if (!JobState.isTerminalState(mantisJobMetaData.getState()) && !allWorkersCompleted) {
        LOGGER.info("All workers completed but job {} in {} state. Request termination", jobId, getJobState());
        allWorkersCompleted = true;
        getContext().parent().tell(
                new JobClusterProto.KillJobRequest(
                        jobId, "Job Completed", JobCompletedReason.Normal, MANTIS_MASTER_USER, ActorRef.noSender()),
                getSelf());
        numWorkersCompletedNotTerminal.increment();
    } else {
        // job kill has already been requested, ignore
        LOGGER.debug("Job {} Kill already requested", this.jobId);
    }
}

/**
 * Should get called only once after all workers have started.
 */
@Override
public boolean onAllWorkersStarted() {
    LOGGER.info("In onAllWorkersStarted for Job {}", jobId);
    boolean isSuccess = true;
    if (mantisJobMetaData.getState() == JobState.Accepted) {
        try {
            // update record in storage
            updateStateAndPersist(JobState.Launched);
            // update behavior to active
            getContext().become(activeBehavior);
            eventPublisher.publishStatusEvent(new LifecycleEventsProto.JobStatusEvent(INFO,
                    "all workers started, job transitioning to Active", getJobId(), getJobState()));
            // inform job cluster manager that the job has started
            getContext().getParent().tell(new JobClusterProto.JobStartedEvent(getJobId()), getSelf());
            // kick off max runtime timer if needed
            Instant currentTime = Instant.now();
            // Update start time and persist state
            mantisJobMetaData.setStartedAt(currentTime.toEpochMilli(), jobStore);
            setRuntimeLimitTimersIfRequired(currentTime);
        } catch (Exception e) {
            LOGGER.error("Error processing all worker started event ", e);
            isSuccess = false;
        }
    } else if (mantisJobMetaData.getState() == JobState.Launched) {
        // no op
        LOGGER.info("Job is already in launched state");
        isSuccess = false;
    } else {
        // something is wrong!
        LOGGER.warn("Unexpected all Workers Started Event while job in {} state", mantisJobMetaData.getState());
        isSuccess = false;
    }
    if (LOGGER.isTraceEnabled()) {
        LOGGER.trace("Exit AllWorkerStarted event processed successfully ? {}", isSuccess);
    }
    return isSuccess;
}

/**
 * Invoked if workers have been relaunched too many times.
* Request this job to be terminated and marked as failed */ @Override public boolean onTooManyWorkerResubmits() { LOGGER.warn("Too many worker resubmits detected for Job {}. Requesting job shutdown", jobId); boolean isSuccess = true; eventPublisher.publishStatusEvent(new LifecycleEventsProto.JobStatusEvent(ERROR, "Worker Resubmit limit reached, shutting down job", getJobId(), getJobState())); numWorkerResubmitLimitReached.increment(); //updateStateAndPersist(JobState.Terminating_abnormal); // ask Parent to shut it down getContext().parent().tell( new JobClusterProto.KillJobRequest( jobId, "Too many worker resubmits", JobCompletedReason.Error, MANTIS_MASTER_USER, ActorRef.noSender()), getSelf()); return isSuccess; } //////////////////////////////////Internal State Change Events END ////////////////////////////////////// /** * Retuns the details of this job. */ @Override public IMantisJobMetadata getJobDetails() { LOGGER.debug("Returning job Details {}", this.mantisJobMetaData); return this.mantisJobMetaData; } /** * Triggered when the JobActor receives the Job Kill message. * it will update the state of the job to terminating in the persistence layer and request * the workers to be terminated. 
* * @param state */ @Override public void shutdown(JobState state, String reason) { LOGGER.info("Entering JobActor:shutdown {}", jobId); workerManager.shutdown(); eventPublisher.publishStatusEvent(new LifecycleEventsProto.JobStatusEvent(INFO, "job shutdown, reason: " + reason, getJobId(), state)); eventPublisher.publishAuditEvent(new LifecycleEventsProto.AuditEvent( LifecycleEventsProto.AuditEvent.AuditEventType.JOB_TERMINATE, jobId.getId(), "job shutdown, reason: " + reason)); } @Override public JobId getJobId() { return this.jobId; } private void updateStateAndPersist(JobState newState) throws Exception { LOGGER.trace("Enter JobActor::updateStateAndPersist for Job {} Updating job Status {}", jobId, newState); mantisJobMetaData.setJobState(newState, jobStore); LOGGER.trace("Exit JobActor::updateStateAndPersist for Job {}", jobId); } /** * Always invoked after the job has transitioned to started state. * @param currentTime */ private void setRuntimeLimitTimersIfRequired(Instant currentTime) { long maxRuntimeSecs = mantisJobMetaData.getJobDefinition().getJobSla().getRuntimeLimitSecs(); Instant startedAt = mantisJobMetaData.getStartedAtInstant().orElse(currentTime); long terminateJobInSecs; if (maxRuntimeSecs > 0) { terminateJobInSecs = JobHelper.calculateRuntimeDuration(maxRuntimeSecs,startedAt); LOGGER.info("Will terminate Job {} at {} ", jobId, (currentTime.plusSeconds(terminateJobInSecs))); getTimers().startSingleTimer("RUNTIME_LIMIT", new JobProto.RuntimeLimitReached(), Duration.ofSeconds(terminateJobInSecs)); } else { LOGGER.info("maxRuntime for Job {} is {} ignore ", jobId, mantisJobMetaData.getJobDefinition() .getJobSla().getRuntimeLimitSecs()); } } @Override public JobState getJobState() { return mantisJobMetaData.getState(); } private boolean isAutoscaled(SchedulingInfo schedulingInfo) { LOGGER.trace("In isAutoscaled {}", schedulingInfo); for (Map.Entry<Integer, StageSchedulingInfo> entry : schedulingInfo.getStages().entrySet()) { final StageScalingPolicy 
scalingPolicy = entry.getValue().getScalingPolicy(); if (scalingPolicy != null && scalingPolicy.isEnabled()) { LOGGER.info("Job {} is autoscaleable", jobId); return true; } } LOGGER.info("Job {} is NOT scaleable", jobId); return false; } /*package protected*/ /** * Returns the calculated subscription timeout in seconds for this job. * @param mjmd * @return */ static long getSubscriptionTimeoutSecs(final IMantisJobMetadata mjmd) { // if perpetual job there is no subscription timeout if (mjmd.getJobDefinition().getJobSla().getDurationType() == MantisJobDurationType.Perpetual) return 0; return mjmd.getSubscriptionTimeoutSecs() == 0 ? ConfigurationProvider.getConfig().getEphemeralJobUnsubscribedTimeoutSecs() : mjmd.getSubscriptionTimeoutSecs(); } /** * Keeps track of the last used worker number and mints a new one every time a worker is scheduled. */ static class WorkerNumberGenerator { private static final Logger LOGGER = LoggerFactory.getLogger(WorkerNumberGenerator.class); private static final int DEFAULT_INCREMENT_STEP = 10; private final int incrementStep; private int lastUsed; private int currLimit; private volatile boolean hasErrored = false; /** * Creates an instance of this class. * * @param lastUsed * @param incrementStep */ WorkerNumberGenerator(int lastUsed, int incrementStep) { Preconditions.checkArgument(lastUsed >= 0, "Last Used worker Number cannot be negative {} ", lastUsed); Preconditions.checkArgument(incrementStep >= 1, "incrementStepcannot be less than 1 {} ", incrementStep); LOGGER.debug("WorkerNumberGenerator ctor2 lastUsed {}", lastUsed); this.lastUsed = lastUsed; this.currLimit = lastUsed; this.incrementStep = incrementStep; } /** * Default constructor sets last used number to 0. 
*/ WorkerNumberGenerator() { this(0, DEFAULT_INCREMENT_STEP); LOGGER.trace("WorkerNumberGenerator ctor1"); } private void advance(MantisJobMetadataImpl mantisJobMetaData, MantisJobStore jobStore) { LOGGER.trace("getNextWorkerNumber in advance"); try { currLimit += incrementStep; mantisJobMetaData.setNextWorkerNumberToUse(currLimit, jobStore); if (LOGGER.isDebugEnabled()) { LOGGER.debug("{} nextWorkerNumber set to : {} ", mantisJobMetaData.getJobId(), currLimit); } } catch (Exception e) { hasErrored = true; LOGGER.error("Exception setting next Worker number to use ", e); throw new RuntimeException("Unexpected: " + e.getMessage()); } } /** * Get the next unused worker number. * <p> * For performance reasosns, this object updates state in persistence every N calls made to this method. * * @return The next worker number to use for new workers * * @throws IllegalStateException if there was an error saving the next worker number to use to the job store */ int getNextWorkerNumber(MantisJobMetadataImpl mantisJobMetaData, MantisJobStore jobStore) { LOGGER.debug("getNextWorkerNumber lastUsed {}", lastUsed); if (hasErrored) throw new IllegalStateException("Unexpected: Invalid state likely due to getting/setting" + "next worker number"); if (lastUsed == currLimit) advance(mantisJobMetaData, jobStore); LOGGER.debug("getNextWorkerNumber returns {}", lastUsed + 1); return ++lastUsed; } } /** * Responsible for managing worker related state of this job. 
class WorkerManager implements IWorkerManager {

    private static final int WORKER_RESUBMIT_LIMIT = 100;
    private ObjectMapper mapper = new ObjectMapper();
    // Mints monotonically increasing worker numbers; seeded from persisted state on failover.
    private final WorkerNumberGenerator workerNumberGenerator;
    private boolean allWorkersStarted = false;
    private final IMantisJobManager jobMgr;
    private ConcurrentSkipListSet<Integer> workersToMigrate = new ConcurrentSkipListSet<>();
    private int sinkStageNum;
    private final MigrationStrategy migrationStrategy;
    private final MantisScheduler scheduler;
    private long lastWorkerMigrationTimestamp = Long.MIN_VALUE;
    // Per-stage worker assignments, pushed to subscribers via the BehaviorSubject below.
    private Map<Integer, WorkerAssignments> stageAssignments = new HashMap<>();
    private BehaviorSubject<JobSchedulingInfo> jobSchedulingInfoBehaviorSubject;
    private String currentJobSchedulingInfoStr = null;
    private final WorkerResubmitRateLimiter resubmitRateLimiter = new WorkerResubmitRateLimiter();
    private volatile boolean stageAssignmentPotentiallyChanged;

    /**
     * Creates an instance of this class.
     *
     * @param jobMgr
     * @param migrationConfig
     * @param scheduler
     * @param isSubmit
     *
     * @throws Exception
     */
    WorkerManager(IMantisJobManager jobMgr, WorkerMigrationConfig migrationConfig,
                  MantisScheduler scheduler, boolean isSubmit) throws Exception {
        LOGGER.debug("In WorkerManager ctor");
        // Fresh submits start numbering at 0; failover resumes from the persisted next-worker-number.
        workerNumberGenerator = new WorkerNumberGenerator((isSubmit)
                ? 0 : jobMgr.getJobDetails().getNextWorkerNumberToUse(),
                WorkerNumberGenerator.DEFAULT_INCREMENT_STEP);
        this.scheduler = scheduler;
        this.jobMgr = jobMgr;
        migrationStrategy = MigrationStrategyFactory.getStrategy(jobId.getId(), migrationConfig);
        int noOfStages = mantisJobMetaData.getStageMetadata().size();
        // Single-stage jobs have no separate job-master stage, so stage 1 is the sink;
        // otherwise the last data stage acts as the sink.
        if (noOfStages == 1) {
            sinkStageNum = 1;
        } else {
            sinkStageNum = noOfStages - 1;
        }
        JobSchedulingInfo initialJS = new JobSchedulingInfo(jobMgr.getJobId().getId(), new HashMap<>());
        currentJobSchedulingInfoStr = mapper.writeValueAsString(initialJS);
        jobSchedulingInfoBehaviorSubject = BehaviorSubject.create(initialJS);
        initialize(isSubmit);
        LOGGER.debug("Exit WorkerManager ctor");
    }

    /**
     * Initializes a worker manager.
     *
     * A WorkerManager can get initialized on a job submission or a failover.
     *
     * Init from Job submission: submits initial workers which each go through their startup lifecycle.
     *
     * Init from Master failover: workers are already running; gets state from Mesos and updates its view
     * of the world. If worker information is bad from Mesos, gather up these worker and resubmit them
     * in all together after initialization of running workers.
     *
     * @param isSubmit specifies if this initialization is due to job submission or a master failover.
     *
     * @throws Exception
     */
    void initialize(boolean isSubmit) throws Exception {
        LOGGER.trace("In initialize WorkerManager for Job {} with isSubmit {}", jobId, isSubmit);
        if (isSubmit) {
            submitInitialWorkers();
        } else {
            initializeRunningWorkers();
        }
        LOGGER.trace("Exit initialize WorkerManager for Job {}", jobId);
    }

    // Rebuilds in-memory/scheduler state for a job whose workers are already running (failover path).
    private void initializeRunningWorkers() {
        LOGGER.trace("In initializeRunningWorkers for Job {}", jobId);
        // Scan for the list of all corrupted workers to be resubmitted.
        List<JobWorker> workersToResubmit = markCorruptedWorkers();
        // publish a refresh before enqueuing tasks to the Scheduler, as there is a potential race between
        // WorkerRegistryV2 getting updated and isWorkerValid being called from SchedulingService loop
        // If worker is not found in the SchedulingService loop, it is considered invalid and prematurely
        // removed from Fenzo state.
        markStageAssignmentsChanged(true);
        for (IMantisStageMetadata stageMeta : mantisJobMetaData.getStageMetadata().values()) {
            Map<Integer, WorkerHost> workerHosts = new HashMap<>();
            for (JobWorker worker : stageMeta.getAllWorkers()) {
                IMantisWorkerMetadata wm = worker.getMetadata();
                if (WorkerState.isRunningState(wm.getState())) {
                    // send fake heartbeat
                    // NOTE(review): the synthetic heartbeat re-seeds heartbeat tracking after failover so
                    // running workers are not immediately flagged as missed — confirm against checkHeartBeats.
                    try {
                        WorkerEvent fakeHB = new WorkerHeartbeat(new Status(jobId.getId(),
                                stageMeta.getStageNum(), wm.getWorkerIndex(), wm.getWorkerNumber(),
                                Status.TYPE.HEARTBEAT, "", MantisJobState.Started, System.currentTimeMillis()));
                        worker.processEvent(fakeHB, jobStore);
                    } catch (InvalidWorkerStateChangeException | IOException e) {
                        LOGGER.error("problem sending initial heartbeat for Job {} during initialization",
                                worker.getMetadata().getJobId(), e);
                    }
                    workerHosts.put(wm.getWorkerNumber(), new WorkerHost(
                            wm.getSlave(), wm.getWorkerIndex(), wm.getWorkerPorts().getPorts(),
                            DataFormatAdapter.convertWorkerStateToMantisJobState(wm.getState()),
                            wm.getWorkerNumber(), wm.getMetricsPort(), wm.getCustomPort()));
                    ScheduleRequest scheduleRequest = createSchedulingRequest(wm, empty());
                    if (LOGGER.isDebugEnabled()) {
                        LOGGER.debug("initializing Running task {}-worker-{}-{}",
                                wm.getJobId(), wm.getWorkerIndex(), wm.getWorkerNumber());
                    }
                    scheduler.initializeRunningWorker(scheduleRequest, wm.getSlave());
                    LOGGER.debug("Initialized running worker {}", wm.getSlave());
                } else if (wm.getState().equals(WorkerState.Accepted)) {
                    // Worker never launched before the failover — re-enqueue it for scheduling.
                    queueTask(wm);
                }
            }
            if (stageMeta.getStageNum() > 0) {
                stageAssignments.put(stageMeta.getStageNum(), new WorkerAssignments(stageMeta.getStageNum(),
                        stageMeta.getNumWorkers(), workerHosts));
            }
        }
        // publish another update after queuing tasks to Fenzo (in case some workers were marked Started
        // due to the Fake heartbeat in above loop)
        markStageAssignmentsChanged(true);
        // Resubmit workers with missing ports so they can be reassigned new resources.
        for (JobWorker jobWorker : workersToResubmit) {
            LOGGER.warn("discovered workers with missing ports during initialization: {}", jobWorker);
            try {
                resubmitWorker(jobWorker);
            } catch (Exception e) {
                LOGGER.warn("Exception resubmitting worker {} during initializeRunningWorkers due to {}",
                        jobWorker, e.getMessage(), e);
            }
        }
        LOGGER.trace("Initialized running workers for Job {} complete", jobId);
    }

    /**
     * Finds workers that claim to be running but have missing/invalid port data, marks each
     * as Failed via a synthetic status event, and returns them for resubmission.
     */
    private List<JobWorker> markCorruptedWorkers() {
        LOGGER.trace("Enter markCorruptedWorkers for Job {} ", jobId);
        List<JobWorker> corruptedWorkers = new ArrayList<>();
        for (IMantisStageMetadata stageMeta : mantisJobMetaData.getStageMetadata().values()) {
            for (JobWorker worker : stageMeta.getAllWorkers()) {
                IMantisWorkerMetadata wm = worker.getMetadata();
                Optional<WorkerPorts> workerPortsOptional = wm.getPorts();
                if (WorkerState.isRunningState(wm.getState())
                        && (!workerPortsOptional.isPresent() || !workerPortsOptional.get().isValid())) {
                    LOGGER.info("marking corrupted worker {} for Job ID {} as {}",
                            worker.getMetadata().getWorkerId(), jobId, WorkerState.Failed);
                    numMissingWorkerPorts.increment();
                    // Mark this worker as corrupted.
                    corruptedWorkers.add(worker);
                    // Send initial status event to signal to the worker to mark itself as failed.
                    try {
                        WorkerStatus status = new WorkerStatus(new Status(jobId.getId(),
                                stageMeta.getStageNum(), wm.getWorkerIndex(), wm.getWorkerNumber(),
                                Status.TYPE.HEARTBEAT, "", MantisJobState.Failed, System.currentTimeMillis()));
                        worker.processEvent(status, jobStore);
                    } catch (InvalidWorkerStateChangeException | IOException e) {
                        LOGGER.error("problem sending initial heartbeat for Job {} during initialization",
                                worker.getMetadata().getJobId(), e);
                    }
                }
            }
        }
        LOGGER.trace("Exit markCorruptedWorkers for Job {} ", jobId);
        return corruptedWorkers;
    }

    /**
     * Flags the stage assignments as dirty; pushes an immediate refresh when forced or when
     * the periodic refresh is disabled (interval configured as -1).
     */
    private void markStageAssignmentsChanged(boolean forceRefresh) {
        this.stageAssignmentPotentiallyChanged = true;
        long refreshInterval = ConfigurationProvider.getConfig().getStageAssignmentRefreshIntervalMs();
        if (refreshInterval == -1 || forceRefresh) {
            refreshStageAssignmentsAndPush();
        }
    }

    // Rebuilds the per-stage assignment map from current worker state and pushes it to
    // subscribers; no-op when nothing has potentially changed since the last push.
    private void refreshStageAssignmentsAndPush() {
        LOGGER.trace("Enter refreshStageAssignmentsAndPush for Job {} ", jobId);
        if (!stageAssignmentPotentiallyChanged) {
            LOGGER.debug("Worker Assignments have not changed since last push skipping.");
            return;
        }
        List<IMantisWorkerMetadata> acceptedAndActiveWorkers = new ArrayList<>();
        List<IMantisWorkerMetadata> activeWorkers = new ArrayList<>();
        for (IMantisStageMetadata stageMeta : mantisJobMetaData.getStageMetadata().values()) {
            Map<Integer, WorkerHost> workerHosts = new HashMap<>();
            for (JobWorker worker : stageMeta.getAllWorkers()) {
                IMantisWorkerMetadata wm = worker.getMetadata();
                if (WorkerState.isRunningState(wm.getState())) {
                    workerHosts.put(wm.getWorkerNumber(), new WorkerHost(
                            wm.getSlave(), wm.getWorkerIndex(), wm.getWorkerPorts().getPorts(),
                            DataFormatAdapter.convertWorkerStateToMantisJobState(wm.getState()),
                            wm.getWorkerNumber(), wm.getMetricsPort(), wm.getCustomPort()));
                    activeWorkers.add(wm);
                    acceptedAndActiveWorkers.add(wm);
                } else if (wm.getState().equals(WorkerState.Accepted)) {
                    acceptedAndActiveWorkers.add(wm);
                }
            }
            stageAssignments.put(stageMeta.getStageNum(), new
WorkerAssignments(stageMeta.getStageNum(), stageMeta.getNumWorkers(), workerHosts)); } JobSchedulingInfo jobSchedulingInfo = new JobSchedulingInfo(jobId.getId(), stageAssignments); LOGGER.debug("publishing scheduling Info for job {}", jobId); jobSchedulingInfoBehaviorSubject.onNext(jobSchedulingInfo); eventPublisher.publishWorkerListChangedEvent(new LifecycleEventsProto.WorkerListChangedEvent( new WorkerInfoListHolder(this.jobMgr.getJobId(), acceptedAndActiveWorkers))); numSchedulingChangesRefreshed.increment(); stageAssignmentPotentiallyChanged = false; LOGGER.trace("Exit refreshStageAssignmentsAndPush for Job {} ", jobId); } private void submitInitialWorkers() throws Exception { List<IMantisWorkerMetadata> workers = getInitialWorkers(mantisJobMetaData.getJobDefinition(), System.currentTimeMillis()); LOGGER.debug("Got initial workers " + workers); int beg = 0; while (true) { if (beg >= workers.size()) break; int en = beg + Math.min(workerWritesBatchSize, workers.size() - beg); final List<IMantisWorkerMetadata> workerRequests = workers.subList(beg, en); try { jobStore.storeNewWorkers(jobMgr.getJobDetails(), workerRequests); LOGGER.info("Stored workers {} for Job {}", workerRequests, jobId); // refresh Worker Registry state before enqueuing task to Scheduler markStageAssignmentsChanged(true); // queue to scheduler workerRequests.forEach(this::queueTask); } catch (Exception e) { e.printStackTrace(); LOGGER.error("Error {} storing workers of job {}", e.getMessage(), jobId.getId()); throw new RuntimeException(String.format("Exception saving worker %s for Job %s ", e.getMessage(), jobId)); } beg = en; } } private void queueTask(final IMantisWorkerMetadata workerRequest, final Optional<Long> readyAt) { final ScheduleRequest schedulingRequest = createSchedulingRequest(workerRequest, readyAt); LOGGER.info("Queueing up scheduling request {} ", schedulingRequest); try { scheduler.scheduleWorker(schedulingRequest); } catch (Exception e) { LOGGER.error("Exception queueing 
task", e); e.printStackTrace(); } } private void queueTask(final IMantisWorkerMetadata workerRequest) { queueTask(workerRequest, empty()); } private ScheduleRequest createSchedulingRequest(final IMantisWorkerMetadata workerRequest, final Optional<Long> readyAt) { try { LOGGER.trace("In createSchedulingRequest for worker {}", workerRequest); final WorkerId workerId = workerRequest.getWorkerId(); // setup constraints final List<ConstraintEvaluator> hardConstraints = new ArrayList<>(); final List<VMTaskFitnessCalculator> softConstraints = new ArrayList<>(); Optional<IMantisStageMetadata> stageMetadataOp = mantisJobMetaData.getStageMetadata(workerRequest.getStageNum()); LOGGER.debug("Got stageMeta {}", stageMetadataOp); if (!stageMetadataOp.isPresent()) { throw new RuntimeException(String.format("No such stage %s", workerRequest.getStageNum())); } IMantisStageMetadata stageMetadata = stageMetadataOp.get(); List<JobConstraints> stageHC = stageMetadata.getHardConstraints(); List<JobConstraints> stageSC = stageMetadata.getSoftConstraints(); final Set<String> coTasks = new HashSet<>(); if ((stageHC != null && !stageHC.isEmpty()) || (stageSC != null && !stageSC.isEmpty())) { for (JobWorker jobWorker : stageMetadata.getAllWorkers()) { if (jobWorker.getMetadata().getWorkerNumber() != workerId.getWorkerNum()) coTasks.add(workerId.getId()); } } if (stageHC != null && !stageHC.isEmpty()) { for (JobConstraints c : stageHC) { hardConstraints.add(ConstraintsEvaluators.hardConstraint(c, coTasks)); } } if (stageSC != null && !stageSC.isEmpty()) { for (JobConstraints c : stageSC) { softConstraints.add(ConstraintsEvaluators.softConstraint(c, coTasks)); } } ScheduleRequest sr = new ScheduleRequest(workerId, workerRequest.getStageNum(), workerRequest.getNumberOfPorts(), new JobMetadata(mantisJobMetaData.getJobId().getId(), mantisJobMetaData.getJobJarUrl(), mantisJobMetaData.getTotalStages(), mantisJobMetaData.getUser(), mantisJobMetaData.getSchedulingInfo(), 
mantisJobMetaData.getParameters(), getSubscriptionTimeoutSecs(mantisJobMetaData), mantisJobMetaData.getMinRuntimeSecs() ), mantisJobMetaData.getSla().orElse(new JobSla.Builder().build()).getDurationType(), stageMetadata.getMachineDefinition(), hardConstraints, softConstraints, readyAt.orElse(0L), workerRequest.getPreferredClusterOptional()); LOGGER.trace("created scheduleRequest {}", sr); return sr; } catch (Exception e) { e.printStackTrace(); LOGGER.error("Exception creating scheduleRequest {}", e.getMessage()); throw e; } } private List<IMantisWorkerMetadata> getInitialWorkers(JobDefinition jobDetails, long submittedAt) throws Exception { List<IMantisWorkerMetadata> workerRequests = Lists.newLinkedList(); LOGGER.debug("In getInitial Workers : " + jobDetails); SchedulingInfo schedulingInfo = jobDetails.getSchedulingInfo(); LOGGER.debug("scheduling info " + schedulingInfo); int totalStages = schedulingInfo.getStages().size(); LOGGER.debug("total stages {} ", totalStages); Iterator<Integer> it = schedulingInfo.getStages().keySet().iterator(); while (it.hasNext()) { int stageNum = it.next(); List<IMantisWorkerMetadata> stageWorkers = setupStageWorkers(schedulingInfo, totalStages, stageNum, submittedAt); workerRequests.addAll(stageWorkers); } return workerRequests; } private List<IMantisWorkerMetadata> setupStageWorkers(SchedulingInfo schedulingInfo, int totalStages, int stageNum, long submittedAt) throws Exception { LOGGER.trace("In setupStageWorkers for Job {} with sched info", jobId, schedulingInfo); List<IMantisWorkerMetadata> workerRequests = new LinkedList<>(); StageSchedulingInfo stage = schedulingInfo.getStages().get(stageNum); if (stage == null) { LOGGER.error("StageSchedulingInfo cannot be null for Stage {}", stageNum); throw new Exception("StageSchedulingInfo cannot be null for Stage " + stageNum); //return workerRequests; // can happen when stageNum=0 and there is no jobMaster defined } int numInstancesAtStage = stage.getNumberOfInstances(); // add worker 
request for each instance required in stage int stageIndex = 0; for (int i = 0; i < numInstancesAtStage; i++) { // during initialization worker number and index are identical int workerIndex = stageIndex++; if (!mantisJobMetaData.getStageMetadata(stageNum).isPresent()) { IMantisStageMetadata msmd = new MantisStageMetadataImpl.Builder(). withJobId(jobId) .withStageNum(stageNum) .withNumStages(totalStages) .withMachineDefinition(stage.getMachineDefinition()) .withNumWorkers(numInstancesAtStage) .withHardConstraints(stage.getHardConstraints()) .withSoftConstraints(stage.getSoftConstraints()) .withScalingPolicy(stage.getScalingPolicy()) .isScalable(stage.getScalable()) .build(); LOGGER.debug("Job Actor adding stage "); mantisJobMetaData.addJobStageIfAbsent(msmd); jobStore.updateStage(msmd); LOGGER.debug("Added " + mantisJobMetaData.getStageMetadata(stageNum)); LOGGER.debug("Total stages " + mantisJobMetaData.getTotalStages()); } IMantisWorkerMetadata mwmd = addWorker(schedulingInfo, stageNum, workerIndex); workerRequests.add(mwmd); } LOGGER.trace("Exit setupStageWorkers for Job {} ", jobId); return workerRequests; } private IMantisWorkerMetadata addWorker(SchedulingInfo schedulingInfo, int stageNo, int workerIndex) throws InvalidJobException { LOGGER.trace("In addWorker for index {} for Job {} with sched info", workerIndex, jobId, schedulingInfo); StageSchedulingInfo stageSchedInfo = schedulingInfo.getStages().get(stageNo); int workerNumber = workerNumberGenerator.getNextWorkerNumber(mantisJobMetaData, jobStore); JobWorker jw = new JobWorker.Builder() .withJobId(jobId) .withWorkerIndex(workerIndex) .withWorkerNumber(workerNumber) .withNumberOfPorts(stageSchedInfo.getMachineDefinition().getNumPorts() + MANTIS_SYSTEM_ALLOCATED_NUM_PORTS) .withStageNum(stageNo) .withLifecycleEventsPublisher(eventPublisher) .build(); if (!mantisJobMetaData.addWorkerMetadata(stageNo, jw)) { Optional<JobWorker> tmp = mantisJobMetaData.getWorkerByIndex(stageNo, workerIndex); if 
(tmp.isPresent()) { throw new InvalidJobException(mantisJobMetaData.getJobId().getId(), stageNo, workerIndex, new Exception("Couldn't add worker " + workerNumber + " as index " + workerIndex + ", that index already has worker " + tmp.get().getMetadata().getWorkerNumber())); } else { throw new InvalidJobException(mantisJobMetaData.getJobId().getId(), stageNo, workerIndex, new Exception("Couldn't add worker " + workerNumber + " as index " + workerIndex + "doesn't exist ")); } } LOGGER.trace("Exit addWorker for index {} for Job {} with sched info", workerIndex, jobId, schedulingInfo); return jw.getMetadata(); } @Override public void shutdown() { LOGGER.trace("Enter shutdown for Job {}", jobId); // if workers have not already completed if (!allWorkerCompleted()) { // kill workers terminateAllWorkersAsync(); } //send empty schedulingInfo changes so downstream jobs would explicitly disconnect jobSchedulingInfoBehaviorSubject.onNext(new JobSchedulingInfo(this.jobMgr.getJobId().getId(), new HashMap<>())); jobSchedulingInfoBehaviorSubject.onCompleted(); LOGGER.trace("Exit shutdown for Job {}", jobId); } private void terminateAllWorkersAsync() { LOGGER.info("Terminating all workers of job {}", jobId); Observable.from(mantisJobMetaData.getStageMetadata().values()) .flatMap((st) -> Observable.from(st.getAllWorkers())) .filter((worker) -> !WorkerState.isTerminalState(worker.getMetadata().getState())) .map((worker) -> { LOGGER.info("Terminating " + worker); terminateWorker(worker.getMetadata(), WorkerState.Completed, JobCompletedReason.Killed); return worker; }) .doOnCompleted(() -> markStageAssignmentsChanged(true)) .subscribeOn(Schedulers.io()) .subscribe(); LOGGER.info("Terminated all workers of job {}", jobId); } private void terminateWorker(IMantisWorkerMetadata workerMeta, WorkerState finalWorkerState, JobCompletedReason reason) { LOGGER.info("Terminating worker {} with number {}", workerMeta, workerMeta.getWorkerNumber()); try { WorkerId workerId = 
workerMeta.getWorkerId(); // call vmservice terminate scheduler.unscheduleAndTerminateWorker(workerMeta.getWorkerId(), Optional.ofNullable(workerMeta.getSlave())); LOGGER.debug("WorkerNumber->StageMap {}", mantisJobMetaData.getWorkerNumberToStageMap()); int stageNum = mantisJobMetaData.getWorkerNumberToStageMap().get(workerMeta.getWorkerNumber()); Optional<IMantisStageMetadata> stageMetaOp = mantisJobMetaData.getStageMetadata(stageNum); if (stageMetaOp.isPresent()) { // Mark work as terminal WorkerTerminate terminateEvent = new WorkerTerminate(workerId, finalWorkerState, reason); MantisStageMetadataImpl stageMetaData = (MantisStageMetadataImpl) stageMetaOp.get(); Optional<JobWorker> jobWorkerOp = stageMetaData.processWorkerEvent(terminateEvent, jobStore); // Mark work as terminal if (jobWorkerOp.isPresent()) { jobStore.archiveWorker(jobWorkerOp.get().getMetadata()); eventPublisher.publishStatusEvent(new LifecycleEventsProto.WorkerStatusEvent(INFO, "Terminated worker, reason: " + reason.name(), workerMeta.getStageNum(), workerMeta.getWorkerId(), workerMeta.getState())); } } else { LOGGER.error("Stage {} not found while terminating worker {}", stageNum, workerId); } } catch (Exception e) { LOGGER.error("Error terminating worker {}", workerMeta.getWorkerId(), e); } } private void terminateAndRemoveWorker(IMantisWorkerMetadata workerMeta, WorkerState finalWorkerState, JobCompletedReason reason) { LOGGER.info("Terminating and removing worker {}", workerMeta.getWorkerId().getId()); try { WorkerId workerId = workerMeta.getWorkerId(); if (LOGGER.isDebugEnabled()) { LOGGER.debug("WorkerNumber->StageMap {}", mantisJobMetaData.getWorkerNumberToStageMap()); } int stageNum = mantisJobMetaData.getWorkerNumberToStageMap().get(workerMeta.getWorkerNumber()); Optional<IMantisStageMetadata> stageMetaOp = mantisJobMetaData.getStageMetadata(stageNum); if (stageMetaOp.isPresent()) { // Mark work as terminal WorkerTerminate terminateEvent = new WorkerTerminate(workerId, finalWorkerState, 
reason); MantisStageMetadataImpl stageMetaData = (MantisStageMetadataImpl) stageMetaOp.get(); Optional<JobWorker> workerOp = stageMetaData.processWorkerEvent(terminateEvent, jobStore); eventPublisher.publishStatusEvent(new LifecycleEventsProto.WorkerStatusEvent(INFO, "Removing worker, reason: " + reason.name(), workerMeta.getStageNum(), workerMeta.getWorkerId(), workerMeta.getState())); // remove this worker index and archives the worker stageMetaData.unsafeRemoveWorker(workerId.getWorkerIndex(), workerId.getWorkerNum(), jobStore); // call vmservice terminate scheduler.unscheduleAndTerminateWorker(workerMeta.getWorkerId(), Optional.ofNullable( workerMeta.getSlave())); //remove from workerNumber to stage map mantisJobMetaData.removeWorkerMetadata(workerMeta.getWorkerNumber()); LOGGER.info("Terminated worker {}", workerMeta); markStageAssignmentsChanged(true); } else { LOGGER.error("Stage {} not found while terminating worker {}", stageNum, workerId); } } catch (Exception e) { LOGGER.error("Error terminating worker {}", workerMeta.getWorkerId(), e); } } @Override public void refreshAndSendWorkerAssignments() { LOGGER.trace("In WorkerManager::refreshAndSendWorkerAssignments"); refreshStageAssignmentsAndPush(); LOGGER.trace("Exit WorkerManager::refreshAndSendWorkerAssignments"); } @Override public void checkHeartBeats(Instant currentTime) { LOGGER.trace("In WorkerManager::checkHeartBeats"); //// heartbeat misses are calculated as 3 * heartbeatInterval, pick 1.5 multiplier for this check interval long missedHeartBeatToleranceSecs = (long) (1.5 * ConfigurationProvider.getConfig().getWorkerTimeoutSecs()); // Allow more time for workers to start long stuckInSubmitToleranceSecs = missedHeartBeatToleranceSecs + 120; List<JobWorker> workersToResubmit = Lists.newArrayList(); // expire worker resubmit entries resubmitRateLimiter.expireResubmitRecords(currentTime.toEpochMilli()); // For each stage for (Iterator<? 
extends IMantisStageMetadata> stageIt = mantisJobMetaData.getStageMetadata().values().iterator(); stageIt.hasNext();) { IMantisStageMetadata stage = stageIt.next(); // For each worker in the stage for (Iterator<JobWorker> workerIt = stage.getAllWorkers().iterator(); workerIt.hasNext();) { JobWorker worker = workerIt.next(); IMantisWorkerMetadata workerMeta = worker.getMetadata(); if (!workerMeta.getLastHeartbeatAt().isPresent()) { System.out.println("1"); Instant acceptedAt = Instant.ofEpochMilli(workerMeta.getAcceptedAt()); if (Duration.between(acceptedAt, currentTime).getSeconds() > stuckInSubmitToleranceSecs) { // worker stuck in accepted workersToResubmit.add(worker); eventPublisher.publishStatusEvent(new LifecycleEventsProto.WorkerStatusEvent(WARN, "worker stuck in Accepted state, resubmitting worker", workerMeta.getStageNum(), workerMeta.getWorkerId(), workerMeta.getState())); } } else { LOGGER.debug("Duration between last heartbeat and now {} ", Duration.between(workerMeta.getLastHeartbeatAt().get(), currentTime).getSeconds()); if (Duration.between(workerMeta.getLastHeartbeatAt().get(), currentTime).getSeconds() > missedHeartBeatToleranceSecs) { // heartbeat too old LOGGER.info("Job {}, Worker {} Duration between last heartbeat and now {} " + "missed heart beat threshold {} exceeded", this.jobMgr.getJobId(), workerMeta.getWorkerId(), Duration.between(workerMeta.getLastHeartbeatAt().get(), currentTime).getSeconds(), missedHeartBeatToleranceSecs); if (ConfigurationProvider.getConfig().isHeartbeatTerminationEnabled()) { eventPublisher.publishStatusEvent(new LifecycleEventsProto.WorkerStatusEvent(WARN, "heartbeat too old, resubmitting worker", workerMeta.getStageNum(), workerMeta.getWorkerId(), workerMeta.getState())); workersToResubmit.add(worker); } else { LOGGER.warn("Heart beat based termination is disabled. 
Skipping termination of " + "worker {} Please see mantis.worker.heartbeat.termination.enabled", workerMeta); } } } } } for (JobWorker worker : workersToResubmit) { try { resubmitWorker(worker); } catch (Exception e) { LOGGER.warn("Exception {} occurred resubmitting Worker {}", e.getMessage(), worker.getMetadata(), e); } } migrateDisabledVmWorkers(currentTime); } @Override public void migrateDisabledVmWorkers(Instant currentTime) { LOGGER.trace("Enter migrateDisabledVmWorkers Workers To Migrate {} for Job {}", workersToMigrate, jobId); if (!workersToMigrate.isEmpty()) { Map<Integer, Integer> workerToStageMap = mantisJobMetaData.getWorkerNumberToStageMap(); final List<Integer> workers = migrationStrategy.execute(workersToMigrate, getNumberOfWorkersInStartedState(), getTotalWorkerCount(), lastWorkerMigrationTimestamp); if (!workers.isEmpty()) { LOGGER.info("Job {} Going to migrate {} workers in this iteration", jobId, workers.size()); } workers.forEach((w) -> { if (workerToStageMap.containsKey(w)) { int stageNo = workerToStageMap.get(w); Optional<IMantisStageMetadata> stageMetaOp = mantisJobMetaData.getStageMetadata(stageNo); if (stageMetaOp.isPresent()) { JobWorker jobWorker = null; try { jobWorker = stageMetaOp.get().getWorkerByWorkerNumber(w); IMantisWorkerMetadata wm = jobWorker.getMetadata(); LOGGER.info("Moving worker {} of job {} away from disabled VM", wm.getWorkerId(), jobId); eventPublisher.publishStatusEvent(new LifecycleEventsProto.WorkerStatusEvent(INFO, " Moving out of disabled VM " + wm.getSlave(), wm.getStageNum(), wm.getWorkerId(), wm.getState())); resubmitWorker(jobWorker); lastWorkerMigrationTimestamp = System.currentTimeMillis(); } catch (Exception e) { LOGGER.warn("Exception resubmitting worker {} during migration due to {}", jobWorker, e.getMessage(), e); } } else { LOGGER.warn("Stage {} Not Found. 
Skip move for worker {} in Job {}", stageNo, w, jobId); } } else { LOGGER.warn("worker {} not found in workerToStageMap {} for Job {}", w, workerToStageMap, jobId); } }); } if (LOGGER.isTraceEnabled()) { LOGGER.trace("Exit migrateWorkersIfNeeded Workers To Migrate {} for Job {}", workersToMigrate, jobId); } } private Optional<IMantisStageMetadata> getStageForWorker(WorkerEvent event) { if (LOGGER.isTraceEnabled()) { LOGGER.trace("Enter getStageForWorker with Num {} in Job {} ", event.getWorkerId().getWorkerNum(), this.jobMgr.getJobId()); } // Make sure we know about this worker. If not terminate it Map<Integer, Integer> workerToStageMap = mantisJobMetaData.getWorkerNumberToStageMap(); if (LOGGER.isDebugEnabled()) { LOGGER.debug("Worker to Stage Map {} in Job {}", workerToStageMap, this.jobMgr.getJobId()); } if (!workerToStageMap.containsKey(event.getWorkerId().getWorkerNum())) { LOGGER.warn("Event {} from Unknown worker {} ", event.getWorkerId(), event); LOGGER.trace("Exit getStageForWorker"); return empty(); } // Find stage associated with this worker Integer stageNum = workerToStageMap.get(event.getWorkerId().getWorkerNum()); Optional<IMantisStageMetadata> stageMetaOp = mantisJobMetaData.getStageMetadata(stageNum); if (!stageMetaOp.isPresent()) { LOGGER.warn("Stage {} not found in Job {} while processing event {}", stageNum, jobId, event); } LOGGER.trace("Exit getStageForWorker"); return stageMetaOp; } private void terminateUnknownWorkerIfNonTerminal(final WorkerEvent event) { if (!JobHelper.isTerminalWorkerEvent(event)) { LOGGER.warn("Non terminal event from Unknown worker {} in Job {}. Request Termination", event.getWorkerId(), this.jobMgr.getJobId()); Optional<String> host = JobHelper.getWorkerHostFromWorkerEvent(event); scheduler.unscheduleAndTerminateWorker(event.getWorkerId(), host); } else { LOGGER.warn("Job {} Terminal event from Unknown worker {}. 
Ignoring", jobId, event.getWorkerId()); } } @Override public void processEvent(WorkerEvent event, JobState jobState) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Processing worker event in Worker Manager {} in Job {}", event, this.jobMgr.getJobId()); } try { Optional<IMantisStageMetadata> stageMetaOp = getStageForWorker(event); if (!stageMetaOp.isPresent()) { terminateUnknownWorkerIfNonTerminal(event); return; } // If worker cannot be scheduled currently, then put it back on the queue with delay and don't update // its state if (event instanceof WorkerUnscheduleable) { scheduler.updateWorkerSchedulingReadyTime(event.getWorkerId(), resubmitRateLimiter.getWorkerResubmitTime(event.getWorkerId(), stageMetaOp.get().getStageNum())); eventPublisher.publishStatusEvent(new LifecycleEventsProto.WorkerStatusEvent( LifecycleEventsProto.StatusEvent.StatusEventType.ERROR, "rate limiting: no resources to fit worker", ((WorkerUnscheduleable) event).getStageNum(), event.getWorkerId(), WorkerState.Accepted)); return; } MantisStageMetadataImpl stageMeta = (MantisStageMetadataImpl) stageMetaOp.get(); try { // Delegate processing of the event to the stage Optional<JobWorker> workerOp = stageMeta.processWorkerEvent(event, jobStore); if (!workerOp.isPresent()) { terminateUnknownWorkerIfNonTerminal(event); return; } IMantisWorkerMetadata wm = workerOp.get().getMetadata(); // If we need to migrate off of disabled VM add it to the queue if (event instanceof WorkerOnDisabledVM) { workersToMigrate.add(wm.getWorkerNumber()); return; } // Worker transitioned to terminal state resubmit if (WorkerState.isErrorState(wm.getState()) && !JobState.isTerminalState(jobState)) { eventPublisher.publishStatusEvent(new LifecycleEventsProto.WorkerStatusEvent(WARN, "resubmitting lost worker ", wm.getStageNum(), wm.getWorkerId(), wm.getState())); resubmitWorker(workerOp.get()); return; } else if (WorkerState.isTerminalState(wm.getState())) { // worker has explicitly // completed complete job 
jobStore.archiveWorker(wm); LOGGER.info("Received Worker Complete signal. Wait for all workers to complete before " + "terminating Job {}", jobId); } if (!(event instanceof WorkerHeartbeat)) { markStageAssignmentsChanged(false); } } catch (Exception e) { LOGGER.warn("Exception saving worker update", e); } if (!allWorkersStarted && !JobState.isTerminalState(jobState)) { if (allWorkerStarted()) { allWorkersStarted = true; jobMgr.onAllWorkersStarted(); markStageAssignmentsChanged(true); } else if (allWorkerCompleted()) { LOGGER.info("Job {} All workers completed1", jobId); allWorkersStarted = false; jobMgr.onAllWorkersCompleted(); } } else { if (allWorkerCompleted()) { LOGGER.info("Job {} All workers completed", jobId); allWorkersStarted = false; jobMgr.onAllWorkersCompleted(); } } } catch (Exception e1) { e1.printStackTrace(); LOGGER.error("Job {} Exception occurred in process worker event ", jobId, e1); } } private boolean allWorkerStarted() { Iterator<? extends IMantisStageMetadata> iterator = mantisJobMetaData.getStageMetadata().values().iterator(); while (iterator.hasNext()) { MantisStageMetadataImpl stageMeta = (MantisStageMetadataImpl) iterator.next(); if (!stageMeta.isAllWorkerStarted()) { return false; } } return true; } private int getNumberOfWorkersInStartedState() { return mantisJobMetaData.getStageMetadata().values().stream() .map((stageMeta) -> ((MantisStageMetadataImpl) stageMeta).getNumStartedWorkers()) .reduce(0, (acc, num) -> acc + num); } private int getTotalWorkerCount() { return mantisJobMetaData.getStageMetadata().values().stream() .map(IMantisStageMetadata::getNumWorkers) .reduce(0, (acc, num) -> acc + num); } private boolean allWorkerCompleted() { Iterator<? 
extends IMantisStageMetadata> iterator = mantisJobMetaData.getStageMetadata().values().iterator(); while (iterator.hasNext()) { MantisStageMetadataImpl stageMeta = (MantisStageMetadataImpl) iterator.next(); // skip job master worker if (stageMeta.getStageNum() == 0) { continue; } if (!stageMeta.isAllWorkerCompleted()) { return false; } } return true; } @Override public void resubmitWorker(int workerNum) throws Exception { Map<Integer, Integer> workerToStageMap = mantisJobMetaData.getWorkerNumberToStageMap(); if (workerToStageMap.containsKey(workerNum)) { int stageNum = workerToStageMap.get(workerNum); Optional<IMantisStageMetadata> stageMeta = mantisJobMetaData.getStageMetadata(stageNum); if (stageMeta.isPresent()) { JobWorker worker = stageMeta.get().getWorkerByWorkerNumber(workerNum); resubmitWorker(worker); } else { throw new Exception(String.format("Invalid stage {} in resubmit Worker request {}", stageNum, workerNum)); } } else { LOGGER.warn("No such Worker number {} in Job with ID {}", workerNum, jobId); throw new Exception(String.format("No such worker number {} in resubmit Worker request", workerNum)); } } @Override public List<IMantisWorkerMetadata> getActiveWorkers(int limit) { List<IMantisWorkerMetadata> workers = mantisJobMetaData.getStageMetadata().values() .stream() .flatMap((st) -> st.getAllWorkers().stream()) .filter((worker) -> !WorkerState.isTerminalState(worker.getMetadata().getState())) .map(JobWorker::getMetadata) .collect(Collectors.toList()); if (workers.size() > limit) { return workers.subList(0, limit); } else { return workers; } } @Override public BehaviorSubject<JobSchedulingInfo> getJobStatusSubject() { return this.jobSchedulingInfoBehaviorSubject; } private void resubmitWorker(JobWorker oldWorker) throws Exception { LOGGER.info("Resubmitting worker {}", oldWorker.getMetadata()); Map<Integer, Integer> workerToStageMap = mantisJobMetaData.getWorkerNumberToStageMap(); IMantisWorkerMetadata oldWorkerMetadata = oldWorker.getMetadata(); if 
(oldWorkerMetadata.getTotalResubmitCount() < ConfigurationProvider.getConfig().getMaximumResubmissionsPerWorker()) { Integer stageNo = workerToStageMap.get(oldWorkerMetadata.getWorkerId().getWorkerNum()); if (stageNo == null) { String errMsg = String.format("Stage {} not found in Job {} while resubmiting worker {}", stageNo, jobId, oldWorker); LOGGER.warn(errMsg); throw new Exception(errMsg); } Optional<IMantisStageMetadata> stageMetaOp = mantisJobMetaData.getStageMetadata(stageNo); if (!stageMetaOp.isPresent()) { String errMsg = String.format("Stage {} not found in Job {} while resubmiting worker {}", stageNo, jobId, oldWorker); LOGGER.warn(errMsg); throw new Exception(errMsg); } MantisStageMetadataImpl stageMeta = (MantisStageMetadataImpl) stageMetaOp.get(); JobWorker newWorker = new JobWorker.Builder() .withJobId(jobId) .withWorkerIndex(oldWorkerMetadata.getWorkerIndex()) .withWorkerNumber(workerNumberGenerator.getNextWorkerNumber(mantisJobMetaData, jobStore)) .withNumberOfPorts(stageMeta.getMachineDefinition().getNumPorts() + MANTIS_SYSTEM_ALLOCATED_NUM_PORTS) .withStageNum(oldWorkerMetadata.getStageNum()) .withResubmitCount(oldWorkerMetadata.getTotalResubmitCount() + 1) .withResubmitOf(oldWorkerMetadata.getWorkerNumber()) .withLifecycleEventsPublisher(eventPublisher) .build(); mantisJobMetaData.replaceWorkerMetaData(oldWorkerMetadata.getStageNum(), newWorker, oldWorker, jobStore); // kill the task if it is still running scheduler.unscheduleAndTerminateWorker(oldWorkerMetadata.getWorkerId(), Optional.ofNullable(oldWorkerMetadata.getSlave())); long workerResubmitTime = resubmitRateLimiter.getWorkerResubmitTime( newWorker.getMetadata().getWorkerId(), stageMeta.getStageNum()); Optional<Long> delayDuration = of(workerResubmitTime); // publish a refresh before enqueuing new Task to Scheduler markStageAssignmentsChanged(true); // queue the new worker for execution queueTask(newWorker.getMetadata(), delayDuration); LOGGER.info("Worker {} successfully queued for 
scheduling", newWorker); numWorkerResubmissions.increment(); } else { // todo numWorkerResubmitLimitReached.increment(); LOGGER.error("Resubmit count exceeded"); jobMgr.onTooManyWorkerResubmits(); } } /** * Preconditions : Stage is Valid and scalable * Determines the actual no of workers for this stage within min and max, updates the expected num workers * first and saves to store. * (If that fails we abort the operation) * then continues adding/terminating worker one by one. * If an exception occurs adding/removing any worker we continue forward with others. * Heartbeat check should kick in and resubmit any workers that didn't get scheduled */ @Override public int scaleStage(MantisStageMetadataImpl stageMetaData, int numWorkers, String reason) { LOGGER.info("Scaling stage {} to {} workers", stageMetaData.getStageNum(), numWorkers); final int oldNumWorkers = stageMetaData.getNumWorkers(); int max = ConfigurationProvider.getConfig().getMaxWorkersPerStage(); int min = 0; if (stageMetaData.getScalingPolicy() != null) { max = stageMetaData.getScalingPolicy().getMax(); min = stageMetaData.getScalingPolicy().getMin(); } // sanitize input worker count to be between min and max int newNumWorkerCount = Math.max(Math.min(numWorkers, max), min); if (newNumWorkerCount != oldNumWorkers) { try { stageMetaData.unsafeSetNumWorkers(newNumWorkerCount, jobStore); eventPublisher.publishStatusEvent(new LifecycleEventsProto.JobStatusEvent(INFO, "Setting #workers to " + newNumWorkerCount + " for stage " + stageMetaData.getStageNum() + ", reason=" + reason, getJobId(), getJobState())); } catch (Exception e) { String error = String.format("Exception updating stage {} worker count for Job {} due to {}", stageMetaData.getStageNum(), jobId, e.getMessage()); LOGGER.warn(error); eventPublisher.publishStatusEvent(new LifecycleEventsProto.JobStatusEvent(WARN, "Scaling stage failed for stage " + stageMetaData.getStageNum() + " reason: " + e.getMessage(), getJobId(), getJobState())); throw new 
RuntimeException(error); } if (newNumWorkerCount > oldNumWorkers) { for (int i = 0; i < newNumWorkerCount - oldNumWorkers; i++) { try { int newWorkerIndex = oldNumWorkers + i; SchedulingInfo schedInfo = mantisJobMetaData.getJobDefinition().getSchedulingInfo(); IMantisWorkerMetadata workerRequest = addWorker(schedInfo, stageMetaData.getStageNum(), newWorkerIndex); jobStore.storeNewWorker(workerRequest); markStageAssignmentsChanged(true); queueTask(workerRequest); } catch (Exception e) { // creating a worker failed but expected no of workers was set successfully, // during heartbeat check we will // retry launching this worker LOGGER.warn("Exception adding new worker for {}", stageMetaData.getJobId().getId(), e); } } } else { // potential bulk removal opportunity? for (int i = 0; i < oldNumWorkers - newNumWorkerCount; i++) { try { final JobWorker w = stageMetaData.getWorkerByIndex(oldNumWorkers - i - 1); terminateAndRemoveWorker(w.getMetadata(), WorkerState.Completed, JobCompletedReason.Killed); } catch (InvalidJobException e) { // deleting a worker failed but expected no of workers was set successfully, // during heartbeat check we will // retry killing this worker LOGGER.warn("Exception terminating worker for {}", stageMetaData.getJobId().getId(), e); } } } } LOGGER.info("{} Scaled stage to {} workers", stageMetaData.getJobId().getId(), newNumWorkerCount); return newNumWorkerCount; } } }
4,392
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/FilterableMantisJobMetadataWritable.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.jobcluster.job; import java.net.URL; import java.util.List; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonFilter; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty; import io.mantisrx.common.Label; import io.mantisrx.runtime.JobSla; import io.mantisrx.runtime.MantisJobState; import io.mantisrx.runtime.WorkerMigrationConfig; import io.mantisrx.runtime.parameter.Parameter; import io.mantisrx.server.master.store.MantisJobMetadataWritable; @JsonFilter("jobMetadata") public class FilterableMantisJobMetadataWritable extends MantisJobMetadataWritable { @JsonCreator @JsonIgnoreProperties(ignoreUnknown = true) public FilterableMantisJobMetadataWritable(@JsonProperty("jobId") String jobId, @JsonProperty("name") String name, @JsonProperty("user") String user, @JsonProperty("submittedAt") long submittedAt, @JsonProperty("startedAt") long startedAt, @JsonProperty("jarUrl") URL jarUrl, @JsonProperty("numStages") int numStages, @JsonProperty("sla") JobSla sla, @JsonProperty("state") MantisJobState state, @JsonProperty("subscriptionTimeoutSecs") long subscriptionTimeoutSecs, @JsonProperty("parameters") List<Parameter> parameters, @JsonProperty("nextWorkerNumberToUse") int nextWorkerNumberToUse, 
@JsonProperty("migrationConfig") WorkerMigrationConfig migrationConfig, @JsonProperty("labels") List<Label> labels) { super(jobId, name, user, submittedAt, startedAt, jarUrl, numStages, sla, state, subscriptionTimeoutSecs, parameters, nextWorkerNumberToUse, migrationConfig, labels); } }
4,393
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/JobHelper.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.jobcluster.job; import static java.util.Optional.empty; import static java.util.Optional.ofNullable; import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.Optional; import io.mantisrx.master.jobcluster.job.worker.WorkerHeartbeat; import io.mantisrx.master.jobcluster.job.worker.WorkerState; import io.mantisrx.master.jobcluster.job.worker.WorkerStatus; import io.mantisrx.master.jobcluster.job.worker.WorkerTerminate; import io.mantisrx.runtime.descriptor.SchedulingInfo; import io.mantisrx.server.master.scheduler.WorkerEvent; import io.mantisrx.server.master.scheduler.WorkerLaunched; import io.mantisrx.server.master.scheduler.WorkerResourceStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * General Job utility methods. */ public final class JobHelper { private static final Logger LOGGER = LoggerFactory.getLogger(JobHelper.class); private JobHelper() { } /** * Give scheduling info and whether job has Job Master return the list of user stages. * Job Master stage is considered a system stage so is excluded if present. 
* @param schedulingInfo * @param hasJobMaster * @return */ public static List<Integer> getUserStageNumbers(SchedulingInfo schedulingInfo, boolean hasJobMaster) { List<Integer> stageNumbers = new ArrayList<>(); int totalStages = schedulingInfo.getStages().size(); if (hasJobMaster) { totalStages = totalStages - 1; } for (int i = 1; i <= totalStages; i++) { stageNumbers.add(i); } return stageNumbers; } /** * Determines whether a workerevent is terminal. * @param workerEvent * @return */ public static boolean isTerminalWorkerEvent(WorkerEvent workerEvent) { if (workerEvent instanceof WorkerTerminate) { return true; } else if (workerEvent instanceof WorkerStatus) { WorkerStatus status = (WorkerStatus) workerEvent; if (WorkerState.isTerminalState(status.getState())) { return true; } } else if (workerEvent instanceof WorkerResourceStatus) { WorkerResourceStatus.VMResourceState state = ((WorkerResourceStatus) workerEvent).getState(); if (WorkerResourceStatus.VMResourceState.FAILED.equals(state) || WorkerResourceStatus.VMResourceState.COMPLETED.equals(state)) { return true; } } return false; } /** * Extract hostname from worker event if present. * @param event * @return */ public static Optional<String> getWorkerHostFromWorkerEvent(WorkerEvent event) { Optional<String> host = empty(); if (event instanceof WorkerLaunched) { host = ofNullable(((WorkerLaunched) event).getHostname()); } else if (event instanceof WorkerHeartbeat) { host = ofNullable(((WorkerHeartbeat) event).getStatus().getHostname()); } else { LOGGER.warn("Host name unknown for workerId {}", event.getWorkerId()); } return host; } /** * Called after a) All workers started and job goes to Launched state * b) Mantis Master is restarting and its reinitializing this Job * This method calculates the remaining time for this job to run. 
* @param maxRuntimeSecs * @param startedAt * @return */ public static long calculateRuntimeDuration(long maxRuntimeSecs, Instant startedAt) { long terminateJobInSecs = maxRuntimeSecs; if (maxRuntimeSecs > 0) { Instant now = Instant.now(); if (now.isAfter(startedAt)) { // Job was already running (Occurs when master was restarted) long elapsedSeconds = now.getEpochSecond() - startedAt.getEpochSecond(); // Calculate remaining time to run terminateJobInSecs = maxRuntimeSecs - elapsedSeconds; if (terminateJobInSecs <= 0) { // Runtime has already reached terminate. // trigger terminate in a second. terminateJobInSecs = 1; } } } return terminateJobInSecs; } }
4,394
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/IMantisJobManager.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.master.jobcluster.job;

import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterProto.KillJobRequest;
import io.mantisrx.master.jobcluster.proto.JobProto;
import io.mantisrx.master.jobcluster.proto.JobProto.CheckHeartBeat;
import io.mantisrx.master.jobcluster.proto.JobProto.InitJob;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.scheduler.WorkerEvent;

/**
 * Declares the behavior of the Mantis Job Manager Actor: the lifecycle hooks, message
 * handlers, and accessors a job actor implementation must provide.
 */
public interface IMantisJobManager {

    /**
     * Returns the metadata associated with this job.
     *
     * @return this job's {@link IMantisJobMetadata}
     */
    IMantisJobMetadata getJobDetails();

    /**
     * Processes a scheduling-info request from a client by responding with a
     * {@link JobClusterManagerProto.GetJobSchedInfoResponse}, which streams details about
     * the workers of this job.
     *
     * @param r the scheduling-info request to handle
     */
    void onGetJobStatusSubject(JobClusterManagerProto.GetJobSchedInfoRequest r);

    /**
     * Processes a discovery-info request from a client by responding with a
     * {@link JobClusterManagerProto.GetLatestJobDiscoveryInfoResponse} carrying the latest
     * discovery info for this job.
     *
     * @param r the discovery-info request to handle
     */
    void onGetLatestJobDiscoveryInfo(JobClusterManagerProto.GetLatestJobDiscoveryInfoRequest r);

    /**
     * Processes worker related events: updates worker state and transitions workers to
     * new states as needed.
     *
     * @param e the worker event to process
     */
    void processWorkerEvent(WorkerEvent e);

    /**
     * Returns the {@link JobId} of this job.
     *
     * @return this job's id
     */
    JobId getJobId();

    /**
     * Returns the current {@link JobState} of this job.
     *
     * @return the current state
     */
    JobState getJobState();

    /**
     * Invoked when all workers of this job have entered the Started state for the first
     * time. This transitions the job into the Launched state.
     *
     * @return whether the transition was performed
     */
    boolean onAllWorkersStarted();

    /**
     * Invoked when all workers of this job have been terminated. Triggers final clean up
     * of this job.
     */
    void onAllWorkersCompleted();

    /**
     * Invoked when the number of automatic worker resubmits exceeds the configured
     * threshold. Triggers a job shutdown.
     *
     * @return whether the shutdown was initiated
     */
    boolean onTooManyWorkerResubmits();

    /**
     * Invoked when a job termination request is received. Tears down the job.
     *
     * @param state  the terminal {@link JobState} to transition to
     * @param reason a human-readable reason for the shutdown
     */
    void shutdown(JobState state, String reason);

    /**
     * If the job was launched with a runtime limit, this is invoked once that limit has
     * been reached; the job should then begin its termination process.
     *
     * @param r the runtime-limit-reached message
     */
    void onRuntimeLimitReached(JobProto.RuntimeLimitReached r);

    /**
     * Invoked by the Job Cluster Actor to commence job initialization.
     *
     * @param i the initialization message
     */
    void onJobInitialize(InitJob i);

    /**
     * Returns job details via a {@link JobClusterManagerProto.GetJobDetailsResponse}.
     *
     * @param r the details request to handle
     */
    void onGetJobDetails(GetJobDetailsRequest r);

    /**
     * Invoked periodically to verify all workers of this job have sent heartbeats within
     * a preconfigured interval.
     *
     * @param r the heartbeat-check trigger message
     */
    void onCheckHeartBeats(CheckHeartBeat r);

    /**
     * Invoked during agent fleet deployment to move workers onto the new agent fleet.
     *
     * @param r the migration request
     */
    void onMigrateWorkers(JobProto.MigrateDisabledVmWorkersRequest r);

    /**
     * Invoked to trigger job termination.
     *
     * @param req the kill request
     */
    void onJobKill(KillJobRequest req);

    /**
     * Invoked by either the Job Master or a user to change the number of workers of a
     * stage of this job.
     *
     * @param scaleStage the scale request naming the stage and the desired worker count
     */
    void onScaleStage(ScaleStageRequest scaleStage);

    /**
     * Invoked to explicitly resubmit a particular worker.
     *
     * @param r the resubmit request identifying the worker
     */
    void onResubmitWorker(ResubmitWorkerRequest r);

    /**
     * Returns a list of active workers for this job via a
     * {@link JobClusterManagerProto.ListWorkersResponse}.
     *
     * @param request the list-workers request to handle
     */
    void onListActiveWorkers(JobClusterManagerProto.ListWorkersRequest request);

    /**
     * Sends worker assignments to listeners if there have been changes since the last send.
     *
     * @param p the trigger message
     */
    void onSendWorkerAssignments(JobProto.SendWorkerAssignementsIfChanged p);
}
4,395
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/IMantisStageMetadata.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.master.jobcluster.job;

import java.util.Collection;
import java.util.List;

import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonSubTypes;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonTypeInfo;
import io.mantisrx.master.jobcluster.job.worker.JobWorker;
import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.persistence.exceptions.InvalidJobException;

/**
 * Represents the metadata associated with one stage of a Mantis Job: its identity,
 * worker resource configuration, placement constraints, scaling policy, and the set of
 * workers currently belonging to the stage.
 */
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.PROPERTY, property = "type")
@JsonSubTypes({ @JsonSubTypes.Type(value = MantisStageMetadataImpl.class) })
public interface IMantisStageMetadata {

    /**
     * Returns the {@link JobId} of the job this stage belongs to.
     *
     * @return the owning job's id
     */
    JobId getJobId();

    /**
     * Returns the stage number of this stage.
     *
     * @return this stage's number
     */
    int getStageNum();

    /**
     * Returns the total number of stages in the owning job.
     *
     * @return the total stage count
     */
    int getNumStages();

    /**
     * Returns the {@link MachineDefinition} associated with this stage: the resource
     * configuration (cpu, memory, etc.) of the workers of this stage.
     *
     * @return the worker resource configuration
     */
    MachineDefinition getMachineDefinition();

    /**
     * Returns the total number of workers for this stage.
     *
     * @return the worker count
     */
    int getNumWorkers();

    /**
     * Returns the list of mandatory {@link JobConstraints} for placing this stage's workers.
     *
     * @return the hard placement constraints
     */
    List<JobConstraints> getHardConstraints();

    /**
     * Returns the list of best-effort {@link JobConstraints} for placing this stage's workers.
     *
     * @return the soft placement constraints
     */
    List<JobConstraints> getSoftConstraints();

    /**
     * Returns the {@link StageScalingPolicy} for this stage.
     *
     * @return the scaling policy
     */
    StageScalingPolicy getScalingPolicy();

    /**
     * Returns true if this stage is scalable.
     *
     * @return whether the stage can be scaled
     */
    boolean getScalable();

    /**
     * Returns the {@link JobWorker} instances associated with this stage.
     *
     * @return this stage's workers
     * @deprecated use {@link #getAllWorkers()} instead
     */
    @Deprecated
    Collection<JobWorker> getWorkerByIndexMetadataSet();

    /**
     * Returns the {@link JobWorker} instances associated with this stage.
     *
     * @return this stage's workers
     */
    Collection<JobWorker> getAllWorkers();

    /**
     * Returns the {@link JobWorker} with the given worker index.
     *
     * @param workerIndex the index of the worker within this stage
     * @return the matching worker
     * @throws InvalidJobException if no worker with that index exists
     */
    JobWorker getWorkerByIndex(int workerIndex) throws InvalidJobException;

    /**
     * Returns the {@link JobWorker} with the given worker number.
     *
     * @param workerNumber the job-unique number of the worker
     * @return the matching worker
     * @throws InvalidJobException if no worker with that number exists
     */
    JobWorker getWorkerByWorkerNumber(int workerNumber) throws InvalidJobException;
}
4,396
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/IMantisJobMetadata.java
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.mantisrx.master.jobcluster.job;

import java.net.URL;
import java.time.Instant;
import java.util.List;
import java.util.Map;
import java.util.Optional;

import io.mantisrx.common.Label;
import io.mantisrx.master.jobcluster.job.worker.JobWorker;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.persistence.exceptions.InvalidJobException;

/**
 * The metadata associated with a Mantis Job: identity, submitter, lifecycle timestamps,
 * SLA, parameters, labels, per-stage metadata, and the originating {@link JobDefinition}.
 */
public interface IMantisJobMetadata {

    // Sentinel "not started" epoch value used where a start time is absent.
    long DEFAULT_STARTED_AT_EPOCH = 0;

    /**
     * Returns the {@link JobId} of this job.
     *
     * @return this job's id
     */
    JobId getJobId();

    /**
     * Returns the Job Cluster name for this job.
     *
     * @return the owning cluster's name
     */
    String getClusterName();

    /**
     * Returns the submitter of this job.
     *
     * @return the submitting user
     */
    String getUser();

    /**
     * Returns the {@link Instant} this job was submitted.
     *
     * @return the submission instant
     */
    Instant getSubmittedAtInstant();

    /**
     * Returns the instant this job went into the started state, if it has started.
     *
     * @return the start instant, or empty if the job has not started
     */
    Optional<Instant> getStartedAtInstant();

    /**
     * Returns the instant this job completed, if it has completed.
     *
     * @return the completion instant, or empty if the job has not completed
     */
    Optional<Instant> getEndedAtInstant();

    /**
     * Returns the name of the artifact associated with this job.
     *
     * @return the artifact name
     */
    String getArtifactName();

    /**
     * Returns the {@link JobSla} for this job, if one exists.
     *
     * @return the SLA, or empty if none was set
     */
    Optional<JobSla> getSla();

    /**
     * Returns the subscription timeout in seconds associated with this job.
     *
     * @return the subscription timeout in seconds
     */
    long getSubscriptionTimeoutSecs();

    /**
     * Returns the current state of this job.
     *
     * @return the current {@link JobState}
     */
    JobState getState();

    /**
     * Returns the list of {@link Parameter} associated with this job.
     *
     * @return the job parameters
     */
    List<Parameter> getParameters();

    /**
     * Returns the list of {@link Label} associated with this job.
     *
     * @return the job labels
     */
    List<Label> getLabels();

    /**
     * Returns metadata about all the stages of this job, keyed by stage number.
     *
     * @return stage-number to stage-metadata map
     */
    Map<Integer, ? extends IMantisStageMetadata> getStageMetadata();

    /**
     * Returns the number of stages in this job.
     *
     * @return the total stage count
     */
    int getTotalStages();

    /**
     * Returns the {@link IMantisStageMetadata} for the given stage number, if one exists.
     *
     * @param stageNum the stage number to look up
     * @return the stage metadata, or empty if the stage does not exist
     */
    Optional<IMantisStageMetadata> getStageMetadata(int stageNum);

    /**
     * Returns the {@link JobWorker} at the given stage number and worker index, if one exists.
     *
     * @param stageNumber the stage to look in
     * @param workerIndex the worker index within that stage
     * @return the worker, or empty if not found
     * @throws InvalidJobException if the lookup is invalid for this job
     */
    Optional<JobWorker> getWorkerByIndex(int stageNumber, int workerIndex) throws InvalidJobException;

    /**
     * Returns the {@link JobWorker} with the given worker number, if one exists.
     *
     * @param workerNumber the job-unique worker number
     * @return the worker, or empty if not found
     * @throws InvalidJobException if the lookup is invalid for this job
     */
    Optional<JobWorker> getWorkerByNumber(int workerNumber) throws InvalidJobException;

    /**
     * Worker numbers are assigned incrementally; returns the next number to use.
     *
     * @return the next worker number to assign
     */
    int getNextWorkerNumberToUse();

    /**
     * Returns the {@link SchedulingInfo} associated with this job.
     *
     * @return the scheduling info
     */
    SchedulingInfo getSchedulingInfo();

    /**
     * Returns the minimum runtime in seconds associated with this job (defaults to -1).
     *
     * @return the minimum runtime in seconds
     */
    long getMinRuntimeSecs();

    /**
     * Returns a {@link URL} pointing to the artifact used by this job. In reality this is
     * not interpreted as a URL; the trailing portion is used to identify the artifact.
     *
     * @return the artifact locator
     */
    URL getJobJarUrl();

    /**
     * Returns the {@link JobDefinition} this job was created from.
     *
     * @return the job definition
     */
    JobDefinition getJobDefinition();
}
4,397
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/FilterableMantisWorkerMetadataWritable.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.jobcluster.job; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonFilter; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties; import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty; import io.mantisrx.server.master.store.MantisWorkerMetadataWritable; @JsonFilter("workerMetadataList") public class FilterableMantisWorkerMetadataWritable extends MantisWorkerMetadataWritable { @JsonCreator @JsonIgnoreProperties(ignoreUnknown=true) public FilterableMantisWorkerMetadataWritable(@JsonProperty("workerIndex") int workerIndex, @JsonProperty("workerNumber") int workerNumber, @JsonProperty("jobId") String jobId, @JsonProperty("stageNum") int stageNum, @JsonProperty("numberOfPorts") int numberOfPorts) { super(workerIndex, workerNumber, jobId, stageNum, numberOfPorts); } }
4,398
0
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster
Create_ds/mantis-control-plane/server/src/main/java/io/mantisrx/master/jobcluster/job/JobState.java
/* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.master.jobcluster.job; import java.util.HashMap; import java.util.Map; /** * Declares the states a Job can be in. */ public enum JobState { /** * The Initial job state. */ Accepted, /** * Indicates the job is running. */ Launched, // scheduled and sent to slave /** * Indicates the job is in the process of terminating due to an error. */ Terminating_abnormal, /** * Indicates the job is in the process of terminating due to normal reasons. */ Terminating_normal, /** * Indicates job is in terminal state and that the termination was abnormal. */ Failed, // OK to handle as a resubmit /** * Indicates job is in terminal state and that the termination was normal. */ Completed, // terminal state, not necessarily successful /** * Place holder state. 
*/ Noop; // internal use only private static final Map<JobState, JobState[]> VALID_CHANGES; private static final Map<JobState, MetaState> META_STATES; static { VALID_CHANGES = new HashMap<>(); VALID_CHANGES.put(Accepted, new JobState[] { Accepted, Launched, Terminating_abnormal, Terminating_normal, Failed, Completed }); VALID_CHANGES.put(Launched, new JobState[] { Launched, Terminating_abnormal, Terminating_normal, Failed, Completed }); VALID_CHANGES.put(Terminating_abnormal, new JobState[] { Terminating_abnormal, Failed }); VALID_CHANGES.put(Terminating_normal, new JobState[] { Terminating_normal, Completed }); VALID_CHANGES.put(Failed, new JobState[] {}); VALID_CHANGES.put(Completed, new JobState[] {}); META_STATES = new HashMap<>(); META_STATES.put(Accepted, MetaState.Active); META_STATES.put(Launched, MetaState.Active); META_STATES.put(Failed, MetaState.Terminal); META_STATES.put(Completed, MetaState.Terminal); META_STATES.put(Terminating_abnormal, MetaState.Terminal); META_STATES.put(Terminating_normal, MetaState.Terminal); } /** * A higher level roll up of states indicating active or terminal status of the job. */ public enum MetaState { /** * Indicates the job is active. */ Active, /** * Indicates the job is completed. */ Terminal } /** * Rolls up given {@link JobState} to a {@link MetaState}. * * @param state * * @return */ public static MetaState toMetaState(JobState state) { return META_STATES.get(state); } /** * Checks if the transition to the given state is valid from current state. * * @param newState * * @return */ public boolean isValidStateChgTo(JobState newState) { for (JobState validState : VALID_CHANGES.get(this)) if (validState == newState) return true; return false; } /** * Returns true if the current state is terminal. 
* * @param state * * @return */ public static boolean isTerminalState(JobState state) { switch (state) { case Failed: case Completed: case Terminating_normal: case Terminating_abnormal: return true; default: return false; } } /** * Returns true if the current state is abnormal. * * @param started * * @return */ public static boolean isErrorState(JobState started) { switch (started) { case Failed: case Terminating_abnormal: return true; default: return false; } } /** * Returns true if the job is active. * * @param state * * @return */ public static boolean isRunningState(JobState state) { switch (state) { case Launched: return true; default: return false; } } }
4,399