index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/runtime | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/runtime/descriptor/StageSchedulingInfo.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.descriptor;
import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonInclude;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonInclude.Include;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.io.Serializable;
import java.util.List;
import java.util.Map;
import javax.annotation.Nullable;
import lombok.Builder;
import lombok.Singular;
/**
 * Describes how a single Mantis job stage should be scheduled: the number of
 * worker instances, the machine sizing per instance, hard/soft placement
 * constraints, an optional autoscaling policy, and optional container
 * attributes (e.g. an assigned sku ID).
 */
@Builder(toBuilder = true)
public class StageSchedulingInfo implements Serializable {

    private static final long serialVersionUID = 1L;

    private final int numberOfInstances;
    private final MachineDefinition machineDefinition;
    @Singular(ignoreNullCollections = true) private final List<JobConstraints> hardConstraints;
    @Singular(ignoreNullCollections = true) private final List<JobConstraints> softConstraints;
    @Nullable
    private final StageScalingPolicy scalingPolicy;
    private final boolean scalable;

    /**
     * Nullable field to store container attributes like sku ID assigned to this stage.
     */
    @JsonInclude(Include.NON_NULL)
    private final Map<String, String> containerAttributes;

    /**
     * Canonical constructor, also used by Jackson when deserializing.
     *
     * @param numberOfInstances   number of worker instances for this stage
     * @param machineDefinition   machine sizing for each instance
     * @param hardConstraints     placement constraints that must be satisfied
     * @param softConstraints     placement constraints that are best-effort
     * @param scalingPolicy       optional autoscaling policy; may be null
     * @param scalable            whether this stage may be scaled
     * @param containerAttributes optional container attributes; may be null
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public StageSchedulingInfo(@JsonProperty("numberOfInstances") int numberOfInstances,
                               @JsonProperty("machineDefinition") MachineDefinition machineDefinition,
                               @JsonProperty("hardConstraints") List<JobConstraints> hardConstraints,
                               @JsonProperty("softConstraints") List<JobConstraints> softConstraints,
                               @JsonProperty("scalingPolicy") StageScalingPolicy scalingPolicy,
                               @JsonProperty("scalable") boolean scalable,
                               @JsonProperty("containerAttributes") Map<String, String> containerAttributes) {
        this.numberOfInstances = numberOfInstances;
        this.machineDefinition = machineDefinition;
        this.hardConstraints = hardConstraints;
        this.softConstraints = softConstraints;
        this.scalingPolicy = scalingPolicy;
        this.scalable = scalable;
        this.containerAttributes = containerAttributes;
    }

    public int getNumberOfInstances() {
        return numberOfInstances;
    }

    public MachineDefinition getMachineDefinition() {
        return machineDefinition;
    }

    public List<JobConstraints> getHardConstraints() {
        return hardConstraints;
    }

    public List<JobConstraints> getSoftConstraints() {
        return softConstraints;
    }

    @Nullable
    public StageScalingPolicy getScalingPolicy() {
        return scalingPolicy;
    }

    public boolean getScalable() {
        return scalable;
    }

    public Map<String, String> getContainerAttributes() {
        return this.containerAttributes;
    }

    @Override
    public String toString() {
        return new StringBuilder("StageSchedulingInfo{")
            .append("numberOfInstances=").append(numberOfInstances)
            .append(", machineDefinition=").append(machineDefinition)
            .append(", hardConstraints=").append(hardConstraints)
            .append(", softConstraints=").append(softConstraints)
            .append(", scalingPolicy=").append(scalingPolicy)
            .append(", scalable=").append(scalable)
            .append(", containerAttributes=").append(containerAttributes)
            .append('}')
            .toString();
    }

    @Override
    public int hashCode() {
        // Field order and arithmetic match the previous hand-rolled form, so
        // hash values are unchanged across versions.
        int h = 1;
        h = 31 * h + java.util.Objects.hashCode(hardConstraints);
        h = 31 * h + java.util.Objects.hashCode(machineDefinition);
        h = 31 * h + numberOfInstances;
        h = 31 * h + (scalable ? 1231 : 1237);
        h = 31 * h + java.util.Objects.hashCode(scalingPolicy);
        h = 31 * h + java.util.Objects.hashCode(softConstraints);
        h = 31 * h + java.util.Objects.hashCode(containerAttributes);
        return h;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        StageSchedulingInfo that = (StageSchedulingInfo) obj;
        return numberOfInstances == that.numberOfInstances
            && scalable == that.scalable
            && java.util.Objects.equals(hardConstraints, that.hardConstraints)
            && java.util.Objects.equals(machineDefinition, that.machineDefinition)
            && java.util.Objects.equals(scalingPolicy, that.scalingPolicy)
            && java.util.Objects.equals(softConstraints, that.softConstraints)
            && java.util.Objects.equals(containerAttributes, that.containerAttributes);
    }
}
| 7,800 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/runtime | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/runtime/parameter/Parameter.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.parameter;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.io.Serializable;
import lombok.EqualsAndHashCode;
/**
 * An immutable name/value pair used to parameterize a Mantis job.
 */
@EqualsAndHashCode
public class Parameter implements Serializable {

    private static final long serialVersionUID = 1L;

    private final String name;
    private final String value;

    /**
     * Constructor, also used by Jackson when deserializing.
     *
     * @param name  parameter name
     * @param value parameter value
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public Parameter(@JsonProperty("name") String name,
                     @JsonProperty("value") String value) {
        this.name = name;
        this.value = value;
    }

    public String getName() {
        return name;
    }

    public String getValue() {
        return value;
    }

    @Override
    public String toString() {
        // Same rendering as the previous string-concatenation form.
        return String.format("Parameter{name='%s', value='%s'}", name, value);
    }
}
| 7,801 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/runtime | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/runtime/parameter/SinkParameter.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.parameter;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
/**
 * A single name/value query parameter used when connecting to a job's SSE
 * sink. The value is URL-encoded (UTF-8) eagerly at construction time.
 *
 * @author njoshi
 */
public class SinkParameter {

    private final String name;
    private final String value;
    private final String encodedValue;

    /**
     * Creates a sink parameter; a {@code null} value is normalized to "".
     *
     * @param name  parameter name
     * @param value raw (unencoded) value, may be null
     * @throws UnsupportedEncodingException if UTF-8 is unsupported (never on a compliant JVM)
     */
    public SinkParameter(String name, String value) throws UnsupportedEncodingException {
        this.name = name;
        this.value = (value == null) ? "" : value;
        this.encodedValue = URLEncoder.encode(this.value, "UTF-8");
    }

    public String getName() {
        return name;
    }

    public String getValue() {
        return value;
    }

    public String getEncodedValue() {
        return encodedValue;
    }

    @Override
    public int hashCode() {
        // Field order and arithmetic match the previous hand-rolled form.
        int h = 1;
        h = 31 * h + java.util.Objects.hashCode(encodedValue);
        h = 31 * h + java.util.Objects.hashCode(name);
        h = 31 * h + java.util.Objects.hashCode(value);
        return h;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        SinkParameter other = (SinkParameter) obj;
        return java.util.Objects.equals(encodedValue, other.encodedValue)
            && java.util.Objects.equals(name, other.name)
            && java.util.Objects.equals(value, other.value);
    }
}
| 7,802 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/runtime | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/runtime/parameter/SinkParameters.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.parameter;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
 * The ordered collection of query parameters used to connect to an SSE sink.
 * {@link #toString()} renders them as a URL query string, e.g. "?a=1&amp;b=2".
 *
 * @author njoshi
 */
public class SinkParameters {

    final List<SinkParameter> params;

    public SinkParameters(List<SinkParameter> params) {
        this.params = params;
    }

    /** @return a read-only view of the parameters */
    public List<SinkParameter> getSinkParams() {
        return Collections.unmodifiableList(this.params);
    }

    @Override
    public int hashCode() {
        // 31 * 1 + hash(params): identical to the previous hand-rolled form.
        return 31 + java.util.Objects.hashCode(params);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        return java.util.Objects.equals(params, ((SinkParameters) obj).params);
    }

    @Override
    public String toString() {
        if (params == null || params.isEmpty()) {
            return "";
        }
        StringBuilder out = new StringBuilder("?");
        String sep = "";
        for (SinkParameter param : params) {
            out.append(sep).append(param.getName()).append('=');
            // Only emit a value when both the raw and encoded forms are present.
            if (param.getValue() != null && param.getEncodedValue() != null) {
                out.append(param.getEncodedValue());
            }
            sep = "&";
        }
        return out.toString();
    }

    /** Fluent builder for {@link SinkParameters}. */
    public static class Builder {

        List<SinkParameter> parameters = new ArrayList<SinkParameter>();

        /** Adds all given parameters, in order. */
        public Builder parameters(SinkParameter... params) {
            Collections.addAll(this.parameters, params);
            return this;
        }

        /** Adds a single parameter. */
        public Builder withParameter(SinkParameter param) {
            this.parameters.add(param);
            return this;
        }

        /** Adds a single name/value parameter, URL-encoding the value. */
        public Builder withParameter(String name, String value) throws UnsupportedEncodingException {
            return this.parameters(new SinkParameter(name, value));
        }

        public SinkParameters build() {
            return new SinkParameters(parameters);
        }
    }
}
| 7,803 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/runtime | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/runtime/command/CommandException.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.command;
/**
 * Base checked exception for failures while executing a Mantis runtime command.
 */
public class CommandException extends Exception {

    private static final long serialVersionUID = 1L;

    /** @param message description of the failure */
    public CommandException(String message) {
        super(message);
    }

    /** @param cause underlying cause of the failure */
    public CommandException(Throwable cause) {
        super(cause);
    }

    /**
     * @param message description of the failure
     * @param cause   underlying cause of the failure
     */
    public CommandException(String message, Throwable cause) {
        super(message, cause);
    }
}
| 7,804 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/runtime | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/runtime/command/InvalidJobException.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.command;
/**
 * Thrown when a submitted Mantis job definition fails validation.
 */
public class InvalidJobException extends CommandException {
private static final long serialVersionUID = 1L;
/** @param message description of why the job definition is invalid */
public InvalidJobException(String message) {
super(message);
}
}
| 7,805 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/server/core/ServiceRegistry.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.core;
import io.mantisrx.common.properties.DefaultMantisPropertiesLoader;
import io.mantisrx.common.properties.MantisPropertiesLoader;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicReference;
import lombok.Value;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Process-wide holder for the {@link MantisPropertiesLoader} singleton.
 * The loader can be registered exactly once; subsequent attempts are logged
 * (with the stack trace captured at the first registration) and ignored.
 */
public class ServiceRegistry {

    private static final Logger logger = LoggerFactory.getLogger(ServiceRegistry.class);

    // final so the singleton handle cannot be reassigned at runtime
    // (it was previously a mutable public static field).
    public static final ServiceRegistry INSTANCE = new ServiceRegistry();

    private final AtomicReference<State> registryRef = new AtomicReference<>(null);

    private ServiceRegistry() {
    }

    /**
     * Registers the properties loader. Only the first registration wins; the
     * Exception stored alongside it records where that registration happened
     * so duplicate attempts can be diagnosed from the error log.
     *
     * @param service the loader to register
     */
    public void setMantisPropertiesService(MantisPropertiesLoader service) {
        logger.debug("Setting Mantis Properties Service to {}", service);
        if (!registryRef.compareAndSet(null, new State(service, new Exception()))) {
            logger.error(
                "MantisPropertiesService already set to {} as part of the below stacktrace",
                registryRef.get().getPropertiesLoader(), registryRef.get().getStackTrace());
        }
    }

    /**
     * Returns the registered loader, lazily installing a default one on first
     * use (the CAS in {@link #setMantisPropertiesService} keeps this safe
     * under concurrent callers).
     */
    public MantisPropertiesLoader getPropertiesService() {
        if (registryRef.get() == null) {
            setMantisPropertiesService(loadMantisPropertiesLoader());
        }
        return registryRef.get().getPropertiesLoader();
    }

    // Best effort: prefer the Netflix-internal fast-property loader when it is
    // on the classpath, otherwise fall back to the default implementation.
    private static MantisPropertiesLoader loadMantisPropertiesLoader() {
        MantisPropertiesLoader mpl = new DefaultMantisPropertiesLoader(new Properties());
        try {
            mpl = (MantisPropertiesLoader) Class.forName("com.netflix.mantis.common.properties.MantisFastPropertiesLoader")
                .getConstructor(Properties.class)
                .newInstance(new Properties());
        } catch (Exception e) {
            logger.warn("Could not load MantisFastPropertiesLoader");
        }
        return mpl;
    }

    /**
     * Immutable pair of the registered loader and the registration-site stack
     * trace. Static: it does not use the enclosing instance.
     */
    @Value
    static class State {
        MantisPropertiesLoader propertiesLoader;
        Exception stackTrace;
    }
}
| 7,806 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/WorkerConstants.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common;
/**
 * Well-known environment-variable / attribute key names shared between the
 * Mantis master and workers.
 */
public class WorkerConstants {

    public static final String WORKER_CONTAINER_DEFINITION_ID = "MANTIS_WORKER_CONTAINER_DEFINITION_ID";
    public static final String WORKER_TASK_ATTRIBUTE_ENV_KEY = "MANTIS_WORKER_CONTAINER_ATTRIBUTE";
    // TODO(fdichiara): make this configurable.
    public static final String AUTO_SCALE_GROUP_KEY = "NETFLIX_AUTO_SCALE_GROUP";
    public static final String MANTIS_WORKER_CONTAINER_GENERATION = "MANTIS_WORKER_CONTAINER_GENERATION";

    // Constants holder; not instantiable (mirrors SystemParameters).
    private WorkerConstants() {}
}
| 7,807 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/Label.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
/**
 * An immutable name/value label.
 */
public class Label {

    private final String name;
    private final String value;

    /**
     * Constructor, also used by Jackson when deserializing.
     *
     * @param n label name
     * @param v label value
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public Label(@JsonProperty("name") String n, @JsonProperty("value") String v) {
        this.name = n;
        this.value = v;
    }

    public String getName() {
        return name;
    }

    public String getValue() {
        return value;
    }

    @Override
    public int hashCode() {
        // 31 * (31 * 1 + hash(name)) + hash(value): identical values to the
        // previous hand-rolled form.
        int h = 31 + java.util.Objects.hashCode(name);
        return 31 * h + java.util.Objects.hashCode(value);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        Label other = (Label) obj;
        return java.util.Objects.equals(name, other.name)
            && java.util.Objects.equals(value, other.value);
    }

    @Override
    public String toString() {
        return "Label [name=" + name + ", value=" + value + "]";
    }
} | 7,808 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/MantisServerSentEvent.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common;
/**
 * Wraps the payload of a single server-sent event (SSE).
 */
public class MantisServerSentEvent {

    private final String eventData;

    /** @param data the raw event payload */
    public MantisServerSentEvent(String data) {
        this.eventData = data;
    }

    /** @return the raw event payload */
    public String getEventAsString() {
        return eventData;
    }

    @Override
    public int hashCode() {
        // 31 * 1 + hash(eventData): identical to the previous hand-rolled form.
        return 31 + java.util.Objects.hashCode(eventData);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        return java.util.Objects.equals(eventData, ((MantisServerSentEvent) obj).eventData);
    }

    @Override
    public String toString() {
        return "MantisServerSentEvent [eventData=" + eventData + "]";
    }
}
| 7,809 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/MantisProperties.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common;
import java.util.Map;
/**
 * Resolves configuration values, consulting JVM system properties first and
 * then process environment variables.
 */
public class MantisProperties {

    private static final MantisProperties instance = new MantisProperties();

    // Snapshot of the process environment taken at construction time.
    private final Map<String, String> env;

    private MantisProperties() {
        env = System.getenv();
    }

    @Deprecated
    public static MantisProperties getInstance() {
        return instance;
    }

    /**
     * @deprecated use {@link #getProperty(String)} instead.
     */
    @Deprecated
    public String getStringValue(String name) {
        // Uses the env snapshot as the default when neither a system property
        // nor a live environment variable resolves the name.
        return (name == null) ? null : getProperty(name, env.get(name));
    }

    /** Same as {@link #getProperty(String, String)} with a null default. */
    public static String getProperty(String key) {
        return getProperty(key, null);
    }

    /**
     * Looks up {@code key} as a system property, then as an environment
     * variable; returns {@code defaultVal} when neither is set.
     */
    public static String getProperty(String key, String defaultVal) {
        if (key == null) {
            return null;
        }
        String value = System.getProperty(key);
        if (value == null) {
            value = System.getenv(key);
        }
        return (value != null) ? value : defaultVal;
    }
}
| 7,810 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/WorkerPorts.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.google.common.collect.ImmutableList;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import lombok.EqualsAndHashCode;
import lombok.ToString;
/**
 * Worker Ports has the following semantics:
 * 1. capture the individual ports for each of metrics, debugging, console, custom, sink
 * 2. ports[0] should capture the sink port because of legacy reasons.
 *
 * All five ports must be distinct and in the range 1..65535 (validated at
 * construction; see isValid()).
 */
@EqualsAndHashCode
@ToString
public class WorkerPorts implements Serializable {
private static final long serialVersionUID = 1L;
private final int metricsPort;
private final int debugPort;
private final int consolePort;
private final int customPort;
private final int sinkPort;
// Legacy list; by convention holds only the sink port at index 0.
private final List<Integer> ports;
/**
 * Builds from a positional list: [metrics, debug, console, custom, sink].
 *
 * @param assignedPorts at least 5 ports in the order above
 * @throws IllegalArgumentException if fewer than 5 ports are supplied
 * @throws IllegalStateException if the ports are not unique and valid
 */
public WorkerPorts(final List<Integer> assignedPorts) {
if (assignedPorts.size() < 5) {
throw new IllegalArgumentException("assignedPorts should have at least 5 ports");
}
this.metricsPort = assignedPorts.get(0);
this.debugPort = assignedPorts.get(1);
this.consolePort = assignedPorts.get(2);
this.customPort = assignedPorts.get(3);
this.sinkPort = assignedPorts.get(4);
// Legacy: expose only the sink port through the 'ports' list.
this.ports = ImmutableList.of(assignedPorts.get(4));
if (!isValid()) {
throw new IllegalStateException("worker validation failed on port allocation");
}
}
/** Convenience constructor taking each port explicitly. */
public WorkerPorts(int metricsPort, int debugPort, int consolePort, int customPort, int sinkPort) {
this(ImmutableList.of(metricsPort, debugPort, consolePort, customPort, sinkPort));
}
/**
 * JSON constructor: the serialized form carries the four named ports plus a
 * 'ports' list whose first element is treated as the sink port.
 */
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public WorkerPorts(@JsonProperty("metricsPort") int metricsPort,
@JsonProperty("debugPort") int debugPort,
@JsonProperty("consolePort") int consolePort,
@JsonProperty("customPort") int customPort,
@JsonProperty("ports") List<Integer> ports) {
this(ImmutableList.<Integer>builder()
.add(metricsPort)
.add(debugPort)
.add(consolePort)
.add(customPort)
.addAll(ports)
.build());
}
public int getMetricsPort() {
return metricsPort;
}
public int getDebugPort() {
return debugPort;
}
public int getConsolePort() {
return consolePort;
}
public int getCustomPort() {
return customPort;
}
public int getSinkPort() { return sinkPort; }
/** @return the legacy ports list (holds the sink port) */
public List<Integer> getPorts() {
return ports;
}
/** @return a fresh mutable list of every port: ports list plus the four named ports. */
@JsonIgnore
public List<Integer> getAllPorts() {
final List<Integer> allPorts = new ArrayList<>(ports);
allPorts.add(metricsPort);
allPorts.add(debugPort);
allPorts.add(consolePort);
allPorts.add(customPort);
return allPorts;
}
/**
 * Validates that this object has at least 5 valid ports and all of them are unique.
 */
private boolean isValid() {
Set<Integer> uniquePorts = new HashSet<>();
uniquePorts.add(metricsPort);
uniquePorts.add(consolePort);
uniquePorts.add(debugPort);
uniquePorts.add(customPort);
uniquePorts.add(sinkPort);
return uniquePorts.size() >= 5
&& isValidPort(metricsPort)
&& isValidPort(consolePort)
&& isValidPort(debugPort)
&& isValidPort(customPort)
&& isValidPort(sinkPort);
}
/** @return size of the legacy ports list only (not all five ports) */
@JsonIgnore
public int getNumberOfPorts() {
return ports.size();
}
/**
 * A port with 0 is technically correct, but we disallow it because there would be an inconsistency between
 * what unused port the OS selects (some port number) and what this object's metadata holds (0).
 */
private boolean isValidPort(int port) {
return port > 0 && port <= 65535;
}
}
| 7,811 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/SystemParameters.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common;
/**
 * Well-known Mantis system parameter key names. Constants holder; not
 * instantiable.
 */
public final class SystemParameters {
// Keys consumed by the job master autoscaler.
public static final String JOB_MASTER_AUTOSCALE_METRIC_SYSTEM_PARAM = "mantis.jobmaster.autoscale.metric";
public static final String JOB_MASTER_AUTOSCALE_CONFIG_SYSTEM_PARAM = "mantis.jobmaster.autoscale.adaptive.config";
// Keys for the clutch autoscaling configuration.
public static final String JOB_MASTER_CLUTCH_SYSTEM_PARAM = "mantis.jobmaster.clutch.config";
public static final String JOB_MASTER_CLUTCH_EXPERIMENTAL_PARAM = "mantis.jobmaster.clutch.experimental.enabled";
// Keys for source-job-based autoscaling.
public static final String JOB_MASTER_AUTOSCALE_SOURCEJOB_METRIC_PARAM = "mantis.jobmaster.autoscale.sourcejob.metric.enabled";
public static final String JOB_MASTER_AUTOSCALE_SOURCEJOB_TARGET_PARAM = "mantis.jobmaster.autoscale.sourcejob.target";
public static final String JOB_MASTER_AUTOSCALE_SOURCEJOB_DROP_METRIC_PATTERNS_PARAM = "mantis.jobmaster.autoscale.sourcejob.dropMetricPatterns";
// Worker heartbeat/timeout tuning keys.
public static final String JOB_WORKER_HEARTBEAT_INTERVAL_SECS = "mantis.job.worker.heartbeat.interval.secs";
public static final String JOB_WORKER_TIMEOUT_SECS = "mantis.job.worker.timeout.secs";
// Format string for per-stage JVM options env keys (stage number substituted).
public static final String PER_STAGE_JVM_OPTS_FORMAT = "MANTIS_WORKER_JVM_OPTS_STAGE%d";
public static final String STAGE_CONCURRENCY = "mantis.stageConcurrency";
public static final int MAX_NUM_STAGES_FOR_JVM_OPTS_OVERRIDE = 5;
private SystemParameters() {}
}
| 7,812 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/Ack.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common;
import java.io.Serializable;
import lombok.Value;
/**
 * Ack is sent whenever an effect has taken place on the side of the receiver.
 * Ack instance supports both JSON / Java serialization.
 */
// Lombok @Value with a named static factory: obtain instances via Ack.getInstance().
@Value(staticConstructor = "getInstance")
public class Ack implements Serializable {
private static final long serialVersionUID = 1L;
}
| 7,813 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/MantisGroup.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common;
/**
 * An immutable (key, value) pair.
 *
 * @param <K> type of the grouping key
 * @param <V> type of the grouped value
 */
public class MantisGroup<K, V> {

    // Previously mutable but never reassigned; final documents and enforces
    // the immutability callers already rely on.
    private final K keyValue;
    private final V value;

    /**
     * @param keyValue the grouping key
     * @param value    the value associated with the key
     */
    public MantisGroup(K keyValue, V value) {
        this.keyValue = keyValue;
        this.value = value;
    }

    /** @return the grouping key */
    public K getKeyValue() {
        return keyValue;
    }

    /** @return the grouped value */
    public V getValue() {
        return value;
    }
}
| 7,814 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/MetricsPublisherNoOp.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics;
import java.util.Collection;
import java.util.Properties;
/**
 * A {@link MetricsPublisher} that discards every metrics snapshot.
 * Useful as the default when no external metrics backend is configured.
 */
public class MetricsPublisherNoOp extends MetricsPublisher {

    public MetricsPublisherNoOp(Properties properties) {
        super(properties);
    }

    // Intentionally empty: metrics are dropped rather than published anywhere.
    @Override
    public void publishMetrics(long timestamp,
                               Collection<Metrics> currentMetricsRegistered) {}
}
| 7,815 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/MetricsServer.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics;
import io.mantisrx.common.metrics.measurement.CounterMeasurement;
import io.mantisrx.common.metrics.measurement.GaugeMeasurement;
import io.mantisrx.common.metrics.measurement.Measurements;
import io.mantisrx.common.metrics.spectator.MetricId;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.com.fasterxml.jackson.datatype.jdk8.Jdk8Module;
import io.netty.buffer.ByteBuf;
import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.TimeUnit;
import mantis.io.reactivex.netty.RxNetty;
import mantis.io.reactivex.netty.pipeline.PipelineConfigurators;
import mantis.io.reactivex.netty.protocol.http.server.HttpServer;
import mantis.io.reactivex.netty.protocol.http.server.HttpServerRequest;
import mantis.io.reactivex.netty.protocol.http.server.HttpServerResponse;
import mantis.io.reactivex.netty.protocol.http.server.RequestHandler;
import mantis.io.reactivex.netty.protocol.http.sse.ServerSentEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.functions.Func1;
/**
 * Serves this process's registered metrics over HTTP as a server-sent-event
 * (SSE) stream. Measurements are sampled from the global
 * {@link MetricsRegistry} every {@code publishRateInSeconds} seconds; clients
 * may filter metric groups via one or more "name" query parameters, where a
 * single '*' at the start or end of a name acts as a suffix/prefix wildcard.
 */
public class MetricsServer {

    private static final Logger logger = LoggerFactory.getLogger(MetricsServer.class);
    private final ObjectMapper mapper = new ObjectMapper();
    private HttpServer<ByteBuf, ServerSentEvent> server;
    private int port;
    private Map<String, String> tags;
    private long publishRateInSeconds;

    /**
     * @param port                 TCP port the HTTP server listens on
     * @param publishRateInSeconds sampling interval for the metrics registry, in seconds
     * @param tags                 common tags attached to every emitted measurement
     */
    public MetricsServer(int port, long publishRateInSeconds, Map<String, String> tags) {
        this.port = port;
        this.publishRateInSeconds = publishRateInSeconds;
        this.tags = tags;
        mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        mapper.registerModule(new Jdk8Module());
    }

    // Emits, on a fixed interval (seconds), one Measurements snapshot per
    // registered metric group, capturing all its counter and gauge values.
    private Observable<Measurements> measurements(long timeFrequency) {
        final MetricsRegistry registry = MetricsRegistry.getInstance();
        return
            Observable.interval(0, timeFrequency, TimeUnit.SECONDS)
                .flatMap(new Func1<Long, Observable<Measurements>>() {
                    @Override
                    public Observable<Measurements> call(Long t1) {
                        long timestamp = System.currentTimeMillis();
                        List<Measurements> measurements = new ArrayList<>();
                        for (Metrics metrics : registry.metrics()) {
                            Collection<CounterMeasurement> counters = new LinkedList<>();
                            Collection<GaugeMeasurement> gauges = new LinkedList<>();
                            for (Entry<MetricId, Counter> counterEntry : metrics.counters().entrySet()) {
                                Counter counter = counterEntry.getValue();
                                counters.add(new CounterMeasurement(counterEntry.getKey().metricName(), counter.value()));
                            }
                            for (Entry<MetricId, Gauge> gaugeEntry : metrics.gauges().entrySet()) {
                                gauges.add(new GaugeMeasurement(gaugeEntry.getKey().metricName(), gaugeEntry.getValue().doubleValue()));
                            }
                            measurements.add(new Measurements(metrics.getMetricGroupId().id(),
                                timestamp, counters, gauges, tags));
                        }
                        return Observable.from(measurements);
                    }
                });
    }

    /** Starts the SSE metrics endpoint. Non-blocking; returns once the server is up. */
    public void start() {
        final Observable<Measurements> measurements = measurements(publishRateInSeconds);
        logger.info("Starting metrics server on port: " + port);
        server = RxNetty.createHttpServer(
            port,
            new RequestHandler<ByteBuf, ServerSentEvent>() {
                @Override
                public Observable<Void> handle(HttpServerRequest<ByteBuf> request, final HttpServerResponse<ServerSentEvent> response) {
                    final Map<String, List<String>> queryParameters = request.getQueryParameters();
                    final List<String> namesToFilter = new LinkedList<>();
                    logger.info("got query params {}", queryParameters);
                    if (queryParameters != null && queryParameters.containsKey("name")) {
                        namesToFilter.addAll(queryParameters.get("name"));
                    }
                    // Keep only measurement groups matching a requested name;
                    // with no "name" params, everything passes through.
                    Observable<Measurements> filteredObservable = measurements
                        .filter(new Func1<Measurements, Boolean>() {
                            @Override
                            // NOTE: the parameter shadows the outer 'measurements' observable.
                            public Boolean call(Measurements measurements) {
                                if (!namesToFilter.isEmpty()) {
                                    // check filters
                                    for (String name : namesToFilter) {
                                        if (name.indexOf('*') != -1) {
                                            // check for ends with
                                            if (name.indexOf('*') == 0 && measurements.getName().endsWith(name.substring(1)))
                                                return true;
                                            // check for starts with
                                            if (name.indexOf('*') > 0 && measurements.getName().startsWith(name.substring(0, name.indexOf('*')))) {
                                                return true;
                                            }
                                        }
                                        if (measurements.getName().equals(name)) {
                                            return true; // filter match
                                        }
                                    }
                                    return false; // not found in filters
                                } else {
                                    return true; // no filters provided
                                }
                            }
                        });
                    return filteredObservable.flatMap(new Func1<Measurements, Observable<Void>>() {
                        @Override
                        public Observable<Void> call(Measurements metrics) {
                            response.getHeaders().set("Access-Control-Allow-Origin", "*");
                            response.getHeaders().set("content-type", "text/event-stream");
                            ServerSentEvent event = null;
                            try {
                                // Serialize the snapshot as JSON and wrap it in an SSE frame.
                                ByteBuf data = response.getAllocator().buffer().writeBytes((mapper.writeValueAsString(metrics)).getBytes());
                                event = new ServerSentEvent(data);
                                //event = new ServerSentEvent(null, "data", mapper.writeValueAsString(metrics));
                            } catch (JsonProcessingException e) {
                                logger.error("Failed to map metrics to JSON", e);
                            }
                            if (event != null) {
                                response.write(event);
                                return response.writeStringAndFlush("\n");
                            }
                            // NOTE(review): returning null from a flatMap function violates the
                            // Rx contract; Observable.empty() looks safer — confirm before changing.
                            return null;
                        }
                    });
                }
            },
            PipelineConfigurators.<ByteBuf>serveSseConfigurator()
        ).start();
    }

    /** Waits briefly so in-flight SSE buffers can flush, then stops the server. */
    public void shutdown() {
        if (server != null) {
            logger.info("Shutting down metrics server on port");
            logger.info("Waiting (2 x push-period) to flush buffers, before shut down.");
            try {
                TimeUnit.SECONDS.sleep(2);
                server.shutdown();
            } catch (InterruptedException e) {
                logger.warn("Failed to shutdown metrics server", e);
            }
        }
    }
}
| 7,816 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/Gauge.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics;
import io.mantisrx.common.metrics.spectator.MetricId;
/**
 * A gauge metric: an instantaneous value that may move up or down over time.
 */
public interface Gauge {

    /** @return the event (metric) name this gauge reports under */
    String event();

    /** @return the fully qualified metric id of this gauge */
    MetricId id();

    /** Sets the gauge to the given double value. */
    void set(double value);

    /** Increments the gauge by one. */
    void increment();

    /** Increments the gauge by the given double amount. */
    void increment(double value);

    /** Decrements the gauge by one. */
    void decrement();

    /** Decrements the gauge by the given double amount. */
    void decrement(double value);

    /** Sets the gauge to the given long value. */
    void set(long value);

    /** Increments the gauge by the given long amount. */
    void increment(long value);

    /** Decrements the gauge by the given long amount. */
    void decrement(long value);

    /** @return the current value as a long */
    long value();

    /** @return the current value widened to a double */
    default double doubleValue() {
        return value() * 1.0;
    }
}
| 7,817 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/MetricsRegistry.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics;
import io.mantisrx.common.metrics.spectator.MetricGroupId;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
 * Process-wide singleton registry of every {@link Metrics} group created in
 * this JVM, keyed by the metric group's string id.
 */
public class MetricsRegistry {

    private static final MetricsRegistry instance = new MetricsRegistry();

    private ConcurrentMap<String, Metrics> metricsRegistered = new ConcurrentHashMap<>();

    private MetricsRegistry() {}

    /** @return the single shared registry instance */
    public static MetricsRegistry getInstance() {
        return instance;
    }

    /**
     * Registers the given metrics under its group id unless one is already
     * registered there.
     *
     * @param metrics the metrics group to register
     * @return the previously registered instance if one existed, otherwise
     *         the supplied {@code metrics}
     */
    public Metrics registerAndGet(Metrics metrics) {
        final String key = metrics.getMetricGroupId().id();
        final Metrics previous = metricsRegistered.putIfAbsent(key, metrics);
        return (previous == null) ? metrics : previous;
    }

    /** @return a live view of every registered metrics group */
    Collection<Metrics> metrics() {
        return metricsRegistered.values();
    }

    /**
     * @param prefix group-id prefix to match
     * @return a snapshot of all metrics groups whose id starts with {@code prefix}
     */
    public Collection<Metrics> getMetrics(String prefix) {
        final List<Metrics> matching = new ArrayList<>();
        metricsRegistered.values().forEach(candidate -> {
            if (candidate.getMetricGroupId().id().startsWith(prefix)) {
                matching.add(candidate);
            }
        });
        return matching;
    }

    /** @deprecated use {@link #remove(MetricGroupId)} instead */
    @Deprecated
    public boolean remove(String metricGroupId) {
        return null != metricsRegistered.remove(metricGroupId);
    }

    /** @return true if a metrics group was registered under the id and removed */
    public boolean remove(final MetricGroupId metricGroupId) {
        return null != metricsRegistered.remove(metricGroupId.id());
    }

    /** @deprecated use {@link #getMetric(MetricGroupId)} instead */
    @Deprecated
    public Metrics getMetric(String metricGroupId) {
        return metricsRegistered.get(metricGroupId);
    }

    /** @return the metrics group registered under the id, or null if absent */
    public Metrics getMetric(final MetricGroupId metricGroupId) {
        return metricsRegistered.get(metricGroupId.id());
    }
}
| 7,818 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/LoggingMetricsPublisher.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics;
import io.mantisrx.shaded.com.google.common.base.Joiner;
import java.time.Instant;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Properties;
import java.util.Set;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
/**
 * Metrics publisher that writes the counters of selected metric groups to the
 * application log. Enable it by overriding "mantis.metricsPublisher.class" in
 * the master-*.properties file; the groups to log come from the
 * MANTIS_LOGGING_ENABLED_METRICS_GROUP_ID_LIST property or environment
 * variable (a ';'-separated, case-insensitive list of group ids).
 */
@Slf4j
public class LoggingMetricsPublisher extends MetricsPublisher {

    public static final String LOGGING_ENABLED_METRICS_GROUP_ID_LIST_KEY =
        "MANTIS_LOGGING_ENABLED_METRICS_GROUP_ID_LIST";

    private Set<String> loggingEnabledMetricsGroupId = new HashSet<>();

    public LoggingMetricsPublisher(Properties properties) {
        super(properties);
        // Property takes precedence; the environment variable is the fallback.
        String configuredGroups = properties.getProperty(
            LOGGING_ENABLED_METRICS_GROUP_ID_LIST_KEY,
            System.getenv(LOGGING_ENABLED_METRICS_GROUP_ID_LIST_KEY));
        log.info("LOGGING_ENABLED_METRICS_GROUP_ID_LIST_KEY: {}", configuredGroups);
        if (configuredGroups == null) {
            return; // nothing configured: no group is logged
        }
        this.loggingEnabledMetricsGroupId =
            Arrays.stream(configuredGroups.toLowerCase().split(";"))
                .collect(Collectors.toSet());
        log.info("[Metrics Publisher] enable logging for: {}",
            Joiner.on(',').join(this.loggingEnabledMetricsGroupId));
    }

    @Override
    public void publishMetrics(long timestamp,
                               Collection<Metrics> currentMetricsRegistered) {
        log.info("Printing metrics from: {}", Instant.ofEpochMilli(timestamp));
        for (Metrics metrics : currentMetricsRegistered) {
            // Group-id match is case-insensitive (config list was lowercased).
            if (!this.loggingEnabledMetricsGroupId.contains(
                    metrics.getMetricGroupId().id().toLowerCase())) {
                continue;
            }
            metrics.counters().forEach(
                (metricId, counter) -> log.info("[METRICS] {} : {}", metricId, counter.value()));
        }
    }
}
| 7,819 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/GroupedCounter.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics;
/**
 * Base class for a counter that tracks multiple named events under a single
 * family name. Subclasses supply the storage and rate-tracking strategy.
 */
public abstract class GroupedCounter {

    // Made final: the family name identifies this group for its lifetime.
    private final String familyName;

    /**
     * @param familyName logical name shared by all events in this group
     * @param events     event names; NOTE(review): unused by this base class —
     *                   presumably subclasses register them, confirm with impls
     */
    public GroupedCounter(String familyName, String... events) {
        this.familyName = familyName;
    }

    /** @return the family name supplied at construction */
    public String familyName() {
        return familyName;
    }

    /** Increments the counter for the given event. */
    public abstract void increment(String event);

    /** @return the cumulative count recorded for the given event */
    public abstract long count(String event);

    /** @return the count accumulated within the current rate window for the event */
    public abstract long rateCount(String event);

    /** @return the length of the rate window, in milliseconds */
    public abstract long rateTimeInMilliseconds();
}
| 7,820 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/GaugeCallback.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics;
import io.mantisrx.common.metrics.spectator.MetricId;
import rx.functions.Func0;
public class GaugeCallback implements Gauge {
public static final String LEGACY_GAUGE_CALLBACK_METRICGROUP = "legacyGaugeCallbackMetricGroup";
private String event;
private Func0<Long> valueCallback;
private MetricId metricId;
/**
* @deprecated use {@link io.mantisrx.common.metrics.spectator.GaugeCallback} instead
*/
@Deprecated
public GaugeCallback(String event, Func0<Long> valueCallback) {
this.event = event;
this.valueCallback = valueCallback;
this.metricId = new MetricId(LEGACY_GAUGE_CALLBACK_METRICGROUP, event);
}
@Override
public String event() {
return event;
}
@Override
public long value() {
return valueCallback.call();
}
@Override
public void increment() {}
@Override
public void decrement() {}
@Override
public void set(double value) {}
@Override
public void increment(double value) {}
@Override
public void decrement(double value) {}
@Override
public void set(long value) {}
@Override
public void increment(long value) {}
@Override
public void decrement(long value) {}
@Override
public MetricId id() {
return metricId;
}
} | 7,821 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/Timer.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics;
import com.netflix.spectator.api.Measurement;
import io.mantisrx.common.metrics.spectator.MetricId;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
/**
 * A timer metric that records event durations.
 */
public interface Timer {

    /** Records a single duration of {@code amount} in the given unit. */
    void record(long amount, TimeUnit unit);

    /** Times the callable, records its duration, and returns/rethrows its result. */
    <T> T record(Callable<T> f) throws Exception;

    /** Times the runnable and records its duration. */
    void record(Runnable f);

    /** @return the number of durations recorded */
    long count();

    /** @return the total recorded time (unit is implementation-defined — confirm with impl) */
    long totalTime();

    /** @return the fully qualified metric id of this timer */
    MetricId id();

    /** @return the current measurements for this timer */
    Iterable<Measurement> measure();

    /** @return true if this meter has expired; presumably mirrors Spectator's Meter#hasExpired — confirm */
    boolean hasExpired();
}
| 7,822 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/Counter.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics;
import io.mantisrx.common.metrics.spectator.MetricId;
/**
 * A counter metric recording how many times an event has occurred.
 */
public interface Counter {

    /** Increments the counter by one. */
    void increment();

    /** Increments the counter by {@code x}. */
    void increment(long x);

    /** @return the cumulative value of this counter */
    long value();

    /**
     * @return rate value of counter
     *
     * @deprecated this api will be deprecated after metrics are migrated to spectator-api
     */
    @Deprecated
    long rateValue();

    /**
     * @return rate time in millis
     *
     * @deprecated this api will be deprecated after metrics are migrated to spectator-api
     */
    @Deprecated
    long rateTimeInMilliseconds();

    /** @return the event (metric) name this counter records */
    String event();

    /** @return the fully qualified metric id of this counter */
    MetricId id();
}
| 7,823 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/MetricsPublisher.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import rx.Observable;
import rx.Subscription;
import rx.functions.Action1;
/**
 * Base class for metrics publishers: periodically polls the global
 * {@link MetricsRegistry} and hands every registered metrics group to
 * {@link #publishMetrics(long, Collection)}.
 */
public abstract class MetricsPublisher {

    protected Map<String, String> commonTags = new HashMap<>();
    private Properties properties;
    private Subscription subscription;

    public MetricsPublisher(Properties properties) {
        this.properties = properties;
    }

    /**
     * @deprecated misspelled; use {@link #getProperties()} instead.
     */
    @Deprecated
    protected Properties getPropertis() {
        return getProperties();
    }

    /** @return the configuration properties supplied at construction */
    protected Properties getProperties() {
        return properties;
    }

    /**
     * Starts polling the metrics registry on the given interval.
     *
     * @param pollMetricsRegistryFrequencyInSeconds polling period in seconds
     * @param commonTags                            tags merged into {@link #commonTags}
     */
    public void start(int pollMetricsRegistryFrequencyInSeconds, final Map<String, String> commonTags) {
        this.commonTags.putAll(commonTags);
        // read from metrics registry every publishRateInSeconds
        final MetricsRegistry registry = MetricsRegistry.getInstance();
        subscription =
            Observable.interval(0, pollMetricsRegistryFrequencyInSeconds, TimeUnit.SECONDS)
                .doOnNext(tick -> {
                    final long timestamp = System.currentTimeMillis();
                    publishMetrics(timestamp, registry.metrics());
                }).subscribe();
    }

    /** Starts polling with no extra common tags. */
    public void start(int pollMetricsRegistryFrequencyInSeconds) {
        start(pollMetricsRegistryFrequencyInSeconds, new HashMap<String, String>());
    }

    /** Stops the polling subscription, if one was started. */
    public void shutdown() {
        if (subscription != null) {
            subscription.unsubscribe();
        }
    }

    /**
     * Publishes one metrics snapshot.
     *
     * @param timestamp                wall-clock millis when the snapshot was taken
     * @param currentMetricsRegistered all metrics currently in the registry
     */
    public abstract void publishMetrics(long timestamp, Collection<Metrics> currentMetricsRegistered);
}
| 7,824 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/Metrics.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Tag;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.common.metrics.spectator.CounterImpl;
import io.mantisrx.common.metrics.spectator.GaugeImpl;
import io.mantisrx.common.metrics.spectator.MetricGroupId;
import io.mantisrx.common.metrics.spectator.MetricId;
import io.mantisrx.common.metrics.spectator.SpectatorRegistryFactory;
import io.mantisrx.common.metrics.spectator.TimerImpl;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Holds the counters, gauges and timers registered under a single metric
 * group id. Instances are created via the nested {@link Builder}.
 */
public class Metrics {

    private static final Logger logger = LoggerFactory.getLogger(Metrics.class);
    private final Builder builder;
    private MetricGroupId metricGroup;
    private Map<MetricId, Counter> counters = new HashMap<>();
    private Map<MetricId, Gauge> gauges = new HashMap<>();
    private Map<MetricId, Timer> timers = new HashMap<>();

    public Metrics(Builder builder) {
        this.builder = builder;
        this.metricGroup = builder.metricGroup;
        // The builder's collections are final and never null, so the former
        // null/size guards were redundant; iterating an empty set is a no-op.
        // create spectator counters
        for (MetricId id : builder.counterIds) {
            logger.debug("creating spectator counter for {}", id);
            counters.put(id, new CounterImpl(id, builder.registry));
        }
        // register pre-built callback gauges under their own ids
        for (Gauge gauge : builder.callbackGauges) {
            gauges.put(gauge.id(), gauge);
        }
        // create spectator gauges
        for (MetricId gaugeId : builder.gaugeIds) {
            logger.debug("creating spectator gauge for {}", gaugeId);
            gauges.put(gaugeId, new GaugeImpl(gaugeId, builder.registry));
        }
        // create spectator timers
        for (MetricId id : builder.timerIds) {
            logger.debug("creating spectator timer for {}", id);
            timers.put(id, new TimerImpl(id, builder.registry));
        }
    }

    /**
     * use {@link #getMetricGroupId()} instead
     *
     * @return the metric group's name
     */
    @Deprecated
    public String getMetricGroup() {
        return metricGroup.name();
    }

    /** @return the id of the metric group these metrics belong to */
    public MetricGroupId getMetricGroupId() {
        return metricGroup;
    }

    @Override
    public String toString() {
        return "Metrics{" +
            "metricGroup=" + metricGroup +
            ", counters=" + counters.keySet() +
            ", gauges=" + gauges.keySet() +
            ", timers=" + timers.keySet() +
            '}';
    }

    /**
     * Looks up a counter registered under the group's own tags.
     *
     * @throws RuntimeException if no such counter was registered
     */
    public Counter getCounter(String metricName) {
        Counter counter = counters.get(new MetricId(metricGroup.name(), metricName, metricGroup.tags()));
        if (counter == null) {
            throw new RuntimeException("No counter registered for metricGroup: " + metricGroup + " with metricName: " + metricName);
        }
        return counter;
    }

    /**
     * Looks up a gauge registered under the group's own tags, falling back to
     * the legacy callback-gauge group for backwards compatibility.
     *
     * @throws RuntimeException if no such gauge was registered
     */
    public Gauge getGauge(String metricName) {
        Gauge gauge = gauges.get(new MetricId(metricGroup.name(), metricName, metricGroup.tags()));
        if (gauge == null) {
            // For backwards compat: legacy GaugeCallback instances were
            // registered under a shared fallback metric group.
            final Gauge legacyGauge = gauges.get(new MetricId(GaugeCallback.LEGACY_GAUGE_CALLBACK_METRICGROUP, metricName));
            if (legacyGauge == null) {
                throw new RuntimeException("No gauge registered for metricGroup: " + metricGroup + " with name: " + metricName);
            } else {
                return legacyGauge;
            }
        }
        return gauge;
    }

    /**
     * Looks up a timer registered under the group's own tags.
     *
     * @throws RuntimeException if no such timer was registered
     */
    public Timer getTimer(String metricName) {
        Timer timer = timers.get(new MetricId(metricGroup.name(), metricName, metricGroup.tags()));
        if (timer == null) {
            // Fixed message typo: "metriGroup" -> "metricGroup".
            throw new RuntimeException("No timer registered for metricGroup: " + metricGroup + " with name: " + metricName);
        }
        return timer;
    }

    /** Looks up a counter registered with the given explicit tags. */
    public Counter getCounter(final String metricName, final Tag... tags) {
        Counter counter = counters.get(new MetricId(metricGroup.name(), metricName, tags));
        if (counter == null) {
            throw new RuntimeException("No counter registered for metricGroup: " + metricGroup + " with metricName: " + metricName);
        }
        return counter;
    }

    /** Looks up a gauge registered with the given explicit tags. */
    public Gauge getGauge(final String metricName, final Tag... tags) {
        Gauge gauge = gauges.get(new MetricId(metricGroup.name(), metricName, tags));
        if (gauge == null) {
            throw new RuntimeException("No gauge registered for metricGroup: " + this.metricGroup + " with metricName: " + metricName);
        }
        return gauge;
    }

    /** Looks up a timer registered with the given explicit tags. */
    public Timer getTimer(final String metricName, final Tag... tags) {
        Timer timer = timers.get(new MetricId(metricGroup.name(), metricName, tags));
        if (timer == null) {
            throw new RuntimeException("No timer registered for metricGroup: " + this.metricGroup + " with metricName: " + metricName);
        }
        return timer;
    }

    /**
     * @return Note, point in time copy, if counters are add after copy they
     * will not be reflected in this data structure.
     */
    public Map<MetricId, Counter> counters() {
        return Collections.unmodifiableMap(counters);
    }

    /**
     * @return Note, point in time copy, if gauge are add after copy they
     * will not be reflected in this data structure.
     */
    public Map<MetricId, Gauge> gauges() {
        return Collections.unmodifiableMap(gauges);
    }

    /** @return an unmodifiable view of the registered timers */
    public Map<MetricId, Timer> timers() {
        return Collections.unmodifiableMap(timers);
    }

    /** Fluent builder for {@link Metrics}; set the group id before adding metrics. */
    public static class Builder {

        private final Registry registry;
        private final Set<Gauge> callbackGauges = new HashSet<>();
        private final Set<MetricId> counterIds = new HashSet<>();
        private final Set<MetricId> gaugeIds = new HashSet<>();
        private final Set<MetricId> timerIds = new HashSet<>();
        private MetricGroupId metricGroup;

        public Builder() {
            this(SpectatorRegistryFactory.getRegistry());
        }

        public Builder(final Registry registry) {
            this.registry = registry;
        }

        public Builder name(final String metricGroup) {
            this.metricGroup = new MetricGroupId(metricGroup);
            return this;
        }

        public Builder id(final MetricGroupId metricGroup) {
            this.metricGroup = metricGroup;
            return this;
        }

        public Builder id(final String metricGroup, final Collection<Tag> groupTags) {
            this.metricGroup = new MetricGroupId(metricGroup, groupTags);
            return this;
        }

        public Builder id(final String metricGroup, final Tag... groupTags) {
            this.metricGroup = new MetricGroupId(metricGroup, groupTags);
            return this;
        }

        public Builder addCounter(final String metricName) {
            Preconditions.checkNotNull(metricGroup, "set metric group id with id(String, Tag...) before adding Counter");
            counterIds.add(new MetricId(metricGroup.name(), metricName, metricGroup.tags()));
            return this;
        }

        @Deprecated
        public Builder addCounter(final String metricName, final Iterable<Tag> overrideGroupTags) {
            Preconditions.checkNotNull(metricGroup, "set metric group id with id(String, Tag...) before adding Counter");
            counterIds.add(new MetricId(metricGroup.name(), metricName, overrideGroupTags));
            return this;
        }

        @Deprecated
        public Builder addCounter(final String metricName, final Tag... overrideGroupTags) {
            Preconditions.checkNotNull(metricGroup, "set metric group id with id(String, Tag...) before adding Counter");
            counterIds.add(new MetricId(metricGroup.name(), metricName, overrideGroupTags));
            return this;
        }

        public Builder addGauge(final String metricName) {
            Preconditions.checkNotNull(metricGroup, "set metric group id with id(String, Tag...) before adding Gauge");
            gaugeIds.add(new MetricId(metricGroup.name(), metricName, metricGroup.tags()));
            return this;
        }

        @Deprecated
        public Builder addGauge(final String metricName, final Iterable<Tag> overrideGroupTags) {
            Preconditions.checkNotNull(metricGroup, "set metric group id with id(String, Tag...) before adding Gauge");
            gaugeIds.add(new MetricId(metricGroup.name(), metricName, overrideGroupTags));
            return this;
        }

        @Deprecated
        public Builder addGauge(final String metricName, final Tag... overrideGroupTags) {
            Preconditions.checkNotNull(metricGroup, "set metric group id with id(String, Tag...) before adding Gauge");
            gaugeIds.add(new MetricId(metricGroup.name(), metricName, overrideGroupTags));
            return this;
        }

        public Builder addGauge(final Gauge callbackGauge) {
            Preconditions.checkNotNull(metricGroup, "set metric group id with id(String, Tag...) before adding Gauge");
            callbackGauges.add(callbackGauge);
            return this;
        }

        public Builder addTimer(final String metricName) {
            Preconditions.checkNotNull(metricGroup, "set metric group id with id(String, Tag...) before adding Timer");
            timerIds.add(new MetricId(metricGroup.name(), metricName, metricGroup.tags()));
            return this;
        }

        public Metrics build() {
            if (metricGroup == null || metricGroup.name().isEmpty()) {
                throw new IllegalArgumentException("metricGroup must be specified for metrics");
            }
            return new Metrics(this);
        }
    }
}
| 7,825 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/measurement/Measurements.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics.measurement;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Collection;
import java.util.Map;
/**
 * Snapshot of the counter and gauge values of one metric group at a point in
 * time, plus the common tags to attach. Serializable to/from JSON via Jackson.
 */
// Moved @JsonIgnoreProperties from the constructor (where it has no effect on
// deserialization) to the class, where Jackson actually honors it.
@JsonIgnoreProperties(ignoreUnknown = true)
public class Measurements {

    // All fields are assigned once at construction; the class is immutable.
    private final Map<String, String> tags;
    private final String name;
    private final long timestamp;
    private final Collection<CounterMeasurement> counters;
    private final Collection<GaugeMeasurement> gauges;

    @JsonCreator
    public Measurements(
        @JsonProperty("name") String name,
        @JsonProperty("timestamp") long timestamp,
        @JsonProperty("counters") Collection<CounterMeasurement> counters,
        @JsonProperty("gauges") Collection<GaugeMeasurement> gauges,
        @JsonProperty("tags") Map<String, String> tags) {
        this.name = name;
        this.timestamp = timestamp;
        this.counters = counters;
        this.gauges = gauges;
        this.tags = tags;
    }

    /** @return the metric group id this snapshot belongs to */
    public String getName() {
        return name;
    }

    /** @return wall-clock millis at which the snapshot was taken */
    public long getTimestamp() {
        return timestamp;
    }

    /** @return the counter values captured in this snapshot */
    public Collection<CounterMeasurement> getCounters() {
        return counters;
    }

    /** @return the gauge values captured in this snapshot */
    public Collection<GaugeMeasurement> getGauges() {
        return gauges;
    }

    /** @return the common tags attached to this snapshot */
    public Map<String, String> getTags() {
        return tags;
    }

    @Override
    public String toString() {
        return "Measurements{" +
            "name='" + name + '\'' +
            ", timestamp=" + timestamp +
            ", tags=" + tags +
            ", counters=" + counters +
            ", gauges=" + gauges +
            '}';
    }
}
| 7,826 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/measurement/CounterMeasurement.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics.measurement;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
/**
 * Immutable, Jackson-serializable reading of a single counter: the event name
 * and its cumulative count at snapshot time.
 */
// NOTE: moved from the @JsonCreator constructor — Jackson ignores
// @JsonIgnoreProperties on a creator; it only takes effect at class level.
@JsonIgnoreProperties(ignoreUnknown = true)
public class CounterMeasurement {

    private final String event;
    private final long count;

    /**
     * @param event name of the counter event being measured
     * @param count cumulative count recorded for the event
     */
    @JsonCreator
    public CounterMeasurement(@JsonProperty("event") String event,
                              @JsonProperty("count") long count) {
        this.event = event;
        this.count = count;
    }

    public String getEvent() {
        return event;
    }

    public long getCount() {
        return count;
    }

    @Override
    public String toString() {
        return "CounterMeasurement{" +
                "event='" + event + '\'' +
                ", count=" + count +
                '}';
    }
}
| 7,827 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/measurement/GaugeMeasurement.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics.measurement;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
/**
 * Immutable, Jackson-serializable reading of a single gauge: the event name
 * and its value at snapshot time.
 */
// NOTE: moved from the @JsonCreator constructor — Jackson ignores
// @JsonIgnoreProperties on a creator; it only takes effect at class level.
@JsonIgnoreProperties(ignoreUnknown = true)
public class GaugeMeasurement {

    private final String event;
    private final double value;

    /**
     * @param event name of the gauge event being measured
     * @param value gauge value recorded for the event
     */
    @JsonCreator
    public GaugeMeasurement(@JsonProperty("event") String event,
                            @JsonProperty("value") double value) {
        this.event = event;
        this.value = value;
    }

    public String getEvent() {
        return event;
    }

    public double getValue() {
        return value;
    }

    @Override
    public String toString() {
        return "GaugeMeasurement{" +
                "event='" + event + '\'' +
                ", value=" + value +
                '}';
    }
}
| 7,828 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/netty/MantisNettyEventsListenerFactory.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics.netty;
import mantis.io.reactivex.netty.client.ClientMetricsEvent;
import mantis.io.reactivex.netty.client.RxClient;
import mantis.io.reactivex.netty.metrics.MetricEventsListener;
import mantis.io.reactivex.netty.metrics.MetricEventsListenerFactory;
import mantis.io.reactivex.netty.protocol.http.client.HttpClient;
import mantis.io.reactivex.netty.protocol.http.server.HttpServer;
import mantis.io.reactivex.netty.protocol.http.websocket.WebSocketClient;
import mantis.io.reactivex.netty.protocol.http.websocket.WebSocketServer;
import mantis.io.reactivex.netty.protocol.udp.client.UdpClient;
import mantis.io.reactivex.netty.protocol.udp.server.UdpServer;
import mantis.io.reactivex.netty.server.RxServer;
import mantis.io.reactivex.netty.server.ServerMetricsEvent;
/**
 * {@link MetricEventsListenerFactory} that attaches Mantis metrics listeners to
 * RxNetty clients and servers. Listener metric names are built from a
 * configurable client/server prefix plus the client's name (for clients) or the
 * server's port (for servers).
 *
 * <p>NOTE(review): the WebSocket factory methods below return {@code null}
 * (unimplemented) — confirm RxNetty tolerates a null listener before wiring
 * this factory to WebSocket clients/servers.
 *
 * @author Neeraj Joshi
 */
public class MantisNettyEventsListenerFactory extends MetricEventsListenerFactory {

    // Prefixes prepended to every listener's metric name.
    private final String clientMetricNamePrefix;
    private final String serverMetricNamePrefix;

    /** Creates a factory with the default "mantis-rxnetty-*" metric-name prefixes. */
    public MantisNettyEventsListenerFactory() {
        this("mantis-rxnetty-client-", "mantis-rxnetty-server-");
    }

    /**
     * @param clientMetricNamePrefix prefix for client-side listener metric names
     * @param serverMetricNamePrefix prefix for server-side listener metric names
     */
    public MantisNettyEventsListenerFactory(String clientMetricNamePrefix, String serverMetricNamePrefix) {
        this.clientMetricNamePrefix = clientMetricNamePrefix;
        this.serverMetricNamePrefix = serverMetricNamePrefix;
    }

    /** Builds a TCP client listener named after the client. */
    @Override
    public TcpClientListener<ClientMetricsEvent<ClientMetricsEvent.EventType>> forTcpClient(@SuppressWarnings("rawtypes") RxClient client) {
        return TcpClientListener.newListener(clientMetricNamePrefix + client.name());
    }

    /** Builds an HTTP client listener named after the client. */
    @Override
    public HttpClientListener forHttpClient(@SuppressWarnings("rawtypes") HttpClient client) {
        return HttpClientListener.newHttpListener(clientMetricNamePrefix + client.name());
    }

    /** Builds a UDP client listener named after the client. */
    @Override
    public UdpClientListener forUdpClient(@SuppressWarnings("rawtypes") UdpClient client) {
        return UdpClientListener.newUdpListener(clientMetricNamePrefix + client.name());
    }

    /** Builds a TCP server listener named after the server port. */
    @Override
    public TcpServerListener<ServerMetricsEvent<ServerMetricsEvent.EventType>> forTcpServer(@SuppressWarnings("rawtypes") RxServer server) {
        return TcpServerListener.newListener(serverMetricNamePrefix + server.getServerPort());
    }

    /** Builds an HTTP server listener named after the server port. */
    @Override
    public HttpServerListener forHttpServer(@SuppressWarnings("rawtypes") HttpServer server) {
        return HttpServerListener.newHttpListener(serverMetricNamePrefix + server.getServerPort());
    }

    /** Builds a UDP server listener named after the server port. */
    @Override
    public UdpServerListener forUdpServer(@SuppressWarnings("rawtypes") UdpServer server) {
        return UdpServerListener.newUdpListener(serverMetricNamePrefix + server.getServerPort());
    }

    /** Unimplemented: returns null (no metrics collected for WebSocket clients). */
    @Override
    public MetricEventsListener<ClientMetricsEvent<?>> forWebSocketClient(
            WebSocketClient client) {
        // TODO Auto-generated method stub
        return null;
    }

    /** Unimplemented: returns null (no metrics collected for WebSocket servers). */
    @Override
    public MetricEventsListener<ServerMetricsEvent<?>> forWebSocketServer(
            WebSocketServer server) {
        // TODO Auto-generated method stub
        return null;
    }
}
| 7,829 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/netty/TcpClientListener.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics.netty;
import static com.mantisrx.common.utils.MantisMetricStringConstants.GROUP_ID_TAG;
import com.netflix.spectator.api.BasicTag;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import mantis.io.reactivex.netty.client.ClientMetricsEvent;
import mantis.io.reactivex.netty.metrics.ClientMetricEventsListener;
/**
 * RxNetty {@link ClientMetricEventsListener} that publishes TCP client activity
 * (connect lifecycle, connection-pool activity, reads, writes and flushes) as
 * Mantis {@link Gauge}s and {@link Counter}s. All metrics are registered in the
 * "tcpClient" group, tagged with the supplied monitor id.
 *
 * @author Neeraj Joshi
 */
public class TcpClientListener<T extends ClientMetricsEvent<?>> extends ClientMetricEventsListener<T> {

    // Connection lifecycle metrics.
    private final Gauge liveConnections;
    private final Counter connectionCount;
    private final Gauge pendingConnects;
    private final Counter failedConnects;
    // private Timer connectionTimes;
    private final Gauge pendingConnectionClose;
    private final Counter failedConnectionClose;

    // Connection-pool metrics.
    private final Gauge pendingPoolAcquires;
    private final Counter failedPoolAcquires;
    //private Timer poolAcquireTimes;
    private final Gauge pendingPoolReleases;
    private final Counter failedPoolReleases;
    //private Timer poolReleaseTimes;
    private final Counter poolAcquires;
    private final Counter poolEvictions;
    private final Counter poolReuse;
    private final Counter poolReleases;

    // I/O metrics.
    private final Gauge pendingWrites;
    private final Gauge pendingFlushes;
    private final Counter bytesWritten;
    //private Timer writeTimes;
    private final Counter bytesRead;
    private final Counter failedWrites;
    private final Counter failedFlushes;
    //private Timer flushTimes;

    //private final RefCountingMonitor refCounter;
    // Retained for identification; its value (or "none" when null) becomes the GROUP_ID_TAG.
    private final String monitorId;

    /**
     * Creates the listener and registers its metrics with the global
     * {@link MetricsRegistry}.
     *
     * @param monitorId id used to tag this listener's metrics; may be null ("none" is substituted)
     */
    protected TcpClientListener(String monitorId) {
        this.monitorId = monitorId;
        final String idValue = Optional.ofNullable(monitorId).orElse("none");
        final BasicTag idTag = new BasicTag(GROUP_ID_TAG, idValue);
        Metrics m = new Metrics.Builder()
                .id("tcpClient", idTag)
                .addGauge("liveConnections")
                .addCounter("connectionCount")
                .addGauge("pendingConnects")
                .addCounter("failedConnects")
                .addGauge("pendingConnectionClose")
                .addCounter("failedConnectionClose")
                .addGauge("pendingPoolAcquires")
                .addCounter("failedPoolAcquires")
                .addGauge("pendingPoolReleases")
                .addCounter("failedPoolReleases")
                .addCounter("poolAcquires")
                .addCounter("poolEvictions")
                .addCounter("poolReuse")
                .addCounter("poolReleases")
                .addGauge("pendingWrites")
                .addGauge("pendingFlushes")
                .addCounter("bytesWritten")
                .addCounter("bytesRead")
                .addCounter("failedWrites")
                .addCounter("failedFlushes")
                .build();
        // registerAndGet may return a previously registered Metrics for the same id,
        // so all metric lookups below go through the returned instance.
        m = MetricsRegistry.getInstance().registerAndGet(m);
        //refCounter = new RefCountingMonitor(monitorId);
        liveConnections = m.getGauge("liveConnections");
        connectionCount = m.getCounter("connectionCount");
        pendingConnects = m.getGauge("pendingConnects");
        failedConnects = m.getCounter("failedConnects");
        //connectionTimes = newTimer("connectionTimes");
        pendingConnectionClose = m.getGauge("pendingConnectionClose");
        failedConnectionClose = m.getCounter("failedConnectionClose");
        pendingPoolAcquires = m.getGauge("pendingPoolAcquires");
        //poolAcquireTimes = newTimer("poolAcquireTimes");
        failedPoolAcquires = m.getCounter("failedPoolAcquires");
        pendingPoolReleases = m.getGauge("pendingPoolReleases");
        //poolReleaseTimes = newTimer("poolReleaseTimes");
        failedPoolReleases = m.getCounter("failedPoolReleases");
        poolAcquires = m.getCounter("poolAcquires");
        poolEvictions = m.getCounter("poolEvictions");
        poolReuse = m.getCounter("poolReuse");
        poolReleases = m.getCounter("poolReleases");
        pendingWrites = m.getGauge("pendingWrites");
        pendingFlushes = m.getGauge("pendingFlushes");
        bytesWritten = m.getCounter("bytesWritten");
        //writeTimes = newTimer("writeTimes");
        bytesRead = m.getCounter("bytesRead");
        failedWrites = m.getCounter("failedWrites");
        failedFlushes = m.getCounter("failedFlushes");
        //flushTimes = newTimer("flushTimes");
    }

    /** Factory for the common raw TCP client event type. */
    public static TcpClientListener<ClientMetricsEvent<ClientMetricsEvent.EventType>> newListener(String monitorId) {
        return new TcpClientListener<ClientMetricsEvent<ClientMetricsEvent.EventType>>(monitorId);
    }

    /** Accumulates bytes received from the server. */
    @Override
    protected void onByteRead(long bytesRead) {
        this.bytesRead.increment(bytesRead);
    }

    @Override
    protected void onFlushFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
        pendingFlushes.decrement();
        failedFlushes.increment();
    }

    @Override
    protected void onFlushSuccess(long duration, TimeUnit timeUnit) {
        pendingFlushes.decrement();
        //flushTimes.record(duration, timeUnit);
    }

    @Override
    protected void onFlushStart() {
        pendingFlushes.increment();
    }

    @Override
    protected void onWriteFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
        pendingWrites.decrement();
        failedWrites.increment();
    }

    @Override
    protected void onWriteSuccess(long duration, TimeUnit timeUnit, long bytesWritten) {
        pendingWrites.decrement();
        this.bytesWritten.increment(bytesWritten);
        //writeTimes.record(duration, timeUnit);
    }

    @Override
    protected void onWriteStart() {
        pendingWrites.increment();
    }

    // Pool releases: every terminal outcome (success or failure) counts toward poolReleases.
    @Override
    protected void onPoolReleaseFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
        pendingPoolReleases.decrement();
        poolReleases.increment();
        failedPoolReleases.increment();
    }

    @Override
    protected void onPoolReleaseSuccess(long duration, TimeUnit timeUnit) {
        pendingPoolReleases.decrement();
        poolReleases.increment();
        // poolReleaseTimes.record(duration, timeUnit);
    }

    @Override
    protected void onPoolReleaseStart() {
        pendingPoolReleases.increment();
    }

    @Override
    protected void onPooledConnectionEviction() {
        poolEvictions.increment();
    }

    @Override
    protected void onPooledConnectionReuse(long duration, TimeUnit timeUnit) {
        poolReuse.increment();
    }

    // Pool acquires: every terminal outcome (success or failure) counts toward poolAcquires.
    @Override
    protected void onPoolAcquireFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
        pendingPoolAcquires.decrement();
        poolAcquires.increment();
        failedPoolAcquires.increment();
    }

    @Override
    protected void onPoolAcquireSuccess(long duration, TimeUnit timeUnit) {
        pendingPoolAcquires.decrement();
        poolAcquires.increment();
        // poolAcquireTimes.record(duration, timeUnit);
    }

    @Override
    protected void onPoolAcquireStart() {
        pendingPoolAcquires.increment();
    }

    @Override
    protected void onConnectionCloseFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
        liveConnections.decrement(); // Even though the close failed, the connection isn't live.
        pendingConnectionClose.decrement();
        failedConnectionClose.increment();
    }

    @Override
    protected void onConnectionCloseSuccess(long duration, TimeUnit timeUnit) {
        liveConnections.decrement();
        pendingConnectionClose.decrement();
    }

    @Override
    protected void onConnectionCloseStart() {
        pendingConnectionClose.increment();
    }

    @Override
    protected void onConnectFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
        pendingConnects.decrement();
        failedConnects.increment();
    }

    @Override
    protected void onConnectSuccess(long duration, TimeUnit timeUnit) {
        pendingConnects.decrement();
        liveConnections.increment();
        connectionCount.increment();
        // connectionTimes.record(duration, timeUnit);
    }

    @Override
    protected void onConnectStart() {
        pendingConnects.increment();
    }

    // No-op lifecycle callbacks: nothing to record on stream completion/subscription.
    @Override
    public void onCompleted() {
    }

    @Override
    public void onSubscribe() {
    }
}
| 7,830 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/netty/TcpServerListener.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics.netty;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import java.util.concurrent.TimeUnit;
import mantis.io.reactivex.netty.metrics.ServerMetricEventsListener;
import mantis.io.reactivex.netty.server.ServerMetricsEvent;
/**
 * RxNetty {@link ServerMetricEventsListener} that publishes TCP server activity
 * (client connections, connection handling, reads, writes and flushes) as
 * Mantis {@link Gauge}s and {@link Counter}s. Metrics are registered under the
 * name "tcpServer_" + monitorId.
 *
 * @author Neeraj Joshi
 */
public class TcpServerListener<T extends ServerMetricsEvent<?>> extends ServerMetricEventsListener<T> {

    // Connection metrics.
    private final Gauge liveConnections;
    private final Gauge inflightConnections;
    private final Counter failedConnections;
    //private final Timer connectionProcessingTimes;
    private final Gauge pendingConnectionClose;
    private final Counter failedConnectionClose;
    //private final Timer connectionCloseTimes;

    // I/O metrics.
    private final Gauge pendingWrites;
    private final Gauge pendingFlushes;
    private final Counter bytesWritten;
    //private final Timer writeTimes;
    private final Counter bytesRead;
    private final Counter failedWrites;
    private final Counter failedFlushes;
    //private final Timer flushTimes;

    /**
     * Creates the listener and registers its metrics with the global
     * {@link MetricsRegistry}.
     *
     * @param monitorId id appended to the "tcpServer_" metric-group name
     */
    protected TcpServerListener(String monitorId) {
        Metrics m = new Metrics.Builder()
                .name("tcpServer_" + monitorId)
                .addGauge("liveConnections")
                .addGauge("inflightConnections")
                .addCounter("failedConnections")
                .addGauge("pendingConnectionClose")
                .addCounter("failedConnectionClose")
                .addGauge("pendingWrites")
                .addGauge("pendingFlushes")
                .addCounter("bytesWritten")
                .addCounter("bytesRead")
                .addCounter("failedWrites")
                .addCounter("failedFlushes")
                .build();
        // registerAndGet may return a previously registered Metrics for the same name,
        // so all metric lookups below go through the returned instance.
        m = MetricsRegistry.getInstance().registerAndGet(m);
        liveConnections = m.getGauge("liveConnections");
        inflightConnections = m.getGauge("inflightConnections");
        failedConnections = m.getCounter("failedConnections");
        pendingConnectionClose = m.getGauge("pendingConnectionClose");
        failedConnectionClose = m.getCounter("failedConnectionClose");
        // connectionProcessingTimes = newTimer("connectionProcessingTimes");
        // connectionCloseTimes = newTimer("connectionCloseTimes");
        pendingWrites = m.getGauge("pendingWrites");
        pendingFlushes = m.getGauge("pendingFlushes");
        bytesWritten = m.getCounter("bytesWritten");
        // writeTimes = newTimer("writeTimes");
        bytesRead = m.getCounter("bytesRead");
        failedWrites = m.getCounter("failedWrites");
        failedFlushes = m.getCounter("failedFlushes");
        // flushTimes = newTimer("flushTimes");
    }

    /** Factory for the common raw TCP server event type. */
    public static TcpServerListener<ServerMetricsEvent<ServerMetricsEvent.EventType>> newListener(String monitorId) {
        return new TcpServerListener<ServerMetricsEvent<ServerMetricsEvent.EventType>>(monitorId);
    }

    @Override
    protected void onConnectionHandlingFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
        inflightConnections.decrement();
        failedConnections.increment();
    }

    @Override
    protected void onConnectionHandlingSuccess(long duration, TimeUnit timeUnit) {
        inflightConnections.decrement();
        //connectionProcessingTimes.record(duration, timeUnit);
    }

    @Override
    protected void onConnectionHandlingStart(long duration, TimeUnit timeUnit) {
        inflightConnections.increment();
    }

    @Override
    protected void onConnectionCloseStart() {
        pendingConnectionClose.increment();
    }

    @Override
    protected void onConnectionCloseSuccess(long duration, TimeUnit timeUnit) {
        liveConnections.decrement();
        pendingConnectionClose.decrement();
        //connectionCloseTimes.record(duration, timeUnit);
    }

    @Override
    protected void onConnectionCloseFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
        // Even though the close failed, the connection is no longer considered live.
        liveConnections.decrement();
        pendingConnectionClose.decrement();
        // connectionCloseTimes.record(duration, timeUnit);
        failedConnectionClose.increment();
    }

    @Override
    protected void onNewClientConnected() {
        liveConnections.increment();
    }

    /** Accumulates bytes received from clients. */
    @Override
    protected void onByteRead(long bytesRead) {
        this.bytesRead.increment(bytesRead);
    }

    @Override
    protected void onFlushFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
        pendingFlushes.decrement();
        failedFlushes.increment();
    }

    @Override
    protected void onFlushSuccess(long duration, TimeUnit timeUnit) {
        pendingFlushes.decrement();
        // flushTimes.record(duration, timeUnit);
    }

    @Override
    protected void onFlushStart() {
        pendingFlushes.increment();
    }

    @Override
    protected void onWriteFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
        pendingWrites.decrement();
        failedWrites.increment();
    }

    @Override
    protected void onWriteSuccess(long duration, TimeUnit timeUnit, long bytesWritten) {
        pendingWrites.decrement();
        this.bytesWritten.increment(bytesWritten);
        //writeTimes.record(duration, timeUnit);
    }

    @Override
    protected void onWriteStart() {
        pendingWrites.increment();
    }

    // No-op lifecycle callbacks: nothing to record on stream completion/subscription.
    @Override
    public void onCompleted() {
    }

    @Override
    public void onSubscribe() {
    }

    // Accessors exposing current metric values (primarily for tests/inspection).

    public long getLiveConnections() {
        return liveConnections.value();
    }

    public long getInflightConnections() {
        return inflightConnections.value();
    }

    public long getFailedConnections() {
        return failedConnections.value();
    }

    //    public Timer getConnectionProcessingTimes() {
    //        return connectionProcessingTimes;
    //    }

    public long getPendingWrites() {
        return pendingWrites.value();
    }

    public long getPendingFlushes() {
        return pendingFlushes.value();
    }

    public long getBytesWritten() {
        return bytesWritten.value();
    }

    //    public Timer getWriteTimes() {
    //        return writeTimes;
    //    }

    public long getBytesRead() {
        return bytesRead.value();
    }

    public long getFailedWrites() {
        return failedWrites.value();
    }

    public long getFailedFlushes() {
        return failedFlushes.value();
    }

    //    public Timer getFlushTimes() {
    //        return flushTimes;
    //    }
}
| 7,831 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/netty/HttpServerListener.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics.netty;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import java.util.concurrent.TimeUnit;
import mantis.io.reactivex.netty.metrics.HttpServerMetricEventsListener;
import mantis.io.reactivex.netty.server.ServerMetricsEvent;
/**
 * HTTP-specific server metrics listener. Extends {@link TcpServerListener} with
 * request-level metrics (backlog, in-flight, processed/failed requests,
 * response-write failures), registered under "httpServer_" + monitorId. All
 * events are routed through an internal {@link HttpServerMetricEventsListener}
 * delegate, which forwards non-HTTP (TCP/connection) events back to the
 * superclass's handlers.
 *
 * @author Neeraj Joshi
 */
public class HttpServerListener extends TcpServerListener<ServerMetricsEvent<?>> {

    // Request-level metrics.
    private final Gauge requestBacklog;
    private final Gauge inflightRequests;
    private final Counter processedRequests;
    private final Counter failedRequests;
    private final Counter responseWriteFailed;
    //    private final Timer responseWriteTimes;
    //    private final Timer requestReadTimes;

    // Translates raw server metric events into the metric updates above.
    private final HttpServerMetricEventsListenerImpl delegate;

    /**
     * Creates the listener (including the TCP-level metrics via super) and
     * registers the HTTP-level metrics with the global {@link MetricsRegistry}.
     *
     * @param monitorId id appended to the "httpServer_" metric-group name
     */
    protected HttpServerListener(String monitorId) {
        super(monitorId);
        Metrics m = new Metrics.Builder()
                .name("httpServer_" + monitorId)
                .addGauge("requestBacklog")
                .addGauge("inflightRequests")
                .addCounter("processedRequests")
                .addCounter("failedRequests")
                .addCounter("responseWriteFailed")
                .build();
        // registerAndGet may return a previously registered Metrics for the same name,
        // so all metric lookups below go through the returned instance.
        m = MetricsRegistry.getInstance().registerAndGet(m);
        requestBacklog = m.getGauge("requestBacklog");
        inflightRequests = m.getGauge("inflightRequests");
        // responseWriteTimes = newTimer("responseWriteTimes");
        // requestReadTimes = newTimer("requestReadTimes");
        processedRequests = m.getCounter("processedRequests");
        failedRequests = m.getCounter("failedRequests");
        responseWriteFailed = m.getCounter("responseWriteFailed");
        delegate = new HttpServerMetricEventsListenerImpl();
    }

    /** Factory mirroring {@link TcpServerListener#newListener(String)}. */
    public static HttpServerListener newHttpListener(String monitorId) {
        return new HttpServerListener(monitorId);
    }

    /** Routes every event through the HTTP-aware delegate. */
    @Override
    public void onEvent(ServerMetricsEvent<?> event, long duration, TimeUnit timeUnit, Throwable throwable,
                        Object value) {
        delegate.onEvent(event, duration, timeUnit, throwable, value);
    }

    // Accessors exposing current metric values (primarily for tests/inspection).

    public long getRequestBacklog() {
        return requestBacklog.value();
    }

    public long getInflightRequests() {
        return inflightRequests.value();
    }

    public long getProcessedRequests() {
        return processedRequests.value();
    }

    public long getFailedRequests() {
        return failedRequests.value();
    }

    //    public Timer getResponseWriteTimes() {
    //        return responseWriteTimes;
    //    }
    //
    //    public Timer getRequestReadTimes() {
    //        return requestReadTimes;
    //    }

    public long getResponseWriteFailed() {
        return responseWriteFailed.value();
    }

    /**
     * Maps HTTP server metric events to the outer class's gauges/counters and
     * forwards connection/IO events to the enclosing {@link TcpServerListener}.
     */
    private class HttpServerMetricEventsListenerImpl extends HttpServerMetricEventsListener {

        @Override
        protected void onRequestHandlingFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
            processedRequests.increment();
            inflightRequests.decrement();
            failedRequests.increment();
        }

        @Override
        protected void onRequestHandlingSuccess(long duration, TimeUnit timeUnit) {
            inflightRequests.decrement();
            processedRequests.increment();
        }

        @Override
        protected void onResponseContentWriteSuccess(long duration, TimeUnit timeUnit) {
            // responseWriteTimes.record(duration, timeUnit);
        }

        @Override
        protected void onResponseHeadersWriteSuccess(long duration, TimeUnit timeUnit) {
            // responseWriteTimes.record(duration, timeUnit);
        }

        @Override
        protected void onResponseContentWriteFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
            responseWriteFailed.increment();
        }

        @Override
        protected void onResponseHeadersWriteFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
            responseWriteFailed.increment();
        }

        @Override
        protected void onRequestReceiveComplete(long duration, TimeUnit timeUnit) {
            // requestReadTimes.record(duration, timeUnit);
        }

        @Override
        protected void onRequestHandlingStart(long duration, TimeUnit timeUnit) {
            // Request leaves the backlog once handling begins; it stays in-flight
            // until handling completes or fails.
            requestBacklog.decrement();
        }

        @Override
        protected void onNewRequestReceived() {
            requestBacklog.increment();
            inflightRequests.increment();
        }

        // Connection/IO events below are forwarded to the enclosing TcpServerListener.

        @Override
        protected void onConnectionHandlingFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
            HttpServerListener.this.onConnectionHandlingFailed(duration, timeUnit, throwable);
        }

        @Override
        protected void onConnectionHandlingSuccess(long duration, TimeUnit timeUnit) {
            HttpServerListener.this.onConnectionHandlingSuccess(duration, timeUnit);
        }

        @Override
        protected void onConnectionHandlingStart(long duration, TimeUnit timeUnit) {
            HttpServerListener.this.onConnectionHandlingStart(duration, timeUnit);
        }

        @Override
        protected void onNewClientConnected() {
            HttpServerListener.this.onNewClientConnected();
        }

        @Override
        protected void onByteRead(long bytesRead) {
            HttpServerListener.this.onByteRead(bytesRead);
        }

        @Override
        protected void onFlushFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
            HttpServerListener.this.onFlushFailed(duration, timeUnit, throwable);
        }

        @Override
        protected void onFlushSuccess(long duration, TimeUnit timeUnit) {
            HttpServerListener.this.onFlushSuccess(duration, timeUnit);
        }

        @Override
        protected void onFlushStart() {
            HttpServerListener.this.onFlushStart();
        }

        @Override
        protected void onWriteFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
            HttpServerListener.this.onWriteFailed(duration, timeUnit, throwable);
        }

        @Override
        protected void onWriteSuccess(long duration, TimeUnit timeUnit, long bytesWritten) {
            HttpServerListener.this.onWriteSuccess(duration, timeUnit, bytesWritten);
        }

        @Override
        protected void onWriteStart() {
            HttpServerListener.this.onWriteStart();
        }
    }
}
| 7,832 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/netty/HttpClientListener.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics.netty;
import static com.mantisrx.common.utils.MantisMetricStringConstants.GROUP_ID_TAG;
import com.netflix.spectator.api.BasicTag;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import mantis.io.reactivex.netty.client.ClientMetricsEvent;
import mantis.io.reactivex.netty.metrics.HttpClientMetricEventsListener;
/**
* @author Neeraj Joshi
*/
public class HttpClientListener extends TcpClientListener<ClientMetricsEvent<?>> {
private final Gauge requestBacklog;
private final Gauge inflightRequests;
private final Counter processedRequests;
private final Counter requestWriteFailed;
private final Counter failedResponses;
// private final Timer requestWriteTimes;
// private final Timer responseReadTimes;
// private final Timer requestProcessingTimes;
private final HttpClientMetricEventsListenerImpl delegate = new HttpClientMetricEventsListenerImpl();
protected HttpClientListener(String monitorId) {
super(monitorId);
final String metricsGroup = "httpClient";
final String idValue = Optional.ofNullable(monitorId).orElse("none");
final BasicTag idTag = new BasicTag(GROUP_ID_TAG, idValue);
Metrics m = new Metrics.Builder()
.id(metricsGroup, idTag)
.addGauge("requestBacklog")
.addGauge("inflightRequests")
.addCounter("processedRequests")
.addCounter("requestWriteFailed")
.addCounter("failedResponses")
.build();
requestBacklog = m.getGauge("requestBacklog");
inflightRequests = m.getGauge("inflightRequests");
// requestWriteTimes = newTimer("requestWriteTimes");
// responseReadTimes = newTimer("responseReadTimes");
processedRequests = m.getCounter("processedRequests");
requestWriteFailed = m.getCounter("requestWriteFailed");
failedResponses = m.getCounter("failedResponses");
// requestProcessingTimes = newTimer("requestProcessingTimes");
}
public static HttpClientListener newHttpListener(String monitorId) {
return new HttpClientListener(monitorId);
}
@Override
public void onEvent(ClientMetricsEvent<?> event, long duration, TimeUnit timeUnit, Throwable throwable,
Object value) {
delegate.onEvent(event, duration, timeUnit, throwable, value);
}
public long getRequestBacklog() {
return (long) requestBacklog.doubleValue();
}
public long getInflightRequests() {
return (long) inflightRequests.doubleValue();
}
public long getProcessedRequests() {
return processedRequests.value();
}
public long getRequestWriteFailed() {
return requestWriteFailed.value();
}
public long getFailedResponses() {
return failedResponses.value();
}
// public Timer getRequestWriteTimes() {
// return requestWriteTimes;
// }
//
// public Timer getResponseReadTimes() {
// return responseReadTimes;
// }
    /**
     * Adapts raw client metric callbacks onto the metrics owned by the enclosing
     * {@link HttpClientListener}. HTTP request/response callbacks update the
     * request counters/gauges directly; all connection, pool, write and flush
     * callbacks are forwarded to the enclosing listener's TCP-level handlers.
     */
    private class HttpClientMetricEventsListenerImpl extends HttpClientMetricEventsListener {
        @Override
        protected void onRequestProcessingComplete(long duration, TimeUnit timeUnit) {
            // requestProcessingTimes.record(duration, timeUnit);
        }
        // A completed (or failed) response closes out an in-flight request.
        @Override
        protected void onResponseReceiveComplete(long duration, TimeUnit timeUnit) {
            inflightRequests.decrement();
            processedRequests.increment();
            // responseReadTimes.record(duration, timeUnit);
        }
        @Override
        protected void onResponseFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
            inflightRequests.decrement();
            processedRequests.increment();
            failedResponses.increment();
        }
        @Override
        protected void onRequestWriteComplete(long duration, TimeUnit timeUnit) {
            //requestWriteTimes.record(duration, timeUnit);
        }
        // Header and content write failures are folded into one counter.
        @Override
        protected void onRequestContentWriteFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
            requestWriteFailed.increment();
        }
        @Override
        protected void onRequestHeadersWriteFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
            requestWriteFailed.increment();
        }
        // Once headers start being written the request is no longer backlogged.
        @Override
        protected void onRequestHeadersWriteStart() {
            requestBacklog.decrement();
        }
        @Override
        protected void onRequestSubmitted() {
            requestBacklog.increment();
            inflightRequests.increment();
        }
        // Everything below simply forwards to the enclosing listener's
        // TCP-level handlers (read/flush/write/pool/connection events).
        @Override
        protected void onByteRead(long bytesRead) {
            HttpClientListener.this.onByteRead(bytesRead);
        }
        @Override
        protected void onFlushFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
            HttpClientListener.this.onFlushFailed(duration, timeUnit, throwable);
        }
        @Override
        protected void onFlushSuccess(long duration, TimeUnit timeUnit) {
            HttpClientListener.this.onFlushSuccess(duration, timeUnit);
        }
        @Override
        protected void onFlushStart() {
            HttpClientListener.this.onFlushStart();
        }
        @Override
        protected void onWriteFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
            HttpClientListener.this.onWriteFailed(duration, timeUnit, throwable);
        }
        @Override
        protected void onWriteSuccess(long duration, TimeUnit timeUnit, long bytesWritten) {
            HttpClientListener.this.onWriteSuccess(duration, timeUnit, bytesWritten);
        }
        @Override
        protected void onWriteStart() {
            HttpClientListener.this.onWriteStart();
        }
        @Override
        protected void onPoolReleaseFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
            HttpClientListener.this.onPoolReleaseFailed(duration, timeUnit, throwable);
        }
        @Override
        protected void onPoolReleaseSuccess(long duration, TimeUnit timeUnit) {
            HttpClientListener.this.onPoolReleaseSuccess(duration, timeUnit);
        }
        @Override
        protected void onPoolReleaseStart() {
            HttpClientListener.this.onPoolReleaseStart();
        }
        @Override
        protected void onPooledConnectionEviction() {
            HttpClientListener.this.onPooledConnectionEviction();
        }
        @Override
        protected void onPooledConnectionReuse(long duration, TimeUnit timeUnit) {
            HttpClientListener.this.onPooledConnectionReuse(duration, timeUnit);
        }
        @Override
        protected void onPoolAcquireFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
            HttpClientListener.this.onPoolAcquireFailed(duration, timeUnit, throwable);
        }
        @Override
        protected void onPoolAcquireSuccess(long duration, TimeUnit timeUnit) {
            HttpClientListener.this.onPoolAcquireSuccess(duration, timeUnit);
        }
        @Override
        protected void onPoolAcquireStart() {
            HttpClientListener.this.onPoolAcquireStart();
        }
        @Override
        protected void onConnectionCloseFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
            HttpClientListener.this.onConnectionCloseFailed(duration, timeUnit, throwable);
        }
        @Override
        protected void onConnectionCloseSuccess(long duration, TimeUnit timeUnit) {
            HttpClientListener.this.onConnectionCloseSuccess(duration, timeUnit);
        }
        @Override
        protected void onConnectionCloseStart() {
            HttpClientListener.this.onConnectionCloseStart();
        }
        @Override
        protected void onConnectFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
            HttpClientListener.this.onConnectFailed(duration, timeUnit, throwable);
        }
        @Override
        protected void onConnectSuccess(long duration, TimeUnit timeUnit) {
            HttpClientListener.this.onConnectSuccess(duration, timeUnit);
        }
        @Override
        protected void onConnectStart() {
            HttpClientListener.this.onConnectStart();
        }
    }
}
| 7,833 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/netty/UdpClientListener.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics.netty;
import java.util.concurrent.TimeUnit;
import mantis.io.reactivex.netty.client.ClientMetricsEvent;
/**
* @author Neeraj Joshi
*/
public class UdpClientListener extends TcpClientListener<ClientMetricsEvent<?>> {

    protected UdpClientListener(String monitorId) {
        super(monitorId);
    }

    /** Factory for a UDP client metrics listener scoped to {@code monitorId}. */
    public static UdpClientListener newUdpListener(String monitorId) {
        return new UdpClientListener(monitorId);
    }

    /**
     * Forwards only generic client-level events to the TCP listener; any other
     * event type is ignored.
     */
    @Override
    public void onEvent(ClientMetricsEvent<?> event, long duration, TimeUnit timeUnit, Throwable throwable,
            Object value) {
        final boolean isClientLevelEvent = event.getType() instanceof ClientMetricsEvent.EventType;
        if (!isClientLevelEvent) {
            return;
        }
        super.onEvent(event, duration, timeUnit, throwable, value);
    }
}
| 7,834 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/netty/UdpServerListener.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics.netty;
import java.util.concurrent.TimeUnit;
import mantis.io.reactivex.netty.server.ServerMetricsEvent;
/**
* @author Neeraj Joshi
*/
public class UdpServerListener extends TcpServerListener<ServerMetricsEvent<?>> {

    protected UdpServerListener(String monitorId) {
        super(monitorId);
    }

    /** Factory for a UDP server metrics listener scoped to {@code monitorId}. */
    public static UdpServerListener newUdpListener(String monitorId) {
        return new UdpServerListener(monitorId);
    }

    /**
     * Forwards only generic server-level events to the TCP listener; any other
     * event type is ignored.
     */
    @Override
    public void onEvent(ServerMetricsEvent<?> event, long duration, TimeUnit timeUnit, Throwable throwable,
            Object value) {
        final boolean isServerLevelEvent = event.getType() instanceof ServerMetricsEvent.EventType;
        if (!isServerLevelEvent) {
            return;
        }
        super.onEvent(event, duration, timeUnit, throwable, value);
    }
}
| 7,835 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/spectator/CounterImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics.spectator;
import com.netflix.spectator.api.Registry;
import io.mantisrx.common.metrics.Counter;
/**
 * {@link Counter} implementation backed by a spectator counter.
 */
public class CounterImpl implements Counter {

    private final MetricId id;
    private final com.netflix.spectator.api.Counter spectatorCounter;

    /**
     * Registers a spectator counter under the spectator id derived from {@code id}.
     *
     * @param id       mantis metric id (group, name, tags)
     * @param registry spectator registry to register with
     */
    public CounterImpl(final MetricId id,
                       final Registry registry) {
        this.id = id;
        this.spectatorCounter = registry.counter(id.getSpectatorId(registry));
    }

    @Override
    public void increment() {
        spectatorCounter.increment();
    }

    /**
     * Increments the counter by {@code x}.
     *
     * @throws IllegalArgumentException if {@code x} is negative
     */
    @Override
    public void increment(long x) {
        if (x < 0) {
            throw new IllegalArgumentException("Can't add negative numbers");
        }
        spectatorCounter.increment(x);
    }

    @Override
    public long value() {
        return spectatorCounter.count();
    }

    // Rate tracking is not supported by this spectator-backed implementation;
    // both rate accessors deliberately return -1.
    @Override
    public long rateValue() {
        return -1;
    }

    @Override
    public long rateTimeInMilliseconds() {
        return -1;
    }

    @Override
    public String event() {
        return spectatorCounter.id().toString();
    }

    @Override
    public MetricId id() {
        return id;
    }
}
| 7,836 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/spectator/GaugeCallback.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics.spectator;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Tag;
import com.netflix.spectator.api.patterns.PolledMeter;
import io.mantisrx.common.metrics.Gauge;
import rx.functions.Func0;
/**
 * Read-only {@link Gauge} whose value is produced by a user-supplied callback.
 * The gauge is registered with spectator's {@link PolledMeter}, which polls
 * {@link #doubleValue()} in the background; all mutator methods are no-ops.
 */
public class GaugeCallback implements Gauge {

    private final MetricId metricId;
    private final Id spectatorId;
    private final Func0<Double> valueCallback;

    /**
     * @param metricId      mantis metric id (group, name, tags)
     * @param valueCallback supplier of the current gauge value; invoked on every poll
     * @param registry      spectator registry to register with
     */
    public GaugeCallback(final MetricId metricId,
                         final Func0<Double> valueCallback,
                         final Registry registry) {
        this.metricId = metricId;
        this.spectatorId = metricId.getSpectatorId(registry);
        // Assign the callback BEFORE registering with PolledMeter: registration
        // makes this gauge visible to background polling, which may invoke
        // doubleValue() immediately; assigning afterwards risked an NPE on the
        // first poll.
        this.valueCallback = valueCallback;
        PolledMeter.using(registry).withId(spectatorId).monitorValue(this, GaugeCallback::doubleValue);
    }

    public GaugeCallback(final String metricGroup,
                         final String metricName,
                         final Func0<Double> valueCallback,
                         final Registry registry,
                         final Iterable<Tag> tags) {
        this(new MetricId(metricGroup, metricName, tags), valueCallback, registry);
    }

    public GaugeCallback(final String metricGroup,
                         final String metricName,
                         final Func0<Double> valueCallback,
                         final Registry registry,
                         final Tag... tags) {
        this(new MetricId(metricGroup, metricName, tags), valueCallback, registry);
    }

    /** Convenience overload using the shared {@link SpectatorRegistryFactory} registry. */
    public GaugeCallback(final String metricGroup,
                         final String metricName,
                         final Func0<Double> valueCallback,
                         final Tag... tags) {
        this(new MetricId(metricGroup, metricName, tags), valueCallback, SpectatorRegistryFactory.getRegistry());
    }

    /** Convenience overload deriving id and tags from a {@link MetricGroupId}. */
    public GaugeCallback(final MetricGroupId metricGroup,
                         final String metricName,
                         final Func0<Double> valueCallback) {
        this(new MetricId(metricGroup.name(), metricName, metricGroup.tags()), valueCallback, SpectatorRegistryFactory.getRegistry());
    }

    @Override
    public MetricId id() {
        return metricId;
    }

    @Override
    public String event() {
        return spectatorId.toString();
    }

    @Override
    public long value() {
        return valueCallback.call().longValue();
    }

    @Override
    public double doubleValue() {
        return valueCallback.call();
    }

    // The value is entirely callback-driven, so all mutators are no-ops.
    @Override
    public void increment() {}

    @Override
    public void decrement() {}

    @Override
    public void set(double value) {}

    @Override
    public void increment(double value) {}

    @Override
    public void decrement(double value) {}

    @Override
    public void set(long value) {}

    @Override
    public void increment(long value) {}

    @Override
    public void decrement(long value) {}
}
| 7,837 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/spectator/TimerImpl.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics.spectator;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.api.Registry;
import io.mantisrx.common.metrics.Timer;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
/**
 * {@link Timer} implementation that delegates all recording and reporting to a
 * spectator timer registered under the spectator id derived from the mantis
 * {@link MetricId}.
 */
public class TimerImpl implements Timer {
    private final MetricId id;
    private final com.netflix.spectator.api.Timer spectatorTimer;
    public TimerImpl(final MetricId id,
                     final Registry registry) {
        this.id = id;
        this.spectatorTimer = registry.timer(id.getSpectatorId(registry));
    }
    /** Records a duration of {@code amount} in the given unit. */
    @Override
    public void record(long amount, TimeUnit unit) {
        spectatorTimer.record(amount, unit);
    }
    /** Times {@code f}, records its duration, and returns its result (exceptions propagate). */
    @Override
    public <T> T record(Callable<T> f) throws Exception {
        return spectatorTimer.record(f);
    }
    /** Times {@code f} and records its duration. */
    @Override
    public void record(Runnable f) {
        spectatorTimer.record(f);
    }
    @Override
    public long count() {
        return spectatorTimer.count();
    }
    @Override
    public long totalTime() {
        return spectatorTimer.totalTime();
    }
    @Override
    public MetricId id() {
        return id;
    }
    @Override
    public Iterable<Measurement> measure() {
        return spectatorTimer.measure();
    }
    @Override
    public boolean hasExpired() {
        return spectatorTimer.hasExpired();
    }
}
| 7,838 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/spectator/GaugeImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics.spectator;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.patterns.PolledMeter;
import com.netflix.spectator.impl.AtomicDouble;
import io.mantisrx.common.metrics.Gauge;
/**
 * Mutable {@link Gauge} backed by an {@link AtomicDouble} that is registered
 * with spectator's {@link PolledMeter} and polled in the background.
 */
public class GaugeImpl implements Gauge {

    private final MetricId metricId;
    private final String event;
    // Backing value; PolledMeter samples it asynchronously.
    private final AtomicDouble value;

    /**
     * @param metricId mantis metric id (group, name, tags)
     * @param registry spectator registry to register the polled value with
     */
    public GaugeImpl(final MetricId metricId,
                     final Registry registry) {
        this.metricId = metricId;
        final Id spectatorId = metricId.getSpectatorId(registry);
        this.value = PolledMeter.using(registry)
                .withId(spectatorId)
                .monitorValue(new AtomicDouble());
        this.event = spectatorId.toString();
    }

    @Override
    public String event() {
        return event;
    }

    @Override
    public MetricId id() {
        return metricId;
    }

    /** @return current value truncated to long. */
    @Override
    public long value() {
        return this.value.longValue();
    }

    @Override
    public void set(double value) {
        this.value.set(value);
    }

    // All add/subtract paths use addAndGet consistently; the previous mix of
    // addAndGet/getAndAdd was equivalent (return value ignored) but inconsistent.
    @Override
    public void increment() {
        value.addAndGet(1.0);
    }

    @Override
    public void increment(double delta) {
        value.addAndGet(delta);
    }

    @Override
    public void decrement() {
        value.addAndGet(-1.0);
    }

    @Override
    public void decrement(double delta) {
        value.addAndGet(-delta);
    }

    @Override
    public double doubleValue() {
        return this.value.get();
    }

    @Override
    public void set(long x) {
        this.value.set(x);
    }

    @Override
    public void increment(long x) {
        value.addAndGet(x);
    }

    @Override
    public void decrement(long x) {
        value.addAndGet(-1.0 * x);
    }
}
| 7,839 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/spectator/MetricId.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics.spectator;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Tag;
import java.util.Arrays;
import java.util.Collections;
/**
 * Immutable identifier for a single metric: a metric group, a metric name and
 * an optional set of spectator tags.
 */
public class MetricId {

    private final String metricGroup;
    private final String metricName;
    private final Iterable<Tag> tags;

    /** Creates an id with no tags. */
    public MetricId(final String metricGroup, final String metricName) {
        this(metricGroup, metricName, Collections.emptyList());
    }

    public MetricId(final String metricGroup,
                    final String metricName,
                    final Iterable<Tag> tags) {
        this.metricGroup = metricGroup;
        this.metricName = metricName;
        this.tags = tags;
    }

    /** Varargs convenience overload. */
    public MetricId(final String metricGroup,
                    final String metricName,
                    final Tag... tags) {
        this(metricGroup, metricName, Arrays.asList(tags));
    }

    public String metricGroup() {
        return metricGroup;
    }

    public String metricName() {
        return metricName;
    }

    public Iterable<Tag> tags() {
        return tags;
    }

    /** Builds the spectator id as {@code "<group>_<name>"} plus this id's tags. */
    public Id getSpectatorId(final Registry registry) {
        final String combinedName = String.format("%s_%s", metricGroup, metricName);
        return registry.createId(combinedName, tags);
    }

    /** Renders {@code "name:key=value:key=value..."}; just the name when untagged. */
    public String metricNameWithTags() {
        if (!tags.iterator().hasNext()) {
            return metricName;
        }
        final StringBuilder sb = new StringBuilder(metricName);
        for (Tag tag : tags) {
            sb.append(':').append(tag.key()).append('=').append(tag.value());
        }
        return sb.toString();
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        final MetricId other = (MetricId) o;
        return metricGroup.equals(other.metricGroup)
                && metricName.equals(other.metricName)
                && tags.equals(other.tags);
    }

    @Override
    public int hashCode() {
        int h = metricGroup.hashCode();
        h = 31 * h + metricName.hashCode();
        h = 31 * h + tags.hashCode();
        return h;
    }

    @Override
    public String toString() {
        return "MetricId{"
                + "metricGroup='" + metricGroup + '\''
                + ", metricName='" + metricName + '\''
                + ", tags=" + tags
                + '}';
    }
}
| 7,840 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/spectator/MetricGroupId.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics.spectator;
import com.netflix.spectator.api.Tag;
import java.util.Arrays;
import java.util.Collections;
/**
 * Immutable identifier for a group of metrics: a name plus optional spectator
 * tags, with a precomputed {@code "name:key=value..."} id string.
 */
public class MetricGroupId {

    private final String name;
    private final Iterable<Tag> tags;
    private final String id;

    /** Creates a group id with no tags. */
    public MetricGroupId(final String name) {
        this(name, Collections.emptyList());
    }

    public MetricGroupId(final String name,
                         final Iterable<Tag> tags) {
        this.name = name;
        this.tags = tags;
        this.id = createId(name, tags);
    }

    /** Varargs convenience overload. */
    public MetricGroupId(final String name,
                         final Tag... tags) {
        this(name, Arrays.asList(tags));
    }

    /** Renders {@code "name:key=value:key=value..."}. */
    private String createId(final String name, final Iterable<Tag> tags) {
        final StringBuilder sb = new StringBuilder(name);
        for (Tag tag : tags) {
            sb.append(':').append(tag.key()).append('=').append(tag.value());
        }
        return sb.toString();
    }

    public String name() {
        return name;
    }

    public Iterable<Tag> tags() {
        return tags;
    }

    public String id() {
        return id;
    }

    /** Null-safe equality check used by {@link #equals(Object)}. */
    private static boolean eq(Object a, Object b) {
        return a == null ? b == null : a.equals(b);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        final MetricGroupId other = (MetricGroupId) o;
        return eq(name, other.name) && eq(tags, other.tags) && eq(id, other.id);
    }

    @Override
    public int hashCode() {
        int h = name != null ? name.hashCode() : 0;
        h = 31 * h + (tags != null ? tags.hashCode() : 0);
        h = 31 * h + (id != null ? id.hashCode() : 0);
        return h;
    }

    @Override
    public String toString() {
        return "MetricGroupId{"
                + "name='" + name + '\''
                + ", tags=" + tags
                + ", id='" + id + '\''
                + '}';
    }
}
| 7,841 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/spectator/SpectatorRegistryFactory.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics.spectator;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Spectator;
import java.util.concurrent.atomic.AtomicReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class SpectatorRegistryFactory {
private static final Logger logger = LoggerFactory.getLogger(SpectatorRegistryFactory.class);
private static final AtomicReference<Registry> registryRef = new AtomicReference<>(null);
public static Registry getRegistry() {
if (registryRef.get() == null) {
return Spectator.globalRegistry();
} else {
return registryRef.get();
}
}
public static void setRegistry(final Registry registry) {
if (registry != null && registryRef.compareAndSet(null, registry)) {
logger.info("spectator registry : {}", registryRef.get().getClass().getCanonicalName());
}
}
} | 7,842 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/metrics/rx/MonitorOperator.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.metrics.rx;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable.Operator;
import rx.Subscriber;
import rx.functions.Action0;
import rx.subscriptions.Subscriptions;
/**
 * Rx {@link Operator} that passes elements through unchanged while publishing
 * onNext/onError/onComplete counts and the number of active subscriptions as
 * mantis metrics under the given name.
 *
 * @param <T> element type of the observed stream
 */
public class MonitorOperator<T> implements Operator<T, T> {

    private static final Logger logger = LoggerFactory.getLogger(MonitorOperator.class);

    // Fields made final: they are assigned exactly once in the constructor.
    private final Counter next;
    // Mirrors the onNext counter's running total as a gauge.
    private final Gauge nextGauge;
    private final Gauge error;
    private final Gauge complete;
    // Incremented on subscribe, decremented on unsubscribe.
    private final Gauge subscribe;
    private final String name;

    /**
     * @param name metric group name under which this operator's metrics are registered
     */
    public MonitorOperator(String name) {
        this.name = name;
        Metrics m =
                new Metrics.Builder()
                        .name(name)
                        .addCounter("onNext")
                        .addGauge("onError")
                        .addGauge("onComplete")
                        .addGauge("subscribe")
                        .addGauge("onNextGauge")
                        .build();
        // registerAndGet may return an existing Metrics instance for this name.
        m = MetricsRegistry.getInstance().registerAndGet(m);
        next = m.getCounter("onNext");
        error = m.getGauge("onError");
        complete = m.getGauge("onComplete");
        subscribe = m.getGauge("subscribe");
        nextGauge = m.getGauge("onNextGauge");
    }

    @Override
    public Subscriber<? super T> call(final Subscriber<? super T> o) {
        subscribe.increment();
        // Decrement the subscription gauge when the downstream unsubscribes.
        o.add(Subscriptions.create(new Action0() {
            @Override
            public void call() {
                subscribe.decrement();
            }
        }));
        return new Subscriber<T>(o) {
            @Override
            public void onCompleted() {
                logger.debug("onCompleted() called for monitored observable with name: " + name);
                complete.increment();
                o.onCompleted();
            }
            @Override
            public void onError(Throwable e) {
                logger.error("onError() called for monitored observable with name: " + name, e);
                error.increment();
                o.onError(e);
            }
            @Override
            public void onNext(T t) {
                next.increment();
                nextGauge.set(next.value());
                o.onNext(t);
            }
        };
    }
}
| 7,843 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/messages/MantisMetaMessage.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.messages;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
/**
 * Base class for "mantis.meta" metadata messages emitted on a Mantis stream
 * alongside regular data (e.g. dropped-message counts).
 */
public abstract class MantisMetaMessage {
    // Used by subclasses to render themselves as JSON.
    // NOTE(review): an ObjectMapper is created per message instance; a shared
    // static mapper would be cheaper — confirm before changing.
    final protected ObjectMapper mapper = new ObjectMapper();
    /** @return the meta message type tag (value of the "mantis.meta" field). */
    public abstract String getType();
    /** @return the timestamp associated with this message (epoch millis, per subclass usage). */
    public abstract long getTime();
    /** @return the message payload rendered as a string. */
    public abstract String getValue();
}
| 7,844 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/messages/MantisMetaDroppedMessage.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.messages;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
/**
 * Meta message reporting the number of messages dropped on a stream.
 * Serializes to {@code {"mantis.meta":"droppedMessageCount","value":"<count>"}}.
 */
public class MantisMetaDroppedMessage extends MantisMetaMessage {

    private final long time;
    @JsonProperty("mantis.meta")
    private final String type = "droppedMessageCount";
    @JsonProperty("value")
    private final String value;

    /**
     * @param dropCount number of dropped messages to report
     * @param time      timestamp (epoch millis) associated with the count
     */
    public MantisMetaDroppedMessage(long dropCount, long time) {
        this.time = time;
        this.value = String.valueOf(dropCount);
    }

    // Manual smoke test; kept for backwards compatibility.
    public static void main(String[] args) {
        MantisMetaDroppedMessage m = new MantisMetaDroppedMessage(2, System.currentTimeMillis());
        System.out.println("M " + m.toString());
    }

    @Override
    public String getValue() {
        return this.value;
    }

    @Override
    public long getTime() {
        return this.time;
    }

    @Override
    public String getType() {
        return type;
    }

    /**
     * Renders this message as JSON; on serialization failure returns a
     * best-effort JSON error object instead of throwing.
     */
    @Override
    public String toString() {
        try {
            return mapper.writeValueAsString(this);
        } catch (JsonProcessingException e) {
            e.printStackTrace();
            // Fixed: the previous fallback concatenated the exception message
            // directly after "error", producing invalid JSON. The message is
            // still not escaped, so embedded quotes could break it — acceptable
            // for a diagnostic-only string.
            return "{\"mantis.meta\":\"error\",\"value\":\"" + e.getMessage() + "\"}";
        }
    }
}
| 7,845 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/util/DateTimeExt.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.util;
import java.time.Instant;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
/**
 * Static helpers for date/time formatting.
 */
public class DateTimeExt {

    /** ISO local date-time formatter pinned to the UTC zone. */
    private static final DateTimeFormatter UTC_FORMATTER =
            DateTimeFormatter.ISO_LOCAL_DATE_TIME.withZone(ZoneId.of("UTC"));

    private DateTimeExt() {
    }

    /**
     * Formats an epoch-millisecond timestamp as an ISO local date-time in UTC
     * with a literal 'Z' suffix (e.g. {@code 1970-01-01T00:00:01Z}).
     *
     * @param msSinceEpoch milliseconds since the epoch; {@code 0} is treated as "unset"
     * @return the formatted UTC timestamp, or {@code null} when {@code msSinceEpoch} is 0
     */
    public static String toUtcDateTimeString(long msSinceEpoch) {
        return msSinceEpoch == 0L
                ? null
                : UTC_FORMATTER.format(Instant.ofEpochMilli(msSinceEpoch)) + 'Z';
    }
}
| 7,846 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/codec/Decoder.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.codec;
public interface Decoder<T> {
public T decode(byte[] bytes);
}
| 7,847 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/codec/Codecs.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.codec;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectInputStream;
import java.io.ObjectOutput;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
/**
 * Factory methods for common {@link Codec} implementations (primitive numbers,
 * strings in various charsets, raw bytes, and Java serialization).
 */
public class Codecs {

    /** @return codec for a 4-byte big-endian {@code int}. */
    public static Codec<Integer> integer() {
        return new Codec<Integer>() {
            @Override
            public Integer decode(byte[] bytes) {
                return ByteBuffer.wrap(bytes).getInt();
            }

            @Override
            public byte[] encode(final Integer value) {
                return ByteBuffer.allocate(4).putInt(value).array();
            }
        };
    }

    /** @return codec for an 8-byte big-endian {@code long}. */
    public static Codec<Long> longNumber() {
        return new Codec<Long>() {
            @Override
            public Long decode(byte[] bytes) {
                return ByteBuffer.wrap(bytes).getLong();
            }

            @Override
            public byte[] encode(final Long value) {
                return ByteBuffer.allocate(8).putLong(value).array();
            }
        };
    }

    /** @return codec encoding/decoding strings with the named charset. */
    private static Codec<String> stringWithEncoding(String encoding) {
        final Charset charset = Charset.forName(encoding);
        return new Codec<String>() {
            @Override
            public String decode(byte[] bytes) {
                return new String(bytes, charset);
            }

            @Override
            public byte[] encode(final String value) {
                return value.getBytes(charset);
            }
        };
    }

    /**
     * Codec for ASCII strings. Encoding writes the low byte of each char
     * directly (so non-ASCII characters are truncated, not replaced), while
     * decoding uses the US-ASCII charset; the asymmetry is preserved from the
     * original implementation for compatibility.
     */
    public static Codec<String> stringAscii() {
        // StandardCharsets constant instead of Charset.forName("US-ASCII").
        final Charset charset = StandardCharsets.US_ASCII;
        return new Codec<String>() {
            @Override
            public String decode(byte[] bytes) {
                return new String(bytes, charset);
            }

            @Override
            public byte[] encode(final String value) {
                final byte[] bytes = new byte[value.length()];
                for (int i = 0; i < value.length(); i++)
                    bytes[i] = (byte) value.charAt(i);
                return bytes;
            }
        };
    }

    /** @return codec for UTF-8 strings. */
    public static Codec<String> stringUtf8() {
        return stringWithEncoding(StandardCharsets.UTF_8.name());
    }

    /** @return the default string codec (UTF-8). */
    public static Codec<String> string() {
        return stringUtf8();
    }

    /** @return identity codec that passes byte arrays through unchanged (no copy). */
    public static Codec<byte[]> bytearray() {
        return new Codec<byte[]>() {
            @Override
            public byte[] decode(byte[] bytes) {
                return bytes;
            }

            @Override
            public byte[] encode(final byte[] value) {
                return value;
            }
        };
    }

    /**
     * Codec using Java native serialization. IO/class-lookup failures are
     * rethrown as {@link RuntimeException}.
     *
     * <p>NOTE(review): native deserialization of untrusted bytes is unsafe;
     * only use this codec for trusted, in-cluster data.
     */
    public static <T extends Serializable> Codec<T> javaSerializer() {
        return new Codec<T>() {
            @SuppressWarnings("unchecked")
            @Override
            public T decode(byte[] bytes) {
                // try-with-resources so the stream is always closed.
                try (ObjectInput in = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
                    return (T) in.readObject();
                } catch (IOException | ClassNotFoundException e) {
                    throw new RuntimeException(e);
                }
            }

            @Override
            public byte[] encode(T value) {
                try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
                    try (ObjectOutput out = new ObjectOutputStream(bos)) {
                        out.writeObject(value);
                    }
                    return bos.toByteArray();
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }
        };
    }
}
| 7,848 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/codec/Codec.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.codec;
/**
 * A symmetric pair of {@link Encoder} and {@link Decoder} for a single type.
 *
 * <p>Implementations are expected to round-trip: {@code decode(encode(v))}
 * yields a value equal to {@code v}.
 */
public interface Codec<T> extends Encoder<T>, Decoder<T> {
}
| 7,849 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/codec/Encoder.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.codec;
/**
 * Converts a value of type {@code T} into its byte-array wire representation.
 */
public interface Encoder<T> {

    /**
     * Serializes the supplied value.
     *
     * @param value the value to serialize
     * @return the encoded bytes
     */
    byte[] encode(T value);
}
| 7,850 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/network/WritableEndpointConfiguration.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.network;
import java.nio.charset.Charset;
/**
* An implementation of {@link NodeConfiguration} for the {@link Endpoint}
* node type
*
* @author pkamath
*/
/**
 * An implementation of {@link NodeConfiguration} for the {@link WritableEndpoint}
 * node type: each ring key is {@code "<slotId>-<repetition>"}.
 *
 * @author pkamath
 */
public class WritableEndpointConfiguration<T> extends NodeConfiguration<WritableEndpoint<T>> {

    // Fix: was a non-final static; now final so the shared charset cannot be
    // reassigned.
    private static final Charset UTF8 = Charset.forName("UTF-8");

    @Override
    public byte[] getKeyForNode(WritableEndpoint<T> node, int repetition) {
        // String concatenation converts the int directly; no explicit
        // Integer.toString needed.
        return (node.getSlotId() + "-" + repetition).getBytes(UTF8);
    }
}
| 7,851 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/network/NodeConfiguration.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.network;
/**
* Defines the set of all configuration for a node in the consistent hash ring
*/
/**
 * Defines the set of all configuration for a node in the consistent hash ring.
 */
public abstract class NodeConfiguration<T> {

    // Default number of virtual-node repetitions per physical node; see
    // getNodeRepetitions() for the rationale behind 160.
    final int NUM_REPS = 160;

    /**
     * Returns a uniquely identifying key, suitable for hashing by the
     * {@link HashAlgorithm}. A possible implementation for the return value
     * could be: {@code <hostname> + ":" + <port> + "-" + <repetition>}.
     *
     * @param node       the node instance to use to form the unique identifier
     * @param repetition the repetition number for the particular node in
     *                   question (0 is the first repetition)
     * @return the key that represents the specific repetition of the node
     */
    public abstract byte[] getKeyForNode(T node, int repetition);

    /**
     * Returns the number of discrete hashes that should be defined for each
     * node in the continuum. For example if each physical node should be
     * represented as 255 virtual nodes in the consistent hash ring, this
     * function should return 255.
     *
     * @return a value greater than 0 (default is 160 as is used in
     *         spy-memcached for {@link HashAlgorithm#KETAMA_HASH}; this is
     *         also used by the evcache code)
     */
    public int getNodeRepetitions() {
        return NUM_REPS;
    }
}
| 7,852 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/network/WritableEndpoint.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.network;
import com.netflix.spectator.api.BasicTag;
import io.reactivx.mantis.operators.DropOperator;
import java.util.Optional;
import mantis.io.reactivex.netty.channel.ObservableConnection;
import rx.Observable;
import rx.subjects.PublishSubject;
import rx.subjects.SerializedSubject;
import rx.subjects.Subject;
/**
 * An {@link Endpoint} that can also be written to: values handed to
 * {@link #write(Object)} flow through an internal serialized subject and are
 * surfaced to consumers via {@link #read()}.
 *
 * <p>NOTE(review): unlike {@link Endpoint#equals(Object)}, equality, hashCode
 * and ordering here are based on slotId only (host/port are ignored) --
 * confirm this asymmetry is intentional before mixing both types in one
 * collection.
 */
public class WritableEndpoint<T> extends Endpoint implements Comparable<WritableEndpoint<T>> {

    // Serialized subject so write()/error()/complete() may be invoked from
    // multiple threads.
    private Subject<T, T> subject;
    // Optional backing connection; closed by explicitClose() when present.
    private ObservableConnection<?, ?> connection;

    public WritableEndpoint(String host, int port, String slotId) {
        this(host, port, slotId, null);
    }

    public WritableEndpoint(String host, int port, String slotId,
                            ObservableConnection<?, ?> connection) {
        super(host, port, slotId);
        subject = new SerializedSubject<T, T>(PublishSubject.<T>create());
        this.connection = connection;
    }

    public WritableEndpoint(String host, int port) {
        super(host, port);
        subject = new SerializedSubject<T, T>(PublishSubject.<T>create());
    }

    /** Pushes a value to all current subscribers of {@link #read()}. */
    public void write(T value) {
        subject.onNext(value);
    }

    /**
     * Closes the underlying connection if one was supplied; the boolean
     * argument's semantics are defined by rx-netty's ObservableConnection.
     */
    public void explicitClose() {
        if (connection != null) {
            connection.close(true);
        }
    }

    /** Completes the value stream, then closes the backing connection. */
    public void complete() {
        subject.onCompleted();
        explicitClose();
    }

    /**
     * Stream of written values, lifted through DropOperator with metrics
     * tagged by this endpoint's slot id ("none" when the slot id is null).
     */
    public Observable<T> read() {
        return subject
                .lift(new DropOperator<>("outgoing_subject", new BasicTag("slotId", Optional.ofNullable(slotId).orElse("none"))));
    }

    @Override
    public String toString() {
        return "WritableEndpoint [" + super.toString() + "]";
    }

    /** Fails the value stream with {@code e}, then closes the connection. */
    public void error(Throwable e) {
        subject.onError(e);
        explicitClose();
    }

    // slotId-only hash, consistent with the slotId-only equals() below.
    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((slotId == null) ? 0 : slotId.hashCode());
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        // NOTE(review): cast is to the base type and only slotId is compared;
        // host/port differences are ignored here.
        Endpoint other = (Endpoint) obj;
        if (slotId == null) {
            if (other.slotId != null)
                return false;
        } else if (!slotId.equals(other.slotId))
            return false;
        return true;
    }

    @Override
    public int compareTo(WritableEndpoint<T> o) {
        if (this.equals(o)) {
            return 0;
        } else {
            // NOTE(review): arguments look reversed (o compared against this),
            // yielding descending slot-id order -- confirm the ordering used by
            // sorted collections (e.g. ConcurrentSkipListSet) is intended.
            return o.getSlotId().compareTo(getSlotId());
        }
    }
}
| 7,853 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/network/HashFunctions.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.network;
/**
 * Factory methods for {@link HashFunction} instances backed by the known
 * {@link HashAlgorithm} variants.
 */
public class HashFunctions {

    private HashFunctions() {}

    /** Hash function backed by {@link HashAlgorithm#CRC32_HASH}. */
    public static HashFunction crc32() {
        return HashAlgorithm.CRC32_HASH::hash;
    }

    /** Hash function backed by {@link HashAlgorithm#KETAMA_HASH}. */
    public static HashFunction ketama() {
        return HashAlgorithm.KETAMA_HASH::hash;
    }
}
| 7,854 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/network/HashAlgorithm.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.network;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.zip.CRC32;
/**
* Known hashing algorithms for locating a server for a key.
* Note that all hash algorithms return 64-bits of hash, but only the lower
* 32-bits are significant. This allows a positive 32-bit number to be
* returned for all cases.
*/
/**
 * Known hashing algorithms for locating a server for a key.
 *
 * <p>Each algorithm yields 64 bits of hash but only the lower 32 bits are
 * significant, so a non-negative value fitting an unsigned 32-bit range is
 * always returned.
 */
public enum HashAlgorithm {

    CRC32_HASH,
    KETAMA_HASH;

    /**
     * Computes the MD5 digest of the given bytes.
     *
     * @param keyBytes input to digest
     * @return the 16-byte MD5 digest
     * @throws RuntimeException if the JVM lacks an MD5 provider
     */
    public static byte[] computeMd5(byte[] keyBytes) {
        final MessageDigest digest;
        try {
            digest = MessageDigest.getInstance("MD5");
        } catch (NoSuchAlgorithmException e) {
            throw new RuntimeException("MD5 not supported", e);
        }
        digest.reset();
        digest.update(keyBytes);
        return digest.digest();
    }

    /**
     * Compute the hash for the given key.
     *
     * @param keyBytes key to hash
     * @return a non-negative hash truncated to 32 bits
     */
    public long hash(final byte[] keyBytes) {
        long raw = 0;
        if (this == CRC32_HASH) {
            // Equivalent to (crc32(key) >> 16) & 0x7fff.
            final CRC32 checksum = new CRC32();
            checksum.update(keyBytes);
            raw = (checksum.getValue() >> 16) & 0x7fff;
        } else if (this == KETAMA_HASH) {
            // First four MD5 bytes, read with byte 0 as least significant.
            final byte[] digest = computeMd5(keyBytes);
            raw = ((long) (digest[3] & 0xFF) << 24)
                    | ((long) (digest[2] & 0xFF) << 16)
                    | ((long) (digest[1] & 0xFF) << 8)
                    | (digest[0] & 0xFF);
        } else {
            assert false;
        }
        return raw & 0xffffffffL; /* Truncate to 32-bits */
    }
}
| 7,855 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/network/Endpoint.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.network;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.functions.Action0;
import rx.functions.Action1;
public class Endpoint {
private static final Logger logger = LoggerFactory.getLogger(Endpoint.class);
final String slotId;
private final String host;
private final int port;
private final Action0 completedCallback;
private final Action1<Throwable> errorCallback;
public Endpoint(final String host, final int port) {
this(host, port, uniqueHost(host, port, null));
}
public Endpoint(final String host, final int port, final String slotId) {
this(host,
port,
slotId,
new Action0() {
@Override
public void call() {
logger.info("onComplete received for {}:{} slotId {}", host, port, slotId);
}
},
new Action1<Throwable>() {
@Override
public void call(Throwable t1) {
logger.warn("onError for {}:{} slotId {} err {}", host, port, slotId, t1.getMessage(), t1);
}
});
}
public Endpoint(final String host, final int port, final Action0 completedCallback,
final Action1<Throwable> errorCallback) {
this(host, port, uniqueHost(host, port, null), completedCallback, errorCallback);
}
public Endpoint(final String host, final int port, final String slotId, final Action0 completedCallback,
final Action1<Throwable> errorCallback) {
this.host = host;
this.port = port;
this.slotId = slotId;
this.completedCallback = completedCallback;
this.errorCallback = errorCallback;
}
public static String uniqueHost(final String host, final int port, final String slotId) {
if (slotId == null) {
return host + ":" + port;
}
return host + ":" + port + ":" + slotId;
}
public String getSlotId() {
return slotId;
}
public String getHost() {
return host;
}
public int getPort() {
return port;
}
public Action0 getCompletedCallback() {
return completedCallback;
}
public Action1<Throwable> getErrorCallback() {
return errorCallback;
}
@Override
public String toString() {
return "Endpoint{" +
"host='" + host + '\'' +
", port=" + port +
", slotId='" + slotId + '\'' +
'}';
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((host == null) ? 0 : host.hashCode());
result = prime * result + port;
result = prime * result + ((slotId == null) ? 0 : slotId.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
Endpoint other = (Endpoint) obj;
if (host == null) {
if (other.host != null)
return false;
} else if (!host.equals(other.host))
return false;
if (port != other.port)
return false;
if (slotId == null) {
if (other.slotId != null)
return false;
} else if (!slotId.equals(other.slotId))
return false;
return true;
}
}
| 7,856 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/network/HashFunction.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.network;
import rx.functions.Func1;
/**
 * Hashes a {@code byte[]} key to a boxed 64-bit value; only the lower 32 bits
 * are significant (see {@link HashAlgorithm#hash(byte[])}).
 */
public interface HashFunction extends Func1<byte[], Long> {
}
| 7,857 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/network/WorkerEndpoint.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.network;
import rx.functions.Action0;
import rx.functions.Action1;
/**
 * An {@link Endpoint} enriched with Mantis worker coordinates: stage number,
 * metrics port, worker index and worker number. Equality extends
 * {@link Endpoint#equals(Object)} with all four extra fields.
 */
public class WorkerEndpoint extends Endpoint {

    private final int stage;
    private final int metricPort;
    private final int workerIndex;
    private final int workerNum;

    public WorkerEndpoint(String host, int port, int stage, int metricPort, int workerIndex, int workerNum) {
        super(host, port);
        this.stage = stage;
        this.metricPort = metricPort;
        this.workerIndex = workerIndex;
        this.workerNum = workerNum;
    }

    public WorkerEndpoint(String host, int port, String slotId, int stage, int metricPort, int workerIndex, int workerNum) {
        super(host, port, slotId);
        this.stage = stage;
        this.metricPort = metricPort;
        this.workerIndex = workerIndex;
        this.workerNum = workerNum;
    }

    public WorkerEndpoint(String host, int port, int stage, int metricPort, int workerIndex, int workerNum,
                          Action0 completedCallback, Action1<Throwable> errorCallback) {
        super(host, port, completedCallback, errorCallback);
        this.stage = stage;
        this.metricPort = metricPort;
        this.workerIndex = workerIndex;
        this.workerNum = workerNum;
    }

    public WorkerEndpoint(String host, int port, String slotId, int stage, int metricPort, int workerIndex, int workerNum,
                          Action0 completedCallback, Action1<Throwable> errorCallback) {
        super(host, port, slotId, completedCallback, errorCallback);
        this.stage = stage;
        this.metricPort = metricPort;
        this.workerIndex = workerIndex;
        this.workerNum = workerNum;
    }

    public int getStage() {
        return stage;
    }

    public int getMetricPort() {
        return metricPort;
    }

    public int getWorkerIndex() {
        return workerIndex;
    }

    public int getWorkerNum() {
        return workerNum;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass() || !super.equals(o)) {
            return false;
        }
        final WorkerEndpoint that = (WorkerEndpoint) o;
        return stage == that.stage
                && metricPort == that.metricPort
                && workerIndex == that.workerIndex
                && workerNum == that.workerNum;
    }

    @Override
    public int hashCode() {
        // Identical 31-based accumulation (and values) as the original form.
        int h = super.hashCode();
        h = 31 * h + stage;
        h = 31 * h + metricPort;
        h = 31 * h + workerIndex;
        h = 31 * h + workerNum;
        return h;
    }

    @Override
    public String toString() {
        return "WorkerEndpoint{" +
                "stage=" + stage +
                ", metricPort=" + metricPort +
                ", workerIndex=" + workerIndex +
                ", workerNum=" + workerNum +
                ", endpoint=" + super.toString() +
                '}';
    }
}
| 7,858 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/network/ConsistentHash.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.network;
import java.util.Collection;
import java.util.SortedMap;
import java.util.TreeMap;
/**
* A Consistent Hashing implementation class which can work with existing node
* types representing nodes to which requests are handed.
* <p>
* This class is by design not thread-safe for node mutations
* (Additions/removals) Such mutations should be handled externally and a new
* instance of this class should be created with the new node list. This allows
* for lock free implementation in this class keeping the code performant. (For
* instance, in the caller application there can be a thread to poll discovery
* every so often to get an updated node list and if there are differences
* between the previously held list and the new one, a new instance of
* {@link ConsistentHash} can be created and the old one can be swapped with the
* new one. Also the calling application can decide how nodes map to keys used
* for hashing through the {@link NodeConfiguration} implementation - for
* instance they can ensure that new nodes take the same "slot" as an old one it
* is replacing by keeping track of global slots.)
* <p>
* Callers should verify that the node returned by
* {@link ConsistentHash#get(byte[])} is alive before using it to retrieve the
* value.
* <p>
* Copied mostly from http://weblogs.java.net/blog/2007/11/27/consistent-hashing
* Also inspired by KetamaNodeLocator in spy memcached code
*
* @param <T> Type representing a node object
*
* @author pkamath
*/
/**
 * A consistent-hashing ring over nodes of type {@code T}.
 *
 * <p>This class is by design not thread-safe for node mutations: handle
 * additions/removals externally by building a new instance with the new node
 * list and swapping it in. The caller controls how nodes map to ring keys via
 * the {@link NodeConfiguration} implementation.
 *
 * <p>Callers should verify that the node returned by {@link #get(byte[])} is
 * alive before using it. Copied mostly from
 * http://weblogs.java.net/blog/2007/11/27/consistent-hashing and inspired by
 * KetamaNodeLocator in the spy-memcached code.
 *
 * @param <T> type representing a node object
 *
 * @author pkamath
 */
public class ConsistentHash<T> {

    private final HashFunction hashAlgo;
    private final NodeConfiguration<T> nodeConfig;
    private final SortedMap<Long, T> ring = new TreeMap<Long, T>();

    /**
     * Builds the ring from the given nodes.
     *
     * @throws IllegalArgumentException if any argument is null, the node
     *                                  collection is empty, or the configured
     *                                  repetition count is not positive
     */
    public ConsistentHash(HashFunction hashAlgo,
                          NodeConfiguration<T> nodeConfig, Collection<T> nodes) {
        if (hashAlgo == null || nodeConfig == null || nodes == null
                || nodes.isEmpty()) {
            throw new IllegalArgumentException(
                    "Constructor args to "
                            + "ConsistentHash should be non null and the collection of "
                            + "nodes should be non empty");
        }
        this.hashAlgo = hashAlgo;
        this.nodeConfig = nodeConfig;
        for (T node : nodes) {
            add(node);
        }
    }

    /** Adds every virtual-node repetition of {@code node} to the ring. */
    private void add(T node) {
        int numReps = nodeConfig.getNodeRepetitions();
        // Fix: previously only negative counts were rejected even though the
        // message says "positive"; a repetition count of 0 would silently
        // produce an empty ring and make get() fail later.
        if (numReps <= 0) {
            throw new IllegalArgumentException("Number of repetitions of a "
                    + "node should be positive");
        }
        for (int i = 0; i < numReps; i++) {
            putInRing(hashAlgo.call(nodeConfig.getKeyForNode(node, i)),
                    node);
        }
    }

    /**
     * Places one virtual node on the ring. A hash collision here indicates a
     * faulty NodeConfiguration.getKeyForNode() that does not return unique
     * keys per repetition; the later entry wins.
     */
    void putInRing(Long hash, T node) {
        ring.put(hash, node);
    }

    /**
     * Returns the node which should contain the supplied key per the
     * consistent hashing algorithm: the first ring entry at or clockwise
     * after the key's hash, wrapping to the first entry.
     *
     * @param keyBytes key to search on
     * @return the owning node; callers should verify it is alive before use
     */
    public T get(byte[] keyBytes) {
        Long hash = hashAlgo.call(keyBytes);
        if (!ring.containsKey(hash)) {
            SortedMap<Long, T> tailMap = ring.tailMap(hash);
            hash = tailMap.isEmpty() ? ring.firstKey() : tailMap.firstKey();
        }
        return ring.get(hash);
    }

    // for unit tests - package access only
    SortedMap<Long, T> getRing() {
        return ring;
    }

    HashFunction getHashAlgo() {
        return hashAlgo;
    }
}
| 7,859 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/network/EndpointConfiguration.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.network;
import java.nio.charset.Charset;
/**
* An implementation of {@link NodeConfiguration} for the {@link Endpoint}
* node type
*
* @author pkamath
*/
/**
 * An implementation of {@link NodeConfiguration} for the {@link Endpoint}
 * node type: each ring key is {@code "<slotId>-<repetition>"}.
 *
 * @author pkamath
 */
public class EndpointConfiguration extends NodeConfiguration<Endpoint> {

    // Fix: was a non-final static; now final so the shared charset cannot be
    // reassigned.
    private static final Charset UTF8 = Charset.forName("UTF-8");

    @Override
    public byte[] getKeyForNode(Endpoint node, int repetition) {
        // String concatenation converts the int directly; no explicit
        // Integer.toString needed.
        return (node.getSlotId() + "-" + repetition).getBytes(UTF8);
    }
}
| 7,860 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/network/ServerSlotManager.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.network;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.atomic.AtomicReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Groups connected server endpoints into consistent-hash rings, one ring per
 * subscription (the "clientId" query parameter, falling back to the
 * endpoint's slot id when absent).
 */
public class ServerSlotManager<T> {

    private static final String CLIENT_ID = "clientId";
    private static final Logger LOG = LoggerFactory.getLogger(ServerSlotManager.class);
    // One SlotAssignmentManager (ring) per subscription id.
    ConcurrentHashMap<String, SlotAssignmentManager<T>> slotManagerMap = new ConcurrentHashMap<String, SlotAssignmentManager<T>>();
    private HashFunction hashAlgorithm;

    public ServerSlotManager(HashFunction hashAlgorithm) {
        this.hashAlgorithm = hashAlgorithm;
    }

    /**
     * Registers {@code node} in the ring for its subscription id, creating the
     * ring on first use.
     *
     * @return the (possibly newly created) assignment manager for that ring
     */
    public synchronized SlotAssignmentManager<T> registerServer(WritableEndpoint<T> node, Map<String, List<String>> params) {
        LOG.info("Registering server.node: " + node);
        String subId = getSubscriptionId(params);
        if (subId == null) {
            subId = node.getSlotId();
        }
        SlotAssignmentManager<T> sam = slotManagerMap.get(subId);
        // if slot manager doesn't already exist. create it
        if (sam == null) {
            LOG.info("Setting up new SlotAssignmentManager for sub: " + subId);
            sam = new SlotAssignmentManager<T>(hashAlgorithm, subId);
            // NOTE(review): putIfAbsent's return value is ignored; safe only
            // because this method is synchronized -- confirm no unsynchronized
            // writer of slotManagerMap exists elsewhere.
            slotManagerMap.putIfAbsent(subId, sam);
        }
        sam.registerServer(node);
        return sam;
    }

    /** Removes {@code node} from its ring, dropping the ring once empty. */
    public synchronized void deregisterServer(WritableEndpoint<T> node, Map<String, List<String>> params) {
        String subId = getSubscriptionId(params);
        if (subId == null) {
            subId = node.getSlotId();
        }
        SlotAssignmentManager<T> sam = slotManagerMap.get(subId);
        if (sam != null) {
            sam.deregisterServer(node);
            // if its empty remove it
            if (sam.isEmpty()) {
                slotManagerMap.remove(subId);
            }
        }
    }

    /** Extracts the first "clientId" query-param value, or null when absent. */
    private String getSubscriptionId(Map<String, List<String>> queryParams) {
        if (queryParams != null && !queryParams.isEmpty()) {
            List<String> subIdList = queryParams.get(CLIENT_ID);
            if (subIdList != null && !subIdList.isEmpty()) {
                return subIdList.get(0);
            }
        }
        return null;
    }

    /**
     * Maintains membership and the consistent-hash ring for one subscription.
     * Mutations are synchronized; readers see an immutable ring snapshot via
     * an AtomicReference, so filter()/lookup() are lock-free.
     */
    public static class SlotAssignmentManager<T> {

        // Latest immutable ring snapshot; rebuilt on every membership change.
        AtomicReference<ConsistentHash<WritableEndpoint<T>>> consistentHashRef = new AtomicReference<ConsistentHash<WritableEndpoint<T>>>();
        // Sorted by WritableEndpoint's slot-id-based ordering.
        ConcurrentSkipListSet<WritableEndpoint<T>> nodeList = new ConcurrentSkipListSet<WritableEndpoint<T>>();
        ConcurrentHashMap<String, Integer> connectionIdToSlotNumberMap = new ConcurrentHashMap<String, Integer>();
        private String consumerJobId;
        private HashFunction hashAlgo;
        private Gauge nodesOnRing;

        public SlotAssignmentManager(HashFunction hashAlgo, String subId) {
            this.consumerJobId = subId;
            this.hashAlgo = hashAlgo;
            Metrics metrics = new Metrics.Builder()
                    .name("SlottingRing_" + consumerJobId)
                    .addGauge("nodeCount")
                    .build();
            metrics = MetricsRegistry.getInstance().registerAndGet(metrics);
            nodesOnRing = metrics.getGauge("nodeCount");
        }

        /**
         * Registers {@code sn}, replacing any existing endpoint with the same
         * slot id, then rebuilds the ring snapshot.
         */
        public synchronized boolean forceRegisterServer(WritableEndpoint<T> sn) {
            LOG.info("Ring: " + consumerJobId + " before force register: " + nodeList);
            boolean success = nodeList.add(sn);
            if (!success) {
                // force add, existing connection exists with slot
                // tailSet(sn, true).first() retrieves the stored element equal
                // to sn (same slot id) so it can be swapped out.
                WritableEndpoint<T> oldEndpoint = nodeList.tailSet(sn, true).first();
                boolean removed = nodeList.remove(oldEndpoint);
                if (removed) {
                    success = nodeList.add(sn);
                    LOG.info("Explicitly would have closed endpoint: " + oldEndpoint);
                    //oldEndpoint.explicitClose();
                }
            }
            LOG.info("node " + sn + " add " + success);
            LOG.info("Ring: " + consumerJobId + " after force register: " + nodeList);
            ConsistentHash<WritableEndpoint<T>> newConsistentHash = new ConsistentHash<WritableEndpoint<T>>(hashAlgo, new WritableEndpointConfiguration<T>(), nodeList);
            consistentHashRef.set(newConsistentHash);
            nodesOnRing.set(nodeList.size());
            return success;
        }

        /**
         * Adds {@code sn} to the membership (no-op if an equal endpoint is
         * already present) and rebuilds the ring snapshot.
         */
        public synchronized boolean registerServer(WritableEndpoint<T> sn) {
            LOG.info("Ring: " + consumerJobId + " before register: " + nodeList);
            boolean success = nodeList.add(sn);
            LOG.info("node " + sn + " add " + success);
            LOG.info("Ring: " + consumerJobId + " after register: " + nodeList);
            ConsistentHash<WritableEndpoint<T>> newConsistentHash = new ConsistentHash<WritableEndpoint<T>>(hashAlgo, new WritableEndpointConfiguration<T>(), nodeList);
            consistentHashRef.set(newConsistentHash);
            nodesOnRing.set(nodeList.size());
            return success;
        }

        /**
         * Removes {@code node} and rebuilds the ring snapshot.
         *
         * <p>NOTE(review): when the last node is removed, the stale ring
         * snapshot remains in consistentHashRef (ConsistentHash rejects empty
         * node lists), so lookup() could still return the removed node --
         * confirm callers always discard an empty manager (see
         * ServerSlotManager.deregisterServer).
         */
        public synchronized boolean deregisterServer(WritableEndpoint<T> node) {
            LOG.info("Ring: " + consumerJobId + " before deregister: " + nodeList);
            boolean success = nodeList.remove(node);
            LOG.info("node " + node + " removed " + success);
            LOG.info("Ring: " + consumerJobId + " after deregister: " + nodeList);
            if (!nodeList.isEmpty()) {
                ConsistentHash<WritableEndpoint<T>> newConsistentHash = new ConsistentHash<WritableEndpoint<T>>(hashAlgo, new WritableEndpointConfiguration<T>(), nodeList);
                consistentHashRef.set(newConsistentHash);
            }
            nodesOnRing.set(nodeList.size());
            return success;
        }

        /**
         * Returns true when {@code node} owns {@code keyBytes} on the current
         * ring; with at most one member, every key passes.
         */
        public boolean filter(WritableEndpoint<T> node, byte[] keyBytes) {
            if (nodeList.size() > 1) {
                return node.equals(consistentHashRef.get().get(keyBytes));
            } else {
                return true;
            }
        }

        /** Live view of the current membership. */
        public Collection<WritableEndpoint<T>> endpoints() {
            return nodeList;
        }

        /** Resolves the endpoint owning {@code keyBytes} on the current ring. */
        public WritableEndpoint<T> lookup(byte[] keyBytes) {
            return consistentHashRef.get().get(keyBytes);
        }

        public boolean isEmpty() {
            return nodeList.isEmpty();
        }

        @Override
        public int hashCode() {
            final int prime = 31;
            int result = 1;
            result = prime
                    * result
                    + ((connectionIdToSlotNumberMap == null) ? 0
                    : connectionIdToSlotNumberMap.hashCode());
            result = prime * result
                    + ((consumerJobId == null) ? 0 : consumerJobId.hashCode());
            result = prime * result
                    + ((nodeList == null) ? 0 : nodeList.hashCode());
            return result;
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj)
                return true;
            if (obj == null)
                return false;
            if (getClass() != obj.getClass())
                return false;
            // NOTE(review): raw SlotAssignmentManager (pre-generics style);
            // comparison is over mutable collections, so equality can change
            // as members register/deregister.
            SlotAssignmentManager other = (SlotAssignmentManager) obj;
            if (connectionIdToSlotNumberMap == null) {
                if (other.connectionIdToSlotNumberMap != null)
                    return false;
            } else if (!connectionIdToSlotNumberMap
                    .equals(other.connectionIdToSlotNumberMap))
                return false;
            if (consumerJobId == null) {
                if (other.consumerJobId != null)
                    return false;
            } else if (!consumerJobId.equals(other.consumerJobId))
                return false;
            if (nodeList == null) {
                if (other.nodeList != null)
                    return false;
            } else if (!nodeList.equals(other.nodeList))
                return false;
            return true;
        }
    }
}
| 7,861 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/network/RoundRobinRouter.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.network;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Round-robin dispatcher over a set of writable endpoints, keyed by their unique
 * host id. All mutating operations are synchronized; an "activeConnections"
 * gauge tracks the number of registered endpoints.
 */
public class RoundRobinRouter<T> {

    private final Map<String, WritableEndpoint<T>> endpoints = new HashMap<>();
    private final List<String> idList = new ArrayList<String>();
    private int currentListSize;
    private int count;
    private final Metrics metrics;
    private final Gauge activeConnections;

    public RoundRobinRouter() {
        metrics = new Metrics.Builder()
                .name("RoundRobin")
                .addGauge("activeConnections")
                .build();
        activeConnections = metrics.getGauge("activeConnections");
    }

    public Metrics getMetrics() {
        return metrics;
    }

    /** Registers the endpoint; returns false if one with the same unique id already exists. */
    public synchronized boolean add(WritableEndpoint<T> endpoint) {
        final String id = Endpoint.uniqueHost(endpoint.getHost(), endpoint.getPort(), endpoint.getSlotId());
        final boolean added = !endpoints.containsKey(id);
        if (added) {
            endpoints.put(id, endpoint);
            idList.add(id);
            currentListSize++;
        }
        activeConnections.set(endpoints.size());
        return added;
    }

    /** Deregisters the endpoint; returns false if it was not registered. */
    public synchronized boolean remove(WritableEndpoint<T> endpoint) {
        final String id = Endpoint.uniqueHost(endpoint.getHost(), endpoint.getPort(), endpoint.getSlotId());
        final boolean removed = endpoints.containsKey(id);
        if (removed) {
            endpoints.remove(id);
            idList.remove(id);
            currentListSize--;
        }
        activeConnections.set(endpoints.size());
        return removed;
    }

    /**
     * Returns the next endpoint in round-robin order. The Integer.MAX_VALUE mask
     * keeps the index non-negative when the counter wraps around.
     */
    public synchronized WritableEndpoint<T> nextSlot() {
        final int index = (count++ & Integer.MAX_VALUE) % currentListSize;
        return endpoints.get(idList.get(index));
    }

    // TODO should completeAll and errorAll de-register?
    public synchronized void completeAllEndpoints() {
        for (WritableEndpoint<T> ep : endpoints.values()) {
            ep.complete();
        }
    }

    public synchronized boolean isEmpty() {
        return endpoints.isEmpty();
    }

    public synchronized void errorAllEndpoints(Throwable e) {
        for (WritableEndpoint<T> ep : endpoints.values()) {
            ep.error(e);
        }
    }
}
| 7,862 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/storage/StorageUnit.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.storage;
import java.math.BigDecimal;
import java.math.RoundingMode;
/**
* Class that maintains different forms of Storage Units.
* Copied from <a href="https://raw.githubusercontent.com/apache/hadoop/03cfc852791c14fad39db4e5b14104a276c08e59/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/StorageUnit.java">Link.</a>
*/
/**
 * Storage size units based on powers of 1024, with conversions between units.
 * Cross-unit conversions go through BigDecimal and are rounded HALF_UP to 4
 * decimal places; same-unit conversions return the input value untouched.
 * (Behavior mirrors the per-constant abstract-method implementation this
 * replaces; each constant now simply carries its byte multiplier.)
 */
public enum StorageUnit {
    /*
     We rely on BYTES being the last to get longest matching short names first.
     The short name of bytes is b and it will match with other longer names.

     if we change this order, the corresponding code in
     Configuration#parseStorageUnit needs to be changed too, since values()
     call returns the Enums in declared order and we depend on it.
     */
    EB(1024d * 1024d * 1024d * 1024d * 1024d * 1024d, "exabytes", "eb", "e"),
    PB(1024d * 1024d * 1024d * 1024d * 1024d, "petabytes", "pb", "p"),
    TB(1024d * 1024d * 1024d * 1024d, "terabytes", "tb", "t"),
    GB(1024d * 1024d * 1024d, "gigabytes", "gb", "g"),
    MB(1024d * 1024d, "megabytes", "mb", "m"),
    KB(1024d, "kilobytes", "kb", "k"),
    BYTES(1d, "bytes", "b", "b");

    private static final double BYTE = 1L;
    private static final double KILOBYTES = BYTE * 1024L;
    private static final double MEGABYTES = KILOBYTES * 1024L;
    private static final double GIGABYTES = MEGABYTES * 1024L;
    private static final double TERABYTES = GIGABYTES * 1024L;
    private static final double PETABYTES = TERABYTES * 1024L;
    private static final double EXABYTES = PETABYTES * 1024L;
    private static final int PRECISION = 4;

    // Number of bytes represented by one unit of this constant.
    private final double unitBytes;
    private final String longName;
    private final String shortName;
    private final String suffixChar;

    StorageUnit(double unitBytes, String longName, String shortName, String suffixChar) {
        this.unitBytes = unitBytes;
        this.longName = longName;
        this.shortName = shortName;
        this.suffixChar = suffixChar;
    }

    /**
     * Converts {@code value}, expressed in this unit, into units of
     * {@code targetUnitBytes} bytes. The identity conversion bypasses BigDecimal
     * entirely and returns the raw value (no rounding), matching the original
     * per-constant implementations.
     */
    private double convertTo(double value, double targetUnitBytes) {
        if (unitBytes == targetUnitBytes) {
            return value;
        }
        if (unitBytes > targetUnitBytes) {
            return multiply(value, unitBytes / targetUnitBytes);
        }
        return divide(value, targetUnitBytes / unitBytes);
    }

    public double toBytes(double value) {
        return convertTo(value, BYTE);
    }

    public double toKBs(double value) {
        return convertTo(value, KILOBYTES);
    }

    public double toMBs(double value) {
        return convertTo(value, MEGABYTES);
    }

    public double toGBs(double value) {
        return convertTo(value, GIGABYTES);
    }

    public double toTBs(double value) {
        return convertTo(value, TERABYTES);
    }

    public double toPBs(double value) {
        return convertTo(value, PETABYTES);
    }

    public double toEBs(double value) {
        return convertTo(value, EXABYTES);
    }

    public String getLongName() {
        return longName;
    }

    public String getShortName() {
        return shortName;
    }

    public String getSuffixChar() {
        return suffixChar;
    }

    /** The identity conversion: a value already expressed in this unit is returned as-is. */
    public double getDefault(double value) {
        return value;
    }

    /** Converts a byte count into this unit; BYTES passes the value through unrounded. */
    public double fromBytes(double value) {
        return unitBytes == BYTE ? value : divide(value, unitBytes);
    }

    /**
     * Using BigDecimal to avoid issues with overflow and underflow.
     *
     * @param value - value
     * @param divisor - divisor.
     * @return -- returns a double that represents this value
     */
    private static double divide(double value, double divisor) {
        BigDecimal numerator = new BigDecimal(value);
        BigDecimal denominator = new BigDecimal(divisor);
        return numerator.divide(denominator)
                .setScale(PRECISION, RoundingMode.HALF_UP)
                .doubleValue();
    }

    /**
     * Using BigDecimal so we can throw if we are overflowing the Long.Max.
     *
     * @param first - First Num.
     * @param second - Second Num.
     * @return Returns a double
     */
    private static double multiply(double first, double second) {
        return new BigDecimal(first)
                .multiply(new BigDecimal(second))
                .setScale(PRECISION, RoundingMode.HALF_UP)
                .doubleValue();
    }

    @Override
    public String toString() {
        return getLongName();
    }
}
| 7,863 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/properties/DefaultMantisPropertiesLoader.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.properties;
import io.mantisrx.common.MantisProperties;
import java.util.Map;
import java.util.Properties;
/**
 * Default property loader: resolves values through {@link MantisProperties},
 * preferring a job-parameter override ("JOB_PARAM_" + name) over the plain
 * property name. Lifecycle hooks are no-ops.
 */
public class DefaultMantisPropertiesLoader implements MantisPropertiesLoader {

    // Retained for subclasses; not consulted by getStringValue directly.
    protected Properties props;
    private Map<String, String> env;

    public DefaultMantisPropertiesLoader(Properties props) {
        this.props = props;
        this.env = System.getenv();
    }

    /**
     * Resolves {@code name}, first checking the "JOB_PARAM_"-prefixed override,
     * then the plain property, falling back to {@code defaultVal}.
     */
    @Override
    public String getStringValue(String name, String defaultVal) {
        if (name == null) {
            return defaultVal;
        }
        final String plainValue = MantisProperties.getProperty(name, defaultVal);
        return MantisProperties.getProperty("JOB_PARAM_" + name, plainValue);
    }

    @Override
    public void initalize() {
        // no-op
    }

    @Override
    public void shutdown() {
        // no-op
    }
}
| 7,864 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/properties/MantisPropertiesLoader.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.properties;
/**
 * Loads Mantis property values, with lifecycle hooks for setup and teardown.
 */
public interface MantisPropertiesLoader {

    /** Prepares the loader for use. (Method name kept as-is for compatibility.) */
    void initalize();

    /** Releases any resources held by the loader. */
    void shutdown();

    /**
     * Returns the value for {@code name}, or {@code defaultVal} when no value is found.
     */
    String getStringValue(String name, String defaultVal);
}
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/common/compression/CompressionUtils.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.compression;
import io.mantisrx.common.MantisServerSentEvent;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Base64;
import java.util.List;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.xerial.snappy.Snappy;
/**
 * Helpers for batching Mantis events into a single compressed (gzip or snappy),
 * base64-encoded payload for SSE transport, and for decoding such payloads back
 * into individual {@link MantisServerSentEvent}s. Events inside one payload are
 * separated by the {@code $$$} delimiter unless a custom delimiter is supplied.
 */
public class CompressionUtils {

    // Delimiter string inserted between events within one compressed payload.
    public static final String MANTIS_SSE_DELIMITER = "$$$";
    // NOTE(review): encoded with the platform default charset, while the rest of
    // this class uses UTF-8 explicitly; equivalent for "$$$" (pure ASCII).
    public static final byte[] MANTIS_SSE_DELIMITER_BINARY = MANTIS_SSE_DELIMITER.getBytes();

    private static Logger logger = LoggerFactory.getLogger(CompressionUtils.class);

    /** Joins, compresses and base64-encodes events using the default $$$ delimiter. */
    public static String compressAndBase64Encode(List<String> events, boolean useSnappy) {
        return compressAndBase64Encode(events, useSnappy, MANTIS_SSE_DELIMITER_BINARY);
    }

    /**
     * Joins the events with {@code delimiter}, compresses the result (snappy when
     * {@code useSnappy}, gzip otherwise) and base64-encodes the compressed bytes.
     *
     * @return the encoded payload, or null when {@code events} is empty or encoding fails
     */
    public static String compressAndBase64Encode(List<String> events, boolean useSnappy, byte[] delimiter) {
        if (!events.isEmpty()) {
            StringBuilder sb = new StringBuilder();
            for (String event : events) {
                sb.append(event);
                // FIX: appending a byte[] to a StringBuilder resolves to append(Object)
                // and appends the array's identity string ("[B@..."), not the delimiter
                // characters, so the decode path could never find the separator.
                // Convert to String (platform charset, matching how
                // MANTIS_SSE_DELIMITER_BINARY is produced) before appending.
                sb.append(new String(delimiter));
            }
            try {
                byte[] compressedBytes;
                if (useSnappy) {
                    compressedBytes = snappyCompressData(sb.toString());
                } else {
                    compressedBytes = gzipCompressData(sb.toString());
                }
                String encodedData = Base64.getEncoder().encodeToString(compressedBytes);
                if (logger.isDebugEnabled()) { logger.debug("Encoded Data --> " + encodedData); }
                return encodedData;
            } catch (UnsupportedEncodingException e) {
                logger.warn("Error encoding messages:" + e.getMessage());
            } catch (IOException e) {
                logger.warn("Error encoding messages2:" + e.getMessage());
            }
        }
        return null;
    }

    /** Gzip variant of {@link #compressAndBase64Encode(List, boolean)}. */
    public static String compressAndBase64Encode(List<String> events) {
        return compressAndBase64Encode(events, false);
    }

    /** Binary-event variant using the default $$$ delimiter. */
    public static byte[] compressAndBase64EncodeBytes(List<List<byte[]>> nestedEvents, boolean useSnappy) {
        return compressAndBase64EncodeBytes(nestedEvents, useSnappy, MANTIS_SSE_DELIMITER_BINARY);
    }

    /**
     * Concatenates every event in the nested lists (each followed by {@code delimiter}),
     * compresses and base64-encodes the result.
     *
     * @return UTF-8 bytes of the base64 payload, or null when input is empty or encoding fails
     */
    public static byte[] compressAndBase64EncodeBytes(List<List<byte[]>> nestedEvents, boolean useSnappy, byte[] delimiter) {
        if (!nestedEvents.isEmpty()) {
            ByteBuffer buffer = ByteBuffer.allocate(getTotalByteSize(nestedEvents, delimiter));
            for (List<byte[]> outerList : nestedEvents) {
                for (byte[] event : outerList) {
                    buffer.put(event);
                    buffer.put(delimiter);
                }
            }
            try {
                byte[] compressedBytes;
                if (useSnappy) {
                    compressedBytes = snappyCompressData(buffer.array());
                } else {
                    compressedBytes = gzipCompressData(buffer.array());
                }
                String encodedData = Base64.getEncoder().encodeToString(compressedBytes);
                if (logger.isDebugEnabled()) { logger.debug("Encoded Data --> " + encodedData); }
                return encodedData.getBytes("UTF-8");
            } catch (UnsupportedEncodingException e) {
                logger.warn("Error encoding messages:" + e.getMessage());
            } catch (IOException e) {
                logger.warn("Error encoding messages2:" + e.getMessage());
            }
        }
        return null;
    }

    /** @deprecated use {@link #compressAndBase64EncodeBytes(List, boolean)} instead. */
    @Deprecated
    public static byte[] compressAndBase64EncodeBytes(List<List<byte[]>> nestedEvents) {
        if (!nestedEvents.isEmpty()) {
            ByteBuffer buffer = ByteBuffer.allocate(getTotalByteSize(nestedEvents, MANTIS_SSE_DELIMITER_BINARY));
            for (List<byte[]> outerList : nestedEvents) {
                for (byte[] event : outerList) {
                    buffer.put(event);
                    buffer.put(MANTIS_SSE_DELIMITER_BINARY);
                }
            }
            try {
                byte[] compressedBytes = gzipCompressData(buffer.array());
                String encodedData = Base64.getEncoder().encodeToString(compressedBytes);
                if (logger.isDebugEnabled()) { logger.debug("Encoded Data --> " + encodedData); }
                return encodedData.getBytes("UTF-8");
            } catch (UnsupportedEncodingException e) {
                logger.warn("Error encoding messages:" + e.getMessage());
            } catch (IOException e) {
                logger.warn("Error encoding messages2:" + e.getMessage());
            }
        }
        return null;
    }

    /** Total bytes needed for all events plus one delimiter after each event. */
    private static int getTotalByteSize(List<List<byte[]>> nestedEvents, byte[] delimiter) {
        int size = 0;
        int count = 0;
        for (List<byte[]> outerList : nestedEvents) {
            for (byte[] event : outerList) {
                count++;
                size += event.length;
            }
        }
        return size + count * delimiter.length;
    }

    /**
     * Legacy decode path; superseded by {@link #decompressAndBase64Decode(String, boolean, boolean)}.
     * NOTE(review): builds the decompressed string via += in a loop (quadratic for large
     * payloads) — one reason the tokenize-based path replaced it.
     */
    public static List<MantisServerSentEvent> decompressAndBase64Decode_old(String encodedString, boolean isCompressedBinary) {
        encodedString = encodedString.trim();
        // Heuristic: "ping" keep-alives and raw JSON ("{"-prefixed) payloads are passed
        // through untouched rather than decoded.
        if (!encodedString.isEmpty() && isCompressedBinary && !encodedString.startsWith("ping") && !encodedString.startsWith("{")) {
            if (logger.isDebugEnabled()) { logger.debug("decoding " + encodedString); }
            byte[] decoded = Base64.getDecoder().decode(encodedString);
            GZIPInputStream gis;
            try {
                gis = new GZIPInputStream(new ByteArrayInputStream(decoded));
                BufferedReader bf = new BufferedReader(new InputStreamReader(gis, "UTF-8"));
                String outStr = "";
                String line;
                while ((line = bf.readLine()) != null) {
                    outStr += line;
                }
                String[] toks = outStr.split("\\$\\$\\$");
                List<MantisServerSentEvent> msseList = new ArrayList<>();
                for (String tok : toks) {
                    msseList.add(new MantisServerSentEvent(tok));
                }
                return msseList;
            } catch (IOException e) {
                logger.error(e.getMessage());
            }
            return new ArrayList<MantisServerSentEvent>();
        } else {
            List<MantisServerSentEvent> s = new ArrayList<MantisServerSentEvent>();
            s.add(new MantisServerSentEvent(encodedString));
            return s;
        }
    }

    /** Tokenizes using the default $$$ delimiter. */
    static List<MantisServerSentEvent> tokenize(BufferedReader br) throws IOException {
        return tokenize(br, MANTIS_SSE_DELIMITER);
    }

    /**
     * Splits the reader's content into events on {@code delimiter}, streaming line by
     * line without materializing the full payload. A trailing partial delimiter is
     * emitted as literal text with the final event.
     * NOTE(review): the partial-rematch logic below compares the current char against
     * ALL previously matched delimiter chars; this is only correct when the delimiter's
     * characters are identical (true for "$$$") — verify before using other delimiters.
     */
    static List<MantisServerSentEvent> tokenize(BufferedReader bf, String delimiter) throws IOException {
        StringBuilder sb = new StringBuilder();
        String line;
        List<MantisServerSentEvent> msseList = new ArrayList<>();
        final int delimiterLength = delimiter.length();
        char[] delimiterArray = delimiter.toCharArray();
        // Number of delimiter chars matched so far but not yet emitted.
        int delimiterCount = 0;
        while ((line = bf.readLine()) != null) {
            // Consider replacing this whole thing with just String.indexOf
            for (int i = 0; i < line.length(); i++) {
                if (line.charAt(i) != delimiterArray[delimiterCount]) {
                    if (delimiterCount > 0) {
                        // Partial delimiter match broke; decide whether the held-back
                        // chars can still be part of a delimiter or must be flushed.
                        boolean prefixMatch = true;
                        for (int j = delimiterCount - 1; j >= 0; j--) {
                            if (line.charAt(i) != delimiterArray[j]) {
                                prefixMatch = false;
                                break;
                            }
                        }
                        if (!prefixMatch) {
                            for (int j = 0; j < delimiterCount; ++j) {
                                sb.append(delimiterArray[j]);
                            }
                            delimiterCount = 0;
                        }
                    }
                    if (line.charAt(i) != delimiterArray[delimiterCount]) {
                        sb.append(line.charAt(i));
                    } else {
                        delimiterCount++;
                    }
                } else {
                    delimiterCount++;
                }
                if (delimiterCount == delimiterLength) {
                    // Full delimiter seen: close out the current event.
                    msseList.add(new MantisServerSentEvent(sb.toString()));
                    delimiterCount = 0;
                    sb = new StringBuilder();
                }
            }
        }
        // We have a trailing event.
        if (sb.length() > 0) {
            // We had a partial delimiter match which was not in the builder.
            if (delimiterCount > 0) {
                for (int j = 0; j < delimiterCount; ++j) {
                    sb.append(delimiter.charAt(j));
                }
            }
            msseList.add(new MantisServerSentEvent(sb.toString()));
        }
        return msseList;
    }

    /**
     * Experimental tokenizer (unused by the public API).
     * NOTE(review): the inner while loop lacks a bounds check — a payload whose last
     * character is not '$' throws StringIndexOutOfBoundsException; also drops a
     * trailing event that is not delimiter-terminated. Kept as-is.
     */
    static List<MantisServerSentEvent> tokenize_1(BufferedReader bf) throws IOException {
        StringBuilder sb = new StringBuilder();
        String line;
        List<MantisServerSentEvent> msseList = new ArrayList<>();
        String outStr = "";
        while ((line = bf.readLine()) != null) {
            sb.append(line);
        }
        int i = 0;
        outStr = sb.toString();
        sb = new StringBuilder();
        while (i < outStr.length()) {
            while (outStr.charAt(i) != '$') {
                sb.append(outStr.charAt(i));
                i++;
            }
            if (i + 3 < outStr.length()) {
                if (outStr.charAt(i) == '$' && outStr.charAt(i + 1) == '$' && outStr.charAt(i + 2) == '$') {
                    i += 3;
                    msseList.add(new MantisServerSentEvent(sb.toString()));
                    sb = new StringBuilder();
                }
            } else {
                sb.append(outStr.charAt(i));
                i++;
            }
        }
        return msseList;
    }

    /**
     * Experimental regex-split tokenizer (unused by the public API).
     * Reads the entire payload into memory, then splits on the literal "$$$".
     */
    static List<MantisServerSentEvent> tokenize_2(BufferedReader bf) throws IOException {
        StringBuilder sb = new StringBuilder();
        String line;
        List<MantisServerSentEvent> msseList = new ArrayList<>();
        String outStr = "";
        while ((line = bf.readLine()) != null) {
            sb.append(line);
        }
        outStr = sb.toString();
        String[] toks = outStr.split("\\$\\$\\$");
        for (String tok : toks) {
            msseList.add(new MantisServerSentEvent(tok));
        }
        return msseList;
    }

    /** Decodes using the default $$$ delimiter. */
    public static List<MantisServerSentEvent> decompressAndBase64Decode(String encodedString,
                                                                        boolean isCompressedBinary,
                                                                        boolean useSnappy) {
        return decompressAndBase64Decode(encodedString, isCompressedBinary, useSnappy, null);
    }

    /**
     * Reverses {@link #compressAndBase64Encode}: base64-decodes, decompresses (snappy
     * or gzip) and splits into events. "ping" keep-alives, "{"-prefixed JSON and empty
     * strings are returned as a single untouched event.
     *
     * @param delimiter event separator; null selects the default $$$ delimiter
     */
    public static List<MantisServerSentEvent> decompressAndBase64Decode(String encodedString,
                                                                        boolean isCompressedBinary,
                                                                        boolean useSnappy,
                                                                        String delimiter) {
        encodedString = encodedString.trim();
        if (!encodedString.isEmpty() && isCompressedBinary && !encodedString.startsWith("ping") && !encodedString.startsWith("{")) {
            if (logger.isDebugEnabled()) { logger.debug("decoding " + encodedString); }
            byte[] decoded = Base64.getDecoder().decode(encodedString);
            try {
                if (useSnappy) {
                    return delimiter == null
                            ? tokenize(snappyDecompress(decoded))
                            : tokenize(snappyDecompress(decoded), delimiter);
                } else {
                    return delimiter == null
                            ? tokenize(gzipDecompress(decoded))
                            : tokenize(gzipDecompress(decoded), delimiter);
                }
            } catch (IOException e) {
                logger.error(e.getMessage());
            }
            return new ArrayList<MantisServerSentEvent>();
        } else {
            List<MantisServerSentEvent> s = new ArrayList<MantisServerSentEvent>();
            s.add(new MantisServerSentEvent(encodedString));
            return s;
        }
    }

    /** @deprecated use {@link #decompressAndBase64Decode(String, boolean, boolean)} instead. */
    @Deprecated
    public static List<MantisServerSentEvent> decompressAndBase64Decode(String encodedString, boolean isCompressedBinary) {
        return decompressAndBase64Decode(encodedString, isCompressedBinary, false);
    }

    static byte[] snappyCompressData(String data) throws IOException {
        return Snappy.compress(data);
    }

    static byte[] snappyCompressData(byte[] data) throws IOException {
        return Snappy.compress(data);
    }

    /** Gzip-compresses the UTF-8 bytes of {@code data}. */
    static byte[] gzipCompressData(String data) throws IOException, UnsupportedEncodingException {
        ByteArrayOutputStream obj = new ByteArrayOutputStream();
        GZIPOutputStream gzip = new GZIPOutputStream(obj);
        gzip.write(data.getBytes("UTF-8"));
        gzip.close();
        byte[] compressedBytes = obj.toByteArray();
        return compressedBytes;
    }

    /** Gzip-compresses raw bytes. */
    static byte[] gzipCompressData(byte[] data) throws IOException, UnsupportedEncodingException {
        ByteArrayOutputStream obj = new ByteArrayOutputStream();
        GZIPOutputStream gzip = new GZIPOutputStream(obj);
        gzip.write(data);
        gzip.close();
        byte[] compressedBytes = obj.toByteArray();
        return compressedBytes;
    }

    /** Snappy-decompresses into a UTF-8 reader. */
    static BufferedReader snappyDecompress(byte[] data) throws IOException {
        byte[] decompressed = Snappy.uncompress(data);
        ByteArrayInputStream bais = new ByteArrayInputStream(decompressed);
        BufferedReader bf = new BufferedReader(new InputStreamReader(bais, "UTF-8"));
        return bf;
    }

    /** Gzip-decompresses into a UTF-8 reader. */
    static BufferedReader gzipDecompress(byte[] data) throws IOException {
        GZIPInputStream gis = new GZIPInputStream(new ByteArrayInputStream(data));
        BufferedReader bf = new BufferedReader(new InputStreamReader(gis, "UTF-8"));
        return bf;
    }

    /** Ad-hoc manual round-trip demo; not part of the public API. */
    public static void main(String[] args) {
        String e1 = "{\"ip\":\"11.112.119.64\",\"count\":27}";
        String e2 = "{\"ip\":\"22.111.112.62\",\"count\":27}";
        String e3 = "{\"ip\":\"33.222.112.62\",\"count\":27}";
        List<String> events = new ArrayList<>();
        events.add(e1);
        events.add(e2);
        events.add(e3);
        String encodedString = CompressionUtils.compressAndBase64Encode(events);
        List<MantisServerSentEvent> orig = CompressionUtils.decompressAndBase64Decode(encodedString, true);
        for (MantisServerSentEvent event : orig) {
            System.out.println("event -> " + event);
        }
    }
}
| 7,866 |
0 | Create_ds/mantis/mantis-common/src/main/java/com/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/com/mantisrx/common/utils/MantisMetricStringConstants.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mantisrx.common.utils;
import io.reactivx.mantis.operators.DropOperator;
/**
 * Metric-name string constants shared across Mantis components.
 */
public class MantisMetricStringConstants {

    /** Suffix identifying metrics for data flowing into an operator. */
    public static final String INCOMING = "incoming";

    /** Metric group name for DropOperator incoming-data metrics. */
    public static final String DROP_OPERATOR_INCOMING_METRIC_GROUP = String.format("%s_%s", DropOperator.METRIC_GROUP, INCOMING);

    /** Tag key used to attribute a metric to a specific group. */
    public static final String GROUP_ID_TAG = "groupId";

    private MantisMetricStringConstants() {
        // Constants holder; prevent instantiation.
    }
}
| 7,867 |
0 | Create_ds/mantis/mantis-common/src/main/java/com/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/com/mantisrx/common/utils/MantisSourceJobConstants.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mantisrx.common.utils;
/**
 * Parameter and key name constants shared by Mantis source jobs and their clients.
 */
public class MantisSourceJobConstants {

    public static final String SUBSCRIPTION_ID_PARAM_NAME = "subscriptionId";
    public static final String CRITERION_PARAM_NAME = "criterion";
    public static final String FILTER_PARAM_NAME = "filter";
    public static final String TARGET_APP_PARAMETER_NAME = "targetApp";
    public static final String TARGET_ASG_CSV_PARAM = "targetASGs";
    public static final String TARGET_SERVER_READ_TIMEOUT_SECS = "targetReadTimeoutSecs";
    public static final String ECHO_STAGE_BUFFER_MILLIS = "bufferDurationMillis";
    public static final String REGION_PARAMETER_NAME = "region";
    public static final String ENABLE_COMPRESSED_BINARY_INPUT_PARAMETER_NAME = "enableCompressedBinaryInput";
    public static final String DECODE_INCOMING_DATA = "enableDecoding";
    public static final String DECODE_FIELD_SUFFIX = "decodeSuffix";
    public static final String ZONE_LIST_PARAMETER_NAME = "zoneList";
    public static final String SOURCE_JOB_NAME_KEY = "sourceJobName";
    public static final String TARGET_APP_NAME_KEY = "targetApp";
    public static final String CLIENT_ID_PARAMETER_NAME = "clientId";
    // The value intentionally includes literal double quotes (used as a JSON fragment).
    public static final String MANTIS_META_SOURCE_NAME = "\"mantis.meta.sourceName\"";
    public static final String TARGET_KEY = "target";
    /** Sentinel value meaning "parameter was not provided". */
    public static final String NOT_SET = "NOT_SET";

    private MantisSourceJobConstants() {
        // Constants holder; prevent instantiation.
    }
}
| 7,868 |
0 | Create_ds/mantis/mantis-common/src/main/java/com/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/com/mantisrx/common/utils/Closeables.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mantisrx.common.utils;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.commons.io.IOExceptionList;
/**
 * Utility methods for composing {@link Closeable} instances.
 */
public class Closeables {

    /** Prevents instantiation of this utility class. */
    private Closeables() {
    }

    /**
     * Combines a list of closeables into a composite closeable which when called closes the closeables one-by-one.
     *
     * @param closeables closeables that all need to be closed
     * @return a composite closeable
     */
    public static Closeable combine(Collection<? extends Closeable> closeables) {
        return combine(closeables.toArray(new Closeable[0]));
    }

    /**
     * Combines the given closeables into a single {@link Closeable} that closes each of them in
     * order, continuing past individual failures.
     *
     * @param closeables closeables that all need to be closed
     * @return a composite closeable; closing it throws an {@link IOExceptionList} aggregating
     *     every individual close failure, if any occurred
     */
    public static Closeable combine(Closeable... closeables) {
        return () -> {
            List<Throwable> failures = new ArrayList<>();
            for (Closeable closeable : closeables) {
                try {
                    closeable.close();
                } catch (IOException e) {
                    // Keep closing the remaining closeables; remember which ones failed so the
                    // caller sees every failure, not just the first.
                    failures.add(new Exception(String.format("Failed to close %s", closeable), e));
                }
            }
            if (!failures.isEmpty()) {
                throw new IOExceptionList(failures);
            }
        };
    }
}
| 7,869 |
0 | Create_ds/mantis/mantis-common/src/main/java/com/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/com/mantisrx/common/utils/LabelUtils.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mantisrx.common.utils;
import io.mantisrx.common.Label;
import java.util.ArrayList;
import java.util.List;
public class LabelUtils {
public static final String OR_OPERAND = "or";
public static final String AND_OPERAND = "and";
public static final String LABEL_QUERY_PARAM = "labels";
public static final String LABEL_OP_QUERY_PARAM = "labels.op";
public static List<Label> generatePairs(String tagQuery) {
//name1=value1,name2=value2
List<Label> pairList = new ArrayList<>();
if (tagQuery != null) {
String[] toks = tagQuery.split(",");
if (toks != null && toks.length > 0) {
for (int i = 0; i < toks.length; i++) {
String token = toks[i];
String[] pairToks = token.split("=");
if (pairToks != null && pairToks.length == 2) {
pairList.add(new Label(pairToks[0], pairToks[1]));
}
}
}
}
return pairList;
}
public static boolean allPairsPresent(List<Label> expectedTags, List<Label> actualTags) {
for (Label expectedTag : expectedTags) {
if (!actualTags.contains(expectedTag)) {
return false;
}
}
return true;
}
public static boolean somePairsPresent(List<Label> expectedTags, List<Label> actualTags) {
for (Label expectedTag : expectedTags) {
if (actualTags.contains(expectedTag)) {
return true;
}
}
return false;
}
} | 7,870 |
0 | Create_ds/mantis/mantis-common/src/main/java/com/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/com/mantisrx/common/utils/MantisSSEConstants.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mantisrx.common.utils;
/**
 * Query parameter names understood by Mantis SSE (server-sent events) endpoints.
 *
 * <p>Non-instantiable holder of {@code String} constants.
 */
public class MantisSSEConstants {
    public static final String MANTIS_ENABLE_COMPRESSION = "mantis.EnableCompressedBinary";
    public static final String MANTIS_COMPRESSION_DELIMITER = "mantis.CompressionDelimiter";
    public static final String SAMPLE_M_SEC = "sampleMSec";
    public static final String SAMPLE = "sample";
    public static final String META_MESSAGES_SEC = "metaMessagesSec";
    public static final String ENABLE_META_MESSAGES = "enableMetaMessages";
    public static final String ENABLE_PINGS = "enablePings";
    public static final String HEARTBEAT_SEC = "heartbeatSec";
    public static final String CLIENT_ID = "clientId";
    public static final String GROUP_ID = "groupId";
    public static final String SLOT_ID = "slotId";
    public static final String ID = "id";
    public static final String MQL = "mql";
    public static final String TARGET_JOB = "sourceJobName";

    /** Prevents instantiation of this constants holder. */
    private MantisSSEConstants() {
    }
}
| 7,871 |
0 | Create_ds/mantis/mantis-common/src/main/java/com/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/com/mantisrx/common/utils/NettyUtils.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mantisrx.common.utils;
import io.mantisrx.common.MantisProperties;
import mantis.io.reactivex.netty.RxNetty;
import mantis.io.reactivex.netty.channel.SingleNioLoopProvider;
/**
 * Helper for configuring the number of netty event-loop threads used by RxNetty.
 */
public class NettyUtils {

    /** System property that caps the number of netty threads. */
    static final String KEY_MAX_THREADS = "mantis.worker.num.cpu";

    /**
     * The maximum number of netty threads.
     */
    static final int MAX_THREADS;

    static {
        int maxThreads = Integer.getInteger(KEY_MAX_THREADS, 0);
        int cpuCount = Runtime.getRuntime().availableProcessors();
        // Fall back to the CPU count when the property is unset, non-positive,
        // or larger than the number of available processors.
        if (maxThreads <= 0 || maxThreads > cpuCount) {
            MAX_THREADS = cpuCount;
        } else {
            MAX_THREADS = maxThreads;
        }
    }

    /** Prevents instantiation of this utility class. */
    private NettyUtils() {
    }

    /**
     * Installs RxNetty's event-loop provider: a single-threaded {@link SingleNioLoopProvider}
     * when the {@code JOB_PARAM_mantis.netty.useSingleThread} property is "true" (ignoring
     * case), otherwise one sized to {@link #MAX_THREADS}.
     */
    public static void setNettyThreads() {
        // NJ
        String useSingleThreadKey = "JOB_PARAM_mantis.netty.useSingleThread";
        String useSingleThreadStr = MantisProperties.getProperty(useSingleThreadKey);
        // "true".equalsIgnoreCase(null) is false, so the explicit null/empty checks of the
        // original are unnecessary; behavior is identical.
        if ("true".equalsIgnoreCase(useSingleThreadStr)) {
            RxNetty.useEventLoopProvider(new SingleNioLoopProvider(1));
            System.out.println(">>>>>>Set Netty to use single thread ");
        } else {
            RxNetty.useEventLoopProvider(new SingleNioLoopProvider(MAX_THREADS));
            System.out.println(String.format("Set Netty to use %s threads", MAX_THREADS));
        }
    }
}
| 7,872 |
0 | Create_ds/mantis/mantis-common/src/main/java/com/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/com/mantisrx/common/utils/Services.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mantisrx.common.utils;
import io.mantisrx.shaded.com.google.common.base.Preconditions;
import io.mantisrx.shaded.com.google.common.util.concurrent.Service;
import io.mantisrx.shaded.com.google.common.util.concurrent.Service.Listener;
import io.mantisrx.shaded.com.google.common.util.concurrent.Service.State;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import lombok.extern.slf4j.Slf4j;
/** Static helpers bridging Guava {@link Service} lifecycle transitions to {@link CompletableFuture}s. */
@Slf4j
public class Services {
    /**
     * Equivalent of service.startAsync().awaitRunning() except that this provides a future that
     * completes when the service reaches the RUNNING state. if the service on the other hand fails
     * to start, then the future is completed exceptionally with the Throwable cause.
     *
     * @param service service that needs to be started
     * @param executor executor to be used for notifying the caller.
     * @return a future
     */
    public static CompletableFuture<Void> startAsync(Service service, Executor executor) {
        Preconditions.checkArgument(service.state() == State.NEW,
            "Assumes the service has not been started yet");
        final CompletableFuture<Void> result = new CompletableFuture<>();
        service.addListener(new Listener() {
            @Override
            public void running() {
                result.complete(null);
            }

            @Override
            public void failed(State from, Throwable failure) {
                // Only fail the future for failures that occur before RUNNING was reached;
                // a later failure means startup already succeeded and `running()` already
                // completed the future.
                if (from.ordinal() < State.RUNNING.ordinal()) {
                    result.completeExceptionally(failure);
                }
            }
        }, executor);
        service.startAsync();
        return result;
    }

    /**
     * Equivalent service.stopAsync().awaitTerminated() except that this method returns a future
     * that gets completed when the service terminated successfully.
     *
     * @param service service to be stopped
     * @param executor executor on which the caller needs to be notified.
     * @return future
     */
    public static CompletableFuture<Void> awaitAsync(Service service, Executor executor) {
        final CompletableFuture<Void> result = new CompletableFuture<>();
        service.addListener(new Listener() {
            @Override
            public void terminated(State from) {
                result.complete(null);
            }

            @Override
            public void failed(State from, Throwable failure) {
                result.completeExceptionally(failure);
            }
        }, executor);
        // The explicit checks below handle services that were already terminal when the
        // listener was registered — presumably because listeners are not notified of
        // past transitions (NOTE(review): confirm against the Guava Service docs).
        if (service.state() == State.FAILED) {
            result.completeExceptionally(service.failureCause());
        } else if (service.state() == State.TERMINATED) {
            result.complete(null);
        }
        return result;
    }

    /**
     * Requests the service to stop and returns a future that completes when it terminates
     * (or completes exceptionally if the service fails).
     *
     * @param service service to be stopped
     * @param executor executor on which the caller needs to be notified
     * @return future completed on termination
     */
    public static CompletableFuture<Void> stopAsync(Service service, Executor executor) {
        // Register for termination first, then request the stop.
        CompletableFuture<Void> result = awaitAsync(service, executor);
        service.stopAsync();
        return result;
    }

    /**
     * Attempts to start the service and waits for it to reach the RUNNING state. If the service has
     * already started, then this method returns immediately.
     *
     * @param service
     */
    public static void startAndWait(Service service) {
        try {
            service.startAsync();
        } catch (IllegalStateException e) {
            // startAsync throws IllegalStateException when the service is not NEW;
            // treat that as "already started" and just wait for RUNNING below.
            log.warn("Service already started: {}", service);
        }
        service.awaitRunning();
    }
}
| 7,873 |
0 | Create_ds/mantis/mantis-common/src/main/java/com/mantisrx/common | Create_ds/mantis/mantis-common/src/main/java/com/mantisrx/common/utils/ListenerCallQueue.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mantisrx.common.utils;
import static io.mantisrx.shaded.com.google.common.base.Preconditions.checkNotNull;
import io.mantisrx.shaded.com.google.common.base.Preconditions;
import io.mantisrx.shaded.com.google.common.collect.Queues;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.Executor;
import java.util.logging.Level;
import java.util.logging.Logger;
import net.jcip.annotations.GuardedBy;
/**
* Copy pasted from
* https://github.com/google/guava/blob/master/guava/src/com/google/common/util/concurrent/ListenerCallQueue.java
* @param <L> type of the listener
*/
public final class ListenerCallQueue<L> {
    // NOTE(review): verbatim copy of Guava's internal ListenerCallQueue (see the class comment
    // above); keep the locking/ordering exactly as-is when updating.
    // TODO(cpovirk): consider using the logger associated with listener.getClass().
    private static final Logger logger = Logger.getLogger(ListenerCallQueue.class.getName());

    // TODO(chrisn): promote AppendOnlyCollection for use here.
    private final List<PerListenerQueue<L>> listeners =
        Collections.synchronizedList(new ArrayList<PerListenerQueue<L>>());

    /** Method reference-compatible listener event. */
    public interface Event<L> {
        /** Call a method on the listener. */
        void call(L listener);
    }

    /**
     * Adds a listener that will be called using the given executor when events are later {@link
     * #enqueue enqueued} and {@link #dispatch dispatched}.
     */
    public void addListener(L listener, Executor executor) {
        checkNotNull(listener, "listener");
        checkNotNull(executor, "executor");
        listeners.add(new PerListenerQueue<>(listener, executor));
    }

    /**
     * Enqueues an event to be run on currently known listeners.
     *
     * <p>The {@code toString} method of the Event itself will be used to describe the event in the
     * case of an error.
     *
     * @param event the callback to execute on {@link #dispatch}
     */
    public void enqueue(Event<L> event) {
        enqueueHelper(event, event);
    }

    /**
     * Enqueues an event to be run on currently known listeners, with a label.
     *
     * @param event the callback to execute on {@link #dispatch}
     * @param label a description of the event to use in the case of an error
     */
    public void enqueue(Event<L> event, String label) {
        enqueueHelper(event, label);
    }

    private void enqueueHelper(Event<L> event, Object label) {
        checkNotNull(event, "event");
        checkNotNull(label, "label");
        // Hold the list lock while adding so every current listener sees the event exactly once.
        synchronized (listeners) {
            for (PerListenerQueue<L> queue : listeners) {
                queue.add(event, label);
            }
        }
    }

    /**
     * Dispatches all events enqueued prior to this call, serially and in order, for every listener.
     *
     * <p>Note: this method is idempotent and safe to call from any thread
     */
    public void dispatch() {
        // iterate by index to avoid concurrent modification exceptions
        for (int i = 0; i < listeners.size(); i++) {
            listeners.get(i).dispatch();
        }
    }

    /**
     * A special purpose queue/executor that dispatches listener events serially on a configured
     * executor. Each event event can be added and dispatched as separate phases.
     *
     * <p>This class is very similar to {@link SequentialExecutor} with the exception that events can
     * be added without necessarily executing immediately.
     */
    private static final class PerListenerQueue<L> implements Runnable {
        final L listener;
        final Executor executor;

        @GuardedBy("this")
        final Queue<Event<L>> waitQueue = Queues.newArrayDeque();

        @GuardedBy("this")
        final Queue<Object> labelQueue = Queues.newArrayDeque();

        @GuardedBy("this")
        boolean isThreadScheduled;

        PerListenerQueue(L listener, Executor executor) {
            this.listener = checkNotNull(listener);
            this.executor = checkNotNull(executor);
        }

        /** Enqueues a event to be run. */
        synchronized void add(ListenerCallQueue.Event<L> event, Object label) {
            // The two queues are kept in lock-step: one label per event.
            waitQueue.add(event);
            labelQueue.add(label);
        }

        /**
         * Dispatches all listeners {@linkplain #enqueue enqueued} prior to this call, serially and in
         * order.
         */
        void dispatch() {
            boolean scheduleEventRunner = false;
            synchronized (this) {
                // Schedule at most one runner at a time; run() drains the queue serially.
                if (!isThreadScheduled) {
                    isThreadScheduled = true;
                    scheduleEventRunner = true;
                }
            }
            if (scheduleEventRunner) {
                try {
                    executor.execute(this);
                } catch (RuntimeException e) {
                    // reset state in case of an error so that later dispatch calls will actually do something
                    synchronized (this) {
                        isThreadScheduled = false;
                    }
                    // Log it and keep going.
                    logger.log(
                        Level.SEVERE,
                        "Exception while running callbacks for " + listener + " on " + executor,
                        e);
                    throw e;
                }
            }
        }

        @Override
        public void run() {
            boolean stillRunning = true;
            try {
                while (true) {
                    ListenerCallQueue.Event<L> nextToRun;
                    Object nextLabel;
                    synchronized (PerListenerQueue.this) {
                        Preconditions.checkState(isThreadScheduled);
                        nextToRun = waitQueue.poll();
                        nextLabel = labelQueue.poll();
                        if (nextToRun == null) {
                            // Queue drained: allow a future dispatch() to schedule a new runner.
                            isThreadScheduled = false;
                            stillRunning = false;
                            break;
                        }
                    }
                    // Always run while _not_ holding the lock, to avoid deadlocks.
                    try {
                        nextToRun.call(listener);
                    } catch (RuntimeException e) {
                        // Log it and keep going.
                        logger.log(
                            Level.SEVERE,
                            "Exception while executing callback: " + listener + " " + nextLabel,
                            e);
                    }
                }
            } finally {
                if (stillRunning) {
                    // An Error is bubbling up. We should mark ourselves as no longer running. That way, if
                    // anyone tries to keep using us, we won't be corrupted.
                    synchronized (PerListenerQueue.this) {
                        isThreadScheduled = false;
                    }
                }
            }
        }
    }
}
| 7,874 |
0 | Create_ds/mantis/mantis-common/src/jmh/java/io/reactivx/common | Create_ds/mantis/mantis-common/src/jmh/java/io/reactivx/common/compression/CompressionUtilsBenchmark.java | package io.reactivx.common.compression;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import io.mantisrx.common.compression.CompressionUtils;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Threads;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
/**
 * JMH benchmarks comparing event-list splitting strategies and
 * {@code CompressionUtils} gzip/snappy round trips.
 *
 * <p>NOTE(review): benchmark bodies are intentionally left exactly as written — restructuring
 * them would change what is measured.
 */
public class CompressionUtilsBenchmark {

    private static final Random random = new Random();

    //
    @Benchmark
    @BenchmarkMode(Mode.Throughput)
    @OutputTimeUnit(TimeUnit.MINUTES)
    @Warmup(iterations = 10, time = 3, timeUnit = TimeUnit.SECONDS)
    @Measurement(iterations = 50, time = 3, timeUnit = TimeUnit.SECONDS)
    @Threads(1)
    public void testBasicStringSplit(Blackhole blackhole, MyState state) throws IOException {
        // Hand-rolled scan for the "$$$" delimiter, counting '$' characters one at a time.
        BufferedReader bf = new BufferedReader(new StringReader(state.eventListStr));
        StringBuilder sb = new StringBuilder();
        String line;
        List<String> msseList = new ArrayList<>();
        int dollarCnt = 0;
        while ((line = bf.readLine()) != null) {
            for (int i = 0; i < line.length(); i++) {
                // Flush an event once three consecutive '$' have been consumed.
                // NOTE(review): a trailing event not followed by "$$$" is never flushed —
                // presumably acceptable for this benchmark's input; confirm if reused.
                if (dollarCnt == 3) {
                    msseList.add(sb.toString());
                    dollarCnt = 0;
                    sb = new StringBuilder();
                }
                if (line.charAt(i) != '$') {
                    sb.append(line.charAt(i));
                } else {
                    dollarCnt++;
                }
            }
        }
        blackhole.consume(msseList);
        //blackhole.consume(state.eventListStr.split("$$"));
        //state.sum = state.a + state.b;
    }

    @Benchmark
    @BenchmarkMode(Mode.Throughput)
    @Warmup(iterations = 10, time = 3, timeUnit = TimeUnit.SECONDS)
    @Measurement(iterations = 50, time = 3, timeUnit = TimeUnit.SECONDS)
    @Threads(1)
    public void testBuiltInStringSplit(Blackhole blackhole, MyState state) throws IOException {
        // Same work via the regex-based String.split on the escaped "$$$" delimiter.
        BufferedReader bf = new BufferedReader(new StringReader(state.eventListStr));
        String line;
        List<String> msseList = new ArrayList<>();
        StringBuilder outStrB = new StringBuilder();
        while ((line = bf.readLine()) != null) {
            outStrB.append(line);
        }
        String[] toks = outStrB.toString().split("\\$\\$\\$");
        for (String tok : toks) {
            msseList.add(tok);
        }
        blackhole.consume(msseList);
    }

    @Benchmark
    @BenchmarkMode(Mode.Throughput)
    @Warmup(iterations = 10, time = 3, timeUnit = TimeUnit.SECONDS)
    @Measurement(iterations = 50, time = 3, timeUnit = TimeUnit.SECONDS)
    @Threads(1)
    public void testSnappyCompress(Blackhole blackhole, MyState state) throws IOException {
        blackhole.consume(CompressionUtils.compressAndBase64Encode(state.eventList, true));
    }

    @Benchmark
    @BenchmarkMode(Mode.Throughput)
    @Warmup(iterations = 10, time = 3, timeUnit = TimeUnit.SECONDS)
    @Measurement(iterations = 50, time = 3, timeUnit = TimeUnit.SECONDS)
    @Threads(1)
    public void testSnappyDeCompress(Blackhole blackhole, MyState state) throws IOException {
        blackhole.consume(CompressionUtils.decompressAndBase64Decode(state.snappyCompressed, true, true));
    }

    //
    @Benchmark
    @BenchmarkMode(Mode.Throughput)
    @Warmup(iterations = 10, time = 3, timeUnit = TimeUnit.SECONDS)
    @Measurement(iterations = 50, time = 3, timeUnit = TimeUnit.SECONDS)
    @Threads(1)
    public void testGzipCompress(Blackhole blackhole, MyState state) throws IOException {
        blackhole.consume(CompressionUtils.compressAndBase64Encode(state.eventList));
    }

    @Benchmark
    @BenchmarkMode(Mode.Throughput)
    @Warmup(iterations = 10, time = 3, timeUnit = TimeUnit.SECONDS)
    @Measurement(iterations = 50, time = 3, timeUnit = TimeUnit.SECONDS)
    @Threads(1)
    public void testGzipDeCompress(Blackhole blackhole, MyState state) throws IOException {
        blackhole.consume(CompressionUtils.decompressAndBase64Decode(state.gzipCompressed, true));
    }

    /** Generates fixed-length strings over [0-9a-z] using the shared Random. */
    public static class RandomString {

        private static final char[] symbols;

        static {
            // Alphabet: digits then lowercase letters.
            StringBuilder tmp = new StringBuilder();
            for (char ch = '0'; ch <= '9'; ++ch)
                tmp.append(ch);
            for (char ch = 'a'; ch <= 'z'; ++ch)
                tmp.append(ch);
            symbols = tmp.toString().toCharArray();
        }

        private final char[] buf;

        public RandomString(int length) {
            if (length < 1)
                throw new IllegalArgumentException("length < 1: " + length);
            buf = new char[length];
        }

        /** Returns a fresh random string of the configured length. */
        public String nextString() {
            for (int idx = 0; idx < buf.length; ++idx)
                buf[idx] = symbols[random.nextInt(symbols.length)];
            return new String(buf);
        }
    }

    /** Per-trial fixture: the raw event list plus pre-compressed snappy/gzip forms. */
    @State(Scope.Thread)
    public static class MyState {

        public String eventListStr;
        public List<String> eventList = new ArrayList<>();
        public String snappyCompressed;
        public String gzipCompressed;

        @Setup(Level.Trial)
        public void doSetup() {
            RandomString rs = new RandomString(200);
            StringBuilder sb = new StringBuilder();
            // NOTE(review): nextInt(50) may be 0, yielding an empty event list for the
            // whole trial — confirm this is intended.
            for (int i = 0; i < random.nextInt(50); i++) {
                String s = rs.nextString();
                eventList.add(s);
                sb.append(s);
                sb.append("$$$");
            }
            snappyCompressed = CompressionUtils.compressAndBase64Encode(eventList, true);
            gzipCompressed = CompressionUtils.compressAndBase64Encode(eventList);
            eventListStr = sb.toString();
            System.out.println("Do Setup");
        }

        @TearDown(Level.Trial)
        public void doTearDown() {
            System.out.println("Do TearDown");
        }
    }
}
| 7,875 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/server/master/persistence/InMemoryPersistenceProvider.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.persistence;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.server.master.store.KeyValueStore;
/**
 * Test-only {@link KeyValueBasedPersistenceProvider} backed by {@link KeyValueStore#inMemory()}
 * and a no-op {@link LifecycleEventPublisher}.
 */
public class InMemoryPersistenceProvider extends KeyValueBasedPersistenceProvider {
    public InMemoryPersistenceProvider() {
        super(KeyValueStore.inMemory(), LifecycleEventPublisher.noop());
    }
}
| 7,876 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/server/master/persistence/FileBasedStoreTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.persistence;
import static io.mantisrx.master.jobcluster.job.worker.MantisWorkerMetadataImpl.MANTIS_SYSTEM_ALLOCATED_NUM_PORTS;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import io.mantisrx.common.Label;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventPublisherImpl;
import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl;
import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl;
import io.mantisrx.master.jobcluster.IJobClusterMetadata;
import io.mantisrx.master.jobcluster.JobClusterMetadataImpl;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.master.jobcluster.job.IMantisStageMetadata;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.master.jobcluster.job.JobTestHelper;
import io.mantisrx.master.jobcluster.job.MantisJobMetadataImpl;
import io.mantisrx.master.jobcluster.job.MantisStageMetadataImpl;
import io.mantisrx.master.jobcluster.job.worker.JobWorker;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.server.master.domain.IJobClusterDefinition;
import io.mantisrx.server.master.domain.JobClusterConfig;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.store.FileBasedStore;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.com.google.common.collect.ImmutableList;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import io.mantisrx.shaded.com.google.common.collect.Maps;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.PrintWriter;
import java.time.Instant;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.junit.After;
import org.junit.Test;
public class FileBasedStoreTest {
private final ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
private final LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl());
private final FileBasedStore fileProvider = new FileBasedStore();
@After
public void tearDown() {
    // Clear all state written by the file-based store so tests stay independent.
    fileProvider.reset();
}
/**
 * Builds a minimal single-stage, single-worker job cluster definition for tests.
 *
 * @param clusterName name to assign to the cluster
 * @param labels      labels to attach to the cluster definition
 * @return a fully-populated {@link JobClusterDefinitionImpl}
 */
private JobClusterDefinitionImpl createFakeJobClusterDefn(String clusterName, List<Label> labels) {
    JobClusterConfig clusterConfig = new JobClusterConfig.Builder()
        .withArtifactName("myart")
        .withSchedulingInfo(new SchedulingInfo.Builder().numberOfStages(1).singleWorkerStageWithConstraints(new MachineDefinition(1, 10, 10, 10, 2), Lists.newArrayList(), Lists.newArrayList()).build())
        .withVersion("0.0.1")
        .build();
    return new JobClusterDefinitionImpl.Builder()
        .withJobClusterConfig(clusterConfig)
        .withName(clusterName)
        .withUser("user")
        .withLabels(labels)
        .withParameters(Lists.newArrayList())
        .withIsReadyForJobMaster(true)
        .withOwner(new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo"))
        .withMigrationConfig(WorkerMigrationConfig.DEFAULT)
        .build();
}
/**
 * Stores a new job (with its stages and workers) through the file-based provider,
 * reloads it, and verifies the round trip via {@link #isEqual}.
 */
@Test
public void testCreateJob() {
    String clusterName = "testCreateJob";
    FileBasedPersistenceProvider sProvider = new FileBasedPersistenceProvider(fileProvider, eventPublisher);
    // NOTE(review): jobClusterDefn is built but never used in this test.
    IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName);
    JobDefinition jobDefinition;
    try {
        jobDefinition = JobTestHelper.generateJobDefinition(clusterName);
        JobId jobId = JobId.fromId(clusterName + "-1").get();
        IMantisJobMetadata mantisJobMetaData = new MantisJobMetadataImpl.Builder()
            .withJobId(jobId)
            .withSubmittedAt(Instant.now())
            .withJobState(JobState.Accepted)
            .withNextWorkerNumToUse(1)
            .withJobDefinition(jobDefinition)
            .build();
        sProvider.storeNewJob(mantisJobMetaData);
        SchedulingInfo schedInfo = jobDefinition.getSchedulingInfo();
        int numStages = schedInfo.getStages().size();
        // Stage numbers are 1-based keys in the scheduling info map.
        for (int s = 1; s <= numStages; s++) {
            StageSchedulingInfo stage = schedInfo.getStages().get(s);
            IMantisStageMetadata msmd = new MantisStageMetadataImpl.Builder().
                withJobId(jobId)
                .withStageNum(s)
                .withNumStages(1)
                .withMachineDefinition(stage.getMachineDefinition())
                .withNumWorkers(stage.getNumberOfInstances())
                .withHardConstraints(stage.getHardConstraints())
                .withSoftConstraints(stage.getSoftConstraints())
                .withScalingPolicy(stage.getScalingPolicy())
                .isScalable(stage.getScalable())
                .build();
            ((MantisJobMetadataImpl) mantisJobMetaData).addJobStageIfAbsent(msmd);
            sProvider.updateMantisStage(msmd);
            for (int w = 0; w < stage.getNumberOfInstances(); w++) {
                // NOTE(review): withStageNum(w+1) uses the worker index rather than the
                // stage number s, and addWorkerMetadata hard-codes stage 1 — presumably
                // only valid for single-stage jobs; confirm intent.
                JobWorker mwmd = new JobWorker.Builder()
                    .withJobId(jobId)
                    .withWorkerIndex(w)
                    .withWorkerNumber(1)
                    .withNumberOfPorts(1 + MANTIS_SYSTEM_ALLOCATED_NUM_PORTS)
                    .withWorkerPorts(new WorkerPorts(ImmutableList.of(9091, 9092, 9093, 9094, 9095)))
                    .withStageNum(w + 1)
                    .withLifecycleEventsPublisher(eventPublisher)
                    .build();
                ((MantisJobMetadataImpl) mantisJobMetaData).addWorkerMetadata(1, mwmd);
                sProvider.storeWorker(mwmd.getMetadata());
            }
        }
        Optional<IMantisJobMetadata> loadedJobMetaOp = sProvider.loadActiveJob(jobId.getId());
        assertTrue(loadedJobMetaOp.isPresent());
        IMantisJobMetadata loadedJobMeta = loadedJobMetaOp.get();
        System.out.println("Original Job -> " + mantisJobMetaData);
        System.out.println("Loaded Job ->" + loadedJobMeta);
        isEqual(mantisJobMetaData, loadedJobMeta);
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}
/**
 * Asserts field-by-field equality of two job metadata snapshots, including every stage.
 *
 * <p>NOTE(review): job definitions and stages are compared via {@code toString()} —
 * presumably because the types lack suitable {@code equals()}; fragile if the
 * {@code toString()} formats change.
 */
private void isEqual(IMantisJobMetadata orig, IMantisJobMetadata loaded) {
    assertEquals(orig.getJobId(), loaded.getJobId());
    assertEquals(orig.getSubmittedAtInstant(), loaded.getSubmittedAtInstant());
    assertEquals(orig.getSubscriptionTimeoutSecs(), loaded.getSubscriptionTimeoutSecs());
    assertEquals(orig.getState(), loaded.getState());
    assertEquals(orig.getNextWorkerNumberToUse(), loaded.getNextWorkerNumberToUse());
    System.out.println("Orig JobDefn: " + orig.getJobDefinition());
    System.out.println("load JobDefn: " + loaded.getJobDefinition());
    assertEquals(orig.getJobDefinition().toString(), loaded.getJobDefinition().toString());
    assertEquals(((MantisJobMetadataImpl) orig).getStageMetadata().size(), ((MantisJobMetadataImpl) loaded).getStageMetadata().size());
    assertEquals(((MantisJobMetadataImpl) orig).getTotalStages(), ((MantisJobMetadataImpl) loaded).getTotalStages());
    // Stage numbers are 1-based.
    for (int s = 1; s <= ((MantisJobMetadataImpl) orig).getTotalStages(); s++) {
        assertTrue(((MantisJobMetadataImpl) loaded).getStageMetadata(s).isPresent());
        System.out.println("orig stage: " + ((MantisJobMetadataImpl) orig).getStageMetadata(s).get());
        System.out.println("load stage: " + ((MantisJobMetadataImpl) loaded).getStageMetadata(s).get());
        assertEquals(((MantisJobMetadataImpl) orig).getStageMetadata(s).get().toString(), ((MantisJobMetadataImpl) loaded).getStageMetadata(s).get().toString());
    }
}
/**
 * Manual (currently disabled) serde round trip: writes a fake job cluster definition to a
 * spool file with Jackson and reads it back.
 *
 * @throws IOException if the spool file cannot be created or written
 */
// @Test
public void serde() throws IOException {
    String clusterName = "testCreateClusterClueter";
    File tmpFile = new File("/tmp/MantisSpool/jobClusters" + "/" + clusterName);
    tmpFile.createNewFile();
    IJobClusterDefinition jobClusterDefn = createFakeJobClusterDefn(clusterName, Lists.newArrayList());
    // try-with-resources so the writer is flushed and closed even on failure; the
    // original leaked the PrintWriter, which could leave the file empty/unflushed.
    try (PrintWriter pwrtr = new PrintWriter(tmpFile)) {
        mapper.writeValue(pwrtr, jobClusterDefn);
    }
    try (FileInputStream fis = new FileInputStream(tmpFile)) {
        IJobClusterDefinition jobClustermeta = mapper.readValue(fis, JobClusterDefinitionImpl.class);
        System.out.println("read: " + jobClustermeta.getName());
    } catch (Exception e) {
        e.printStackTrace();
    }
}
@Test
public void testCreateAndGetJobCluster() {
FileBasedPersistenceProvider sProvider = new FileBasedPersistenceProvider(fileProvider, eventPublisher);
String clusterName = "testCreateClusterClueter";
JobClusterDefinitionImpl jobClusterDefn = createFakeJobClusterDefn(clusterName, Lists.newArrayList());
IJobClusterMetadata jobCluster = new JobClusterMetadataImpl.Builder().withLastJobCount(0).withJobClusterDefinition(jobClusterDefn).build();
try {
sProvider.createJobCluster(jobCluster);
Optional<IJobClusterMetadata> readDataOp = sProvider.loadAllJobClusters().stream()
.filter(jc -> clusterName.equals(jc.getJobClusterDefinition().getName())).findFirst();
if(readDataOp.isPresent()) {
assertEquals(clusterName, readDataOp.get().getJobClusterDefinition().getName());
} else {
fail();
}
} catch(Exception e) {
e.printStackTrace();
fail();
}
}
@Test
public void testUpdateJobCluster() {
FileBasedPersistenceProvider sProvider = new FileBasedPersistenceProvider(fileProvider, eventPublisher);
String clusterName = "testUpdateJobCluster";
JobClusterDefinitionImpl jobClusterDefn = createFakeJobClusterDefn(clusterName, Lists.newArrayList());
IJobClusterMetadata jobCluster = new JobClusterMetadataImpl.Builder().withLastJobCount(0).withJobClusterDefinition(jobClusterDefn).build();
try {
sProvider.createJobCluster(jobCluster);
Optional<IJobClusterMetadata> readDataOp = sProvider.loadAllJobClusters().stream()
.filter(jc -> clusterName.equals(jc.getJobClusterDefinition().getName())).findFirst();
if(readDataOp.isPresent()) {
assertEquals(clusterName, readDataOp.get().getJobClusterDefinition().getName());
assertEquals(0, readDataOp.get().getJobClusterDefinition().getLabels().size());
} else {
fail();
}
List<Label> labels = Lists.newArrayList();
labels.add(new Label("label1", "label1value"));
jobClusterDefn = createFakeJobClusterDefn(clusterName, labels);
IJobClusterMetadata jobClusterUpdated = new JobClusterMetadataImpl.Builder().withLastJobCount(0).withJobClusterDefinition(jobClusterDefn).build();
sProvider.updateJobCluster(jobClusterUpdated);
readDataOp = sProvider.loadAllJobClusters().stream()
.filter(jc -> clusterName.equals(jc.getJobClusterDefinition().getName())).findFirst();
if(readDataOp.isPresent()) {
assertEquals(clusterName, readDataOp.get().getJobClusterDefinition().getName());
assertEquals(1, readDataOp.get().getJobClusterDefinition().getLabels().size());
} else {
fail();
}
} catch(Exception e) {
e.printStackTrace();
fail();
}
}
@Test
public void testGetAllJobClusters() throws Exception {
FileBasedPersistenceProvider sProvider = new FileBasedPersistenceProvider(fileProvider, eventPublisher);
String clusterPrefix = "testGetAllJobClustersCluster";
for(int i=0; i<5; i++) {
JobClusterDefinitionImpl jobClusterDefn = createFakeJobClusterDefn(clusterPrefix + "_" + i, Lists.newArrayList());
IJobClusterMetadata jobCluster = new JobClusterMetadataImpl.Builder().withLastJobCount(0).withJobClusterDefinition(jobClusterDefn).build();
sProvider.createJobCluster(jobCluster);
}
List<IJobClusterMetadata> jobClusterList = sProvider.loadAllJobClusters();
assertTrue(jobClusterList.size() >= 5);
Map<String, IJobClusterMetadata> clustersMap = Maps.newHashMap();
for(IJobClusterMetadata cluster : jobClusterList) {
clustersMap.put(cluster.getJobClusterDefinition().getName(), cluster);
}
for(int i=0; i<5; i++) {
assertTrue(clustersMap.containsKey(clusterPrefix + "_" + i));
}
}
}
| 7,877 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/server/master/domain/DataFormatAdapterTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.domain;
import static java.util.Optional.of;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import io.mantisrx.common.Label;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventPublisherImpl;
import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl;
import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl;
import io.mantisrx.master.jobcluster.IJobClusterMetadata;
import io.mantisrx.master.jobcluster.JobClusterMetadataImpl;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.master.jobcluster.job.IMantisStageMetadata;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.master.jobcluster.job.MantisJobMetadataImpl;
import io.mantisrx.master.jobcluster.job.MantisStageMetadataImpl;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.master.jobcluster.job.worker.JobWorker;
import io.mantisrx.master.jobcluster.job.worker.MantisWorkerMetadataImpl;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.NamedJobDefinition;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.store.MantisJobMetadata;
import io.mantisrx.server.master.store.MantisStageMetadataWritable;
import io.mantisrx.server.master.store.MantisWorkerMetadataWritable;
import io.mantisrx.server.master.store.NamedJob;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.com.fasterxml.jackson.datatype.jdk8.Jdk8Module;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.time.Instant;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.junit.Test;
public class DataFormatAdapterTest {
public static final MachineDefinition DEFAULT_MACHINE_DEFINITION = new MachineDefinition(1, 10, 10, 10, 2);
private static final SchedulingInfo DEFAULT_SCHED_INFO = new SchedulingInfo.Builder().numberOfStages(1).singleWorkerStageWithConstraints(DEFAULT_MACHINE_DEFINITION, Lists.newArrayList(), Lists.newArrayList()).build();
private final LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl());
@Test
public void jobClusterConfigToJarTest() {
long uploadedAt = 1234l;
String artifactName = "artifact1";
String version = "0.0.1";
JobClusterConfig config = new JobClusterConfig(artifactName, uploadedAt, version, DEFAULT_SCHED_INFO);
try {
NamedJob.Jar convertedJar = DataFormatAdapter.convertJobClusterConfigToJar(config);
assertEquals(uploadedAt, convertedJar.getUploadedAt());
assertEquals("http://" + artifactName, convertedJar.getUrl().toString());
assertEquals(version, convertedJar.getVersion());
assertEquals(DEFAULT_SCHED_INFO,convertedJar.getSchedulingInfo());
JobClusterConfig regeneratedConfig = DataFormatAdapter.convertJarToJobClusterConfig(convertedJar);
assertEquals(uploadedAt, regeneratedConfig.getUploadedAt());
assertEquals(artifactName, regeneratedConfig.getArtifactName());
assertEquals(version, regeneratedConfig.getVersion());
assertEquals(DEFAULT_SCHED_INFO, regeneratedConfig.getSchedulingInfo());
} catch (MalformedURLException e) {
fail();
e.printStackTrace();
}
}
@Test
public void artifactNameTest() {
String artifactName = "myartifact-0.0.1.zip";
String version = "0.0.1";
try {
URL jar = DataFormatAdapter.generateURL(artifactName);
assertEquals("http://myartifact-0.0.1.zip", jar.toString());
assertEquals(artifactName, DataFormatAdapter.extractArtifactName(jar).orElse(""));
} catch (MalformedURLException e) {
e.printStackTrace();
fail();
}
}
@Test
public void artifactNameTest2() {
String artifactName = "https://myartifact-0.0.1.zip";
String version = "0.0.1";
try {
URL jar = DataFormatAdapter.generateURL(artifactName);
assertEquals("https://myartifact-0.0.1.zip", jar.toString());
assertEquals("myartifact-0.0.1.zip", DataFormatAdapter.extractArtifactName(jar).orElse(""));
} catch (MalformedURLException e) {
e.printStackTrace();
fail();
}
}
@Test
public void extractArtifactNameTest1() throws MalformedURLException {
URL url = new URL("http://mantisui.eu-west-1.dyntest.netflix.net/mantis-artifacts/nfmantis-sources-genericqueryable-source-6.0.8.zip");
assertEquals("nfmantis-sources-genericqueryable-source-6.0.8.zip",DataFormatAdapter.extractArtifactName(url).orElse(""));
}
@Test
public void extractArtifactNameTest2() throws MalformedURLException {
URL url = new URL("http://nfmantis-sources-genericqueryable-source-6.0.8.zip");
assertEquals("nfmantis-sources-genericqueryable-source-6.0.8.zip",DataFormatAdapter.extractArtifactName(url).orElse(""));
}
    /**
     * SLA -> NamedJob.SLA -> SLA round trip when a cron spec is present. The min/max
     * instance counts are deliberately not asserted on the legacy form (see the
     * commented assertions below), but the full round trip must be lossless.
     */
    @Test
    public void slaConversionTestWithCronSpec() {
        int min = 1;
        int max = 10;
        String cronSpec = "0 0 0-23 * * ?";
        io.mantisrx.server.master.domain.SLA sla = new SLA(min, max, cronSpec,IJobClusterDefinition.CronPolicy.KEEP_EXISTING);
        NamedJob.SLA oldSlaFormat = DataFormatAdapter.convertSLAToNamedJobSLA(sla);
        // assertEquals(min, oldSlaFormat.getMin());
        // assertEquals(max, oldSlaFormat.getMax());
        assertEquals(cronSpec, oldSlaFormat.getCronSpec());
        assertEquals(NamedJobDefinition.CronPolicy.KEEP_EXISTING,oldSlaFormat.getCronPolicy());
        SLA reconvertedSLA = DataFormatAdapter.convertToSLA(oldSlaFormat);
        assertEquals(sla, reconvertedSLA);
    }
@Test
public void slaConversionTestNoCronSpec() {
int min = 1;
int max = 10;
String cronSpec = "0 0 0-23 * * ?";
io.mantisrx.server.master.domain.SLA sla = new SLA(min, max, null,null);
NamedJob.SLA oldSlaFormat = DataFormatAdapter.convertSLAToNamedJobSLA(sla);
assertEquals(min, oldSlaFormat.getMin());
assertEquals(max, oldSlaFormat.getMax());
// assertEquals(cronSpec, oldSlaFormat.getCronSpec());
// assertEquals(NamedJobDefinition.CronPolicy.KEEP_EXISTING,oldSlaFormat.getCronPolicy());
SLA reconvertedSLA = DataFormatAdapter.convertToSLA(oldSlaFormat);
assertEquals(sla, reconvertedSLA);
}
    /**
     * Full round trip of job cluster metadata through the legacy {@code NamedJob}
     * format: builds a cluster with parameters, labels, owner, SLA and migration
     * config, converts it to NamedJob, asserts every field, converts back, and
     * asserts every field again.
     */
    @Test
    public void jobClusterMetadataConversionTest() {
        String artifactName = "artifact1";
        String version = "0.0.1";
        List<Parameter> parameterList = new ArrayList<>();
        Parameter parameter = new Parameter("param1", "value1");
        parameterList.add(parameter);
        List<Label> labels = new ArrayList<>();
        Label label = new Label("label1", "labelvalue1");
        labels.add(label);
        long uAt = 1234l;
        JobClusterConfig jobClusterConfig = new JobClusterConfig.Builder()
                .withArtifactName(artifactName)
                .withSchedulingInfo(DEFAULT_SCHED_INFO)
                .withVersion(version)
                .withUploadedAt(uAt)
                .build();
        String clusterName = "clusterName1";
        JobOwner owner = new JobOwner("Neeraj", "Mantis", "desc", "nma@netflix.com", "repo");
        boolean isReadyForMaster = true;
        SLA sla = new SLA(1, 10, null, null);
        JobClusterDefinitionImpl clusterDefn = new JobClusterDefinitionImpl.Builder()
                .withJobClusterConfig(jobClusterConfig)
                .withName(clusterName)
                .withUser("user1")
                .withIsReadyForJobMaster(isReadyForMaster)
                .withOwner(owner)
                .withMigrationConfig(WorkerMigrationConfig.DEFAULT)
                .withSla(sla)
                .withParameters(parameterList)
                .withLabels(labels)
                .build();
        int lastJobCnt = 10;
        boolean disabled = false;
        IJobClusterMetadata clusterMeta = new JobClusterMetadataImpl.Builder()
                .withJobClusterDefinition(clusterDefn)
                .withLastJobCount(lastJobCnt)
                .withIsDisabled(disabled)
                .build();
        // Forward conversion: new metadata -> legacy NamedJob.
        NamedJob namedJob = DataFormatAdapter.convertJobClusterMetadataToNamedJob(clusterMeta);
        assertEquals(disabled,namedJob.getDisabled());
        assertEquals(clusterName, namedJob.getName());
        assertEquals(lastJobCnt,namedJob.getLastJobCount());
        assertEquals(1, namedJob.getLabels().size());
        assertEquals(label, namedJob.getLabels().get(0));
        assertEquals(owner, namedJob.getOwner());
        assertEquals(isReadyForMaster, namedJob.getIsReadyForJobMaster());
        assertEquals(WorkerMigrationConfig.DEFAULT, namedJob.getMigrationConfig());
        // assert parameters
        assertEquals(parameterList.size(), namedJob.getParameters().size());
        assertEquals(parameter, namedJob.getParameters().get(0));
        // assert sla
        assertEquals(sla.getMin(), namedJob.getSla().getMin());
        assertEquals(sla.getMax(), namedJob.getSla().getMax());
        // assert jar info
        assertEquals(1, namedJob.getJars().size());
        // jar info
        NamedJob.Jar jar = namedJob.getJars().get(0);
        assertEquals(uAt, jar.getUploadedAt());
        assertEquals(DEFAULT_SCHED_INFO,jar.getSchedulingInfo());
        assertEquals(version, jar.getVersion());
        assertEquals(artifactName, DataFormatAdapter.extractArtifactName(jar.getUrl()).orElse(""));
        // Reverse conversion: legacy NamedJob -> new metadata, field by field.
        IJobClusterMetadata reconvertedJobCluster = DataFormatAdapter.convertNamedJobToJobClusterMetadata(namedJob);
        assertEquals(disabled,reconvertedJobCluster.isDisabled());
        assertEquals(clusterName,reconvertedJobCluster.getJobClusterDefinition().getName());
        assertEquals(lastJobCnt,reconvertedJobCluster.getLastJobCount());
        assertEquals(1, reconvertedJobCluster.getJobClusterDefinition().getLabels().size());
        assertEquals(label, reconvertedJobCluster.getJobClusterDefinition().getLabels().get(0));
        assertEquals(owner, reconvertedJobCluster.getJobClusterDefinition().getOwner());
        assertEquals(isReadyForMaster,reconvertedJobCluster.getJobClusterDefinition().getIsReadyForJobMaster());
        assertEquals(WorkerMigrationConfig.DEFAULT,reconvertedJobCluster.getJobClusterDefinition().getWorkerMigrationConfig());
        assertEquals(parameterList.size(), reconvertedJobCluster.getJobClusterDefinition().getParameters().size());
        assertEquals(parameter, reconvertedJobCluster.getJobClusterDefinition().getParameters().get(0));
        assertEquals(sla.getMin(), reconvertedJobCluster.getJobClusterDefinition().getSLA().getMin());
        assertEquals(sla.getMax(), reconvertedJobCluster.getJobClusterDefinition().getSLA().getMax());
        JobClusterConfig clusterConfig1 = reconvertedJobCluster.getJobClusterDefinition().getJobClusterConfig();
        assertEquals(uAt,clusterConfig1.getUploadedAt());
        assertEquals(DEFAULT_SCHED_INFO,clusterConfig1.getSchedulingInfo());
        assertEquals(version,clusterConfig1.getVersion());
        assertEquals(artifactName, clusterConfig1.getArtifactName());
    }
@Test
public void completedJobToNamedJobCompletedJobTest() {
String name = "name";
String jobId = "name-1";
String version = "0.0.1";
JobState jobState = JobState.Completed;
long submittedAt = 1234l;
long terminatedAt = 2234l;
String me = "me";
List<Label> labels = new ArrayList<>();
labels.add(new Label("l1","v1"));
JobClusterDefinitionImpl.CompletedJob cJob = new JobClusterDefinitionImpl.CompletedJob(
name, jobId, version, jobState, submittedAt, terminatedAt, me, labels);
NamedJob.CompletedJob njobCJob = DataFormatAdapter.convertCompletedJobToNamedJobCompletedJob(cJob);
assertEquals(name,njobCJob.getName());
assertEquals(jobId,njobCJob.getJobId());
assertEquals(version,njobCJob.getVersion());
assertEquals(MantisJobState.Completed,njobCJob.getState());
assertEquals(submittedAt,njobCJob.getSubmittedAt());
assertEquals(terminatedAt,njobCJob.getTerminatedAt());
JobClusterDefinitionImpl.CompletedJob reconverted = DataFormatAdapter.convertNamedJobCompletedJobToCompletedJob(njobCJob);
assertEquals(cJob,reconverted);
}
    /**
     * Deserializes a legacy worker-metadata JSON blob (without the resourceCluster
     * field) into MantisWorkerMetadataWritable, asserts every deserialized field, and
     * verifies conversion to the new JobWorker metadata form matches an equivalently
     * built JobWorker.
     */
    @Test
    public void oldMantisWorkerMetadataReadTest() throws IOException {
        ObjectMapper mapper = new ObjectMapper().registerModule(new Jdk8Module());
        final String oldWorkerMetadataWriteableStr = "{\n" +
            " \"workerIndex\": 0,\n" +
            " \"workerNumber\": 1,\n" +
            " \"jobId\": \"cname-1\",\n" +
            " \"stageNum\": 1,\n" +
            " \"numberOfPorts\": 3,\n" +
            " \"metricsPort\": 1,\n" +
            " \"consolePort\": 3,\n" +
            " \"debugPort\": 2,\n" +
            " \"customPort\": 5,\n" +
            " \"ports\": [4],\n" +
            " \"state\": \"Completed\",\n" +
            " \"slave\": \"slave1\",\n" +
            " \"slaveID\": \"slaveId1\",\n" +
            " \"cluster\": \"prefCluster\",\n" +
            " \"acceptedAt\": 999,\n" +
            " \"launchedAt\": 1000,\n" +
            " \"startingAt\": 1234,\n" +
            " \"startedAt\": 1001,\n" +
            " \"completedAt\": 2000,\n" +
            " \"reason\": \"Normal\",\n" +
            " \"resubmitOf\": 42,\n" +
            " \"totalResubmitCount\": 1\n" +
            "}";
        MantisWorkerMetadataWritable oldMetadataWritable = mapper.readValue(oldWorkerMetadataWriteableStr, MantisWorkerMetadataWritable.class);
        Optional<String> prefCluster = of("prefCluster");
        // Port values mirroring the JSON above; the WorkerPorts list carries all five.
        int metricsPort = 1;
        int debugPort = 2;
        int consolePort = 3;
        int customPort = 5;
        int ssePort = 4;
        List<Integer> ports = Lists.newArrayList();
        ports.add(metricsPort);
        ports.add(debugPort);
        ports.add(consolePort);
        ports.add(customPort);
        ports.add(ssePort);
        WorkerPorts workerPorts = new WorkerPorts(ports);
        int workerNum = 1;
        int workerIndex = 0;
        long startingAt = 1234l;
        int stageNum = 1;
        String slaveid = "slaveId1";
        String slave = "slave1";
        int resubmitCnt = 1;
        int portNums = ports.size();
        long launchedAt = 1000l;
        JobId jobId = new JobId("cname", 1);
        long acceptedAt = 999l;
        long completedAt = 2000l;
        long startedAt = 1001l;
        int resubOf = 42;
        // Build the expected new-format worker from the same field values.
        JobWorker worker = new JobWorker.Builder()
            .withPreferredCluster(prefCluster)
            .withJobCompletedReason(JobCompletedReason.Normal)
            .withWorkerPorts(workerPorts)
            .withWorkerNumber(workerNum)
            .withWorkerIndex(workerIndex)
            .withState(WorkerState.Completed)
            .withStartingAt(startingAt)
            .withStartedAt(startedAt)
            .withCompletedAt(completedAt)
            .withStageNum(stageNum)
            .withSlaveID(slaveid)
            .withSlave(slave)
            .withResubmitCount(resubmitCnt)
            .withResubmitOf(resubOf)
            .withNumberOfPorts(portNums)
            .withLaunchedAt(launchedAt)
            .withJobId(jobId)
            .withAcceptedAt(acceptedAt)
            .withLifecycleEventsPublisher(eventPublisher)
            .build();
        IMantisWorkerMetadata expectedWorkerMeta = worker.getMetadata();
        assertEquals(prefCluster,oldMetadataWritable.getCluster());
        assertEquals(workerIndex, oldMetadataWritable.getWorkerIndex());
        assertEquals(workerNum, oldMetadataWritable.getWorkerNumber());
        assertEquals(jobId.getId(),oldMetadataWritable.getJobId());
        assertEquals(acceptedAt,oldMetadataWritable.getAcceptedAt());
        assertEquals(startingAt,oldMetadataWritable.getStartingAt());
        assertEquals(startedAt, oldMetadataWritable.getStartedAt());
        assertEquals(launchedAt, oldMetadataWritable.getLaunchedAt());
        assertEquals(completedAt, oldMetadataWritable.getCompletedAt());
        assertEquals(stageNum, oldMetadataWritable.getStageNum());
        assertEquals(slave, oldMetadataWritable.getSlave());
        assertEquals(slaveid, oldMetadataWritable.getSlaveID());
        assertEquals(metricsPort, oldMetadataWritable.getMetricsPort());
        assertEquals(consolePort, oldMetadataWritable.getConsolePort());
        assertEquals(debugPort, oldMetadataWritable.getDebugPort());
        assertEquals(5, oldMetadataWritable.getCustomPort());
        assertEquals(MantisJobState.Completed, oldMetadataWritable.getState());
        assertEquals(resubmitCnt, oldMetadataWritable.getTotalResubmitCount());
        assertEquals(resubOf, oldMetadataWritable.getResubmitOf());
        // The JSON declares numberOfPorts=3 and a ports list holding only the SSE port.
        assertEquals(3, oldMetadataWritable.getNumberOfPorts());
        assertEquals(1, oldMetadataWritable.getPorts().size());
        assertEquals(ssePort, (long)oldMetadataWritable.getPorts().get(0));
        assertEquals(JobCompletedReason.Normal, oldMetadataWritable.getReason());
        JobWorker convertedMetadata = DataFormatAdapter.convertMantisWorkerMetadataWriteableToMantisWorkerMetadata(oldMetadataWritable, eventPublisher);
        assertEquals(expectedWorkerMeta, convertedMetadata.getMetadata());
    }
    /**
     * Same round trip as {@code oldMantisWorkerMetadataReadTest}, but the JSON blob
     * additionally carries the newer {@code resourceCluster} field, which must survive
     * deserialization and conversion to the new JobWorker form.
     */
    @Test
    public void mantisWorkerMetadataReadTest() throws IOException {
        ObjectMapper mapper = new ObjectMapper().registerModule(new Jdk8Module());
        final String oldWorkerMetadataWriteableStr = "{\n" +
            " \"workerIndex\": 0,\n" +
            " \"workerNumber\": 1,\n" +
            " \"jobId\": \"cname-1\",\n" +
            " \"stageNum\": 1,\n" +
            " \"numberOfPorts\": 3,\n" +
            " \"metricsPort\": 1,\n" +
            " \"consolePort\": 3,\n" +
            " \"debugPort\": 2,\n" +
            " \"customPort\": 5,\n" +
            " \"ports\": [4],\n" +
            " \"state\": \"Completed\",\n" +
            " \"slave\": \"slave1\",\n" +
            " \"slaveID\": \"slaveId1\",\n" +
            " \"cluster\": \"prefCluster\",\n" +
            " \"acceptedAt\": 999,\n" +
            " \"launchedAt\": 1000,\n" +
            " \"startingAt\": 1234,\n" +
            " \"startedAt\": 1001,\n" +
            " \"completedAt\": 2000,\n" +
            " \"reason\": \"Normal\",\n" +
            " \"resubmitOf\": 42,\n" +
            " \"totalResubmitCount\": 1,\n" +
            " \"resourceCluster\": {\"resourceID\": \"resourceCluster\"}\n" +
            "}";
        MantisWorkerMetadataWritable metadataWritable = mapper.readValue(oldWorkerMetadataWriteableStr, MantisWorkerMetadataWritable.class);
        Optional<String> prefCluster = of("prefCluster");
        ClusterID resourceCluster = ClusterID.of("resourceCluster");
        // Port values mirroring the JSON above; the WorkerPorts list carries all five.
        int metricsPort = 1;
        int debugPort = 2;
        int consolePort = 3;
        int customPort = 5;
        int ssePort = 4;
        List<Integer> ports = Lists.newArrayList();
        ports.add(metricsPort);
        ports.add(debugPort);
        ports.add(consolePort);
        ports.add(customPort);
        ports.add(ssePort);
        WorkerPorts workerPorts = new WorkerPorts(ports);
        int workerNum = 1;
        int workerIndex = 0;
        long startingAt = 1234l;
        int stageNum = 1;
        String slaveid = "slaveId1";
        String slave = "slave1";
        int resubmitCnt = 1;
        int portNums = ports.size();
        long launchedAt = 1000l;
        JobId jobId = new JobId("cname", 1);
        long acceptedAt = 999l;
        long completedAt = 2000l;
        long startedAt = 1001l;
        int resubOf = 42;
        // Build the expected new-format worker from the same field values.
        JobWorker worker = new JobWorker.Builder()
            .withPreferredCluster(prefCluster)
            .withResourceCluster(resourceCluster)
            .withJobCompletedReason(JobCompletedReason.Normal)
            .withWorkerPorts(workerPorts)
            .withWorkerNumber(workerNum)
            .withWorkerIndex(workerIndex)
            .withState(WorkerState.Completed)
            .withStartingAt(startingAt)
            .withStartedAt(startedAt)
            .withCompletedAt(completedAt)
            .withStageNum(stageNum)
            .withSlaveID(slaveid)
            .withSlave(slave)
            .withResubmitCount(resubmitCnt)
            .withResubmitOf(resubOf)
            .withNumberOfPorts(portNums)
            .withLaunchedAt(launchedAt)
            .withJobId(jobId)
            .withAcceptedAt(acceptedAt)
            .withLifecycleEventsPublisher(eventPublisher)
            .build();
        IMantisWorkerMetadata expectedWorkerMeta = worker.getMetadata();
        assertEquals(prefCluster,metadataWritable.getCluster());
        assertEquals(resourceCluster,metadataWritable.getResourceCluster().get());
        assertEquals(workerIndex, metadataWritable.getWorkerIndex());
        assertEquals(workerNum, metadataWritable.getWorkerNumber());
        assertEquals(jobId.getId(),metadataWritable.getJobId());
        assertEquals(acceptedAt,metadataWritable.getAcceptedAt());
        assertEquals(startingAt,metadataWritable.getStartingAt());
        assertEquals(startedAt, metadataWritable.getStartedAt());
        assertEquals(launchedAt, metadataWritable.getLaunchedAt());
        assertEquals(completedAt, metadataWritable.getCompletedAt());
        assertEquals(stageNum, metadataWritable.getStageNum());
        assertEquals(slave, metadataWritable.getSlave());
        assertEquals(slaveid, metadataWritable.getSlaveID());
        assertEquals(metricsPort, metadataWritable.getMetricsPort());
        assertEquals(consolePort, metadataWritable.getConsolePort());
        assertEquals(debugPort, metadataWritable.getDebugPort());
        assertEquals(5, metadataWritable.getCustomPort());
        assertEquals(MantisJobState.Completed, metadataWritable.getState());
        assertEquals(resubmitCnt, metadataWritable.getTotalResubmitCount());
        assertEquals(resubOf, metadataWritable.getResubmitOf());
        // The JSON declares numberOfPorts=3 and a ports list holding only the SSE port.
        assertEquals(3, metadataWritable.getNumberOfPorts());
        assertEquals(1, metadataWritable.getPorts().size());
        assertEquals(ssePort, (long)metadataWritable.getPorts().get(0));
        assertEquals(JobCompletedReason.Normal, metadataWritable.getReason());
        JobWorker convertedMetadata = DataFormatAdapter.convertMantisWorkerMetadataWriteableToMantisWorkerMetadata(metadataWritable, eventPublisher);
        assertEquals(expectedWorkerMeta, convertedMetadata.getMetadata());
    }
    /**
     * Converts new-format worker metadata to the legacy writable form, asserts every
     * field of the writable, then converts back and asserts equality with the original
     * metadata.
     */
    @Test
    public void mantisWorkerMetadataToMetadataWritebleTest() {
        Optional<String> prefCluster = of("prefCluster");
        ClusterID resourceCluster = ClusterID.of("resourceCluster");
        int metricsPort = 1;
        int debugPort = 2;
        int consolePort = 3;
        int customPort = 4;
        int ssePort = 5;
        List<Integer> ports = Lists.newArrayList();
        ports.add(metricsPort);
        ports.add(debugPort);
        ports.add(consolePort);
        ports.add(customPort);
        ports.add(ssePort);
        WorkerPorts workerPorts = new WorkerPorts(ports);
        int workerNum = 1;
        int workerIndex = 0;
        long startingAt = 1234l;
        int stageNum = 1;
        String slaveid = "slaveId1";
        String slave = "slave1";
        int resubmitCnt = 1;
        int portNums = ports.size();
        long launchedAt = 1000l;
        JobId jobId = new JobId("cname", 1);
        long acceptedAt = 999l;
        long completedAt = 2000l;
        long startedAt = 1001l;
        int resubOf = 42;
        JobWorker worker = new JobWorker.Builder()
            .withPreferredCluster(prefCluster)
            .withResourceCluster(resourceCluster)
            .withJobCompletedReason(JobCompletedReason.Normal)
            .withWorkerPorts(workerPorts)
            .withWorkerNumber(workerNum)
            .withWorkerIndex(workerIndex)
            .withState(WorkerState.Completed)
            .withStartingAt(startingAt)
            .withStartedAt(startedAt)
            .withCompletedAt(completedAt)
            .withStageNum(stageNum)
            .withSlaveID(slaveid)
            .withSlave(slave)
            .withResubmitCount(resubmitCnt)
            .withResubmitOf(resubOf)
            .withNumberOfPorts(portNums)
            .withLaunchedAt(launchedAt)
            .withJobId(jobId)
            .withAcceptedAt(acceptedAt)
            .withLifecycleEventsPublisher(eventPublisher)
            .build();
        IMantisWorkerMetadata workerMeta = worker.getMetadata();
        // Forward conversion to the legacy writable format.
        MantisWorkerMetadataWritable metadataWritable = DataFormatAdapter.convertMantisWorkerMetadataToMantisWorkerMetadataWritable(workerMeta);
        assertEquals(prefCluster,metadataWritable.getCluster());
        assertEquals(resourceCluster, metadataWritable.getResourceCluster().get());
        assertEquals(workerIndex, metadataWritable.getWorkerIndex());
        assertEquals(workerNum, metadataWritable.getWorkerNumber());
        assertEquals(jobId.getId(),metadataWritable.getJobId());
        assertEquals(acceptedAt,metadataWritable.getAcceptedAt());
        assertEquals(startingAt,metadataWritable.getStartingAt());
        assertEquals(startedAt, metadataWritable.getStartedAt());
        assertEquals(launchedAt, metadataWritable.getLaunchedAt());
        assertEquals(completedAt, metadataWritable.getCompletedAt());
        assertEquals(stageNum, metadataWritable.getStageNum());
        assertEquals(slave, metadataWritable.getSlave());
        assertEquals(slaveid, metadataWritable.getSlaveID());
        assertEquals(metricsPort, metadataWritable.getMetricsPort());
        assertEquals(consolePort, metadataWritable.getConsolePort());
        assertEquals(debugPort, metadataWritable.getDebugPort());
        assertEquals(customPort, metadataWritable.getCustomPort());
        assertEquals(MantisJobState.Completed, metadataWritable.getState());
        assertEquals(resubmitCnt, metadataWritable.getTotalResubmitCount());
        assertEquals(resubOf, metadataWritable.getResubmitOf());
        assertEquals(portNums, metadataWritable.getNumberOfPorts());
        // Only the SSE port ends up in the writable's ports list.
        assertEquals(1, metadataWritable.getPorts().size());
        assertEquals(ssePort, (long)metadataWritable.getPorts().get(0));
        assertEquals(JobCompletedReason.Normal, metadataWritable.getReason());
        // Reverse conversion must reproduce the original metadata exactly.
        JobWorker reconverted = DataFormatAdapter.convertMantisWorkerMetadataWriteableToMantisWorkerMetadata(metadataWritable, eventPublisher);
        assertEquals(workerMeta, reconverted.getMetadata());
    }
    /**
     * Round-trips stage metadata (scaling policy with CPU/DataDrop strategies, soft and
     * hard constraints, machine definition) through the legacy
     * MantisStageMetadataWritable format and back.
     */
    @Test
    public void convertMantisStageMetaTest() {
        Map<StageScalingPolicy.ScalingReason, StageScalingPolicy.Strategy> smap = new HashMap<>();
        smap.put(StageScalingPolicy.ScalingReason.CPU, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.CPU, 0.5, 0.75, null));
        smap.put(StageScalingPolicy.ScalingReason.DataDrop, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.DataDrop, 0.0, 2.0, null));
        int stageNo = 1;
        int min = 3;
        int max = 10;
        int increment = 1;
        int decrement = 1;
        int coolDownSecs = 300;
        StageScalingPolicy stageScalingPolicy = new StageScalingPolicy(stageNo, min, max, increment, decrement, coolDownSecs, smap);
        List<JobConstraints> softConstraintsList = new ArrayList<>();
        softConstraintsList.add(JobConstraints.ExclusiveHost);
        List<JobConstraints> hardConstraintsList = new ArrayList<>();
        hardConstraintsList.add(JobConstraints.M3Cluster);
        JobId jobId = new JobId("cName",1);
        int numWorkers = 1;
        int numStages = 2;
        boolean isScalable = true;
        IMantisStageMetadata stageMeta = new MantisStageMetadataImpl.Builder()
            .withStageNum(stageNo)
            .withScalingPolicy(stageScalingPolicy)
            .withNumWorkers(numWorkers)
            .withMachineDefinition(DEFAULT_MACHINE_DEFINITION)
            .withNumStages(numStages)
            .withSoftConstraints(softConstraintsList)
            .withHardConstraints(hardConstraintsList)
            .withJobId(jobId)
            .isScalable(isScalable)
            .build();
        // Forward conversion to the legacy writable format.
        MantisStageMetadataWritable stageMetadataWritable = DataFormatAdapter.convertMantisStageMetadataToMantisStageMetadataWriteable(stageMeta);
        assertEquals(jobId.getId(),stageMetadataWritable.getJobId());
        assertEquals(JobConstraints.M3Cluster,stageMetadataWritable.getHardConstraints().get(0));
        assertEquals(JobConstraints.ExclusiveHost, stageMetadataWritable.getSoftConstraints().get(0));
        assertEquals(stageScalingPolicy, stageMetadataWritable.getScalingPolicy());
        assertTrue(stageMetadataWritable.getScalable());
        assertEquals(DEFAULT_MACHINE_DEFINITION, stageMetadataWritable.getMachineDefinition());
        assertEquals(numWorkers, stageMetadataWritable.getNumWorkers());
        assertEquals(numStages, stageMetadataWritable.getNumStages());
        assertEquals(stageNo,stageMetadataWritable.getStageNum());
        // Reverse conversion must produce an equal stage metadata instance.
        IMantisStageMetadata reconverted = DataFormatAdapter.convertMantisStageMetadataWriteableToMantisStageMetadata(stageMetadataWritable, eventPublisher);
        assertEquals(stageMeta,reconverted);
    }
    /**
     * End-to-end conversion of a full job metadata graph (job definition, one stage,
     * one worker) to the legacy MantisJobMetadata writable and back, asserting the
     * legacy fields and then comparing the reconverted metadata field by field.
     * Whole-object equality is deliberately not asserted (see commented lines below).
     *
     * @throws Exception on any builder or conversion failure (fails the test)
     */
    @Test
    public void convertMantisJobWriteableTest() throws Exception {
        String artifactName = "artifact";
        String version = "1.0.0";
        String clusterName = "myCluster";
        List<Label> labels = new ArrayList<>();
        Label label = new Label("myLable","myVal");
        labels.add(label);
        List<Parameter> params = new ArrayList<>();
        Parameter param = new Parameter("myparam", "myval");
        params.add(param);
        long subTimeout = 1000;
        JobSla jobSla = new JobSla(100,10,JobSla.StreamSLAType.Lossy,MantisJobDurationType.Perpetual,"userType");
        JobDefinition jobDefn = new JobDefinition.Builder()
            .withArtifactName(artifactName)
            .withName(clusterName)
            .withLabels(labels)
            .withParameters(params)
            .withSchedulingInfo(DEFAULT_SCHED_INFO)
            .withUser("user")
            .withJobSla(jobSla)
            .withSubscriptionTimeoutSecs(subTimeout)
            .withNumberOfStages(DEFAULT_SCHED_INFO.getStages().size())
            .build();
        JobId jobId = new JobId(clusterName,1);
        long currTime = System.currentTimeMillis();
        Instant startedAt = Instant.ofEpochMilli(currTime);
        Instant endedAt = startedAt.plusSeconds(5);
        Instant submittedAt = startedAt.minusSeconds(5);
        IMantisJobMetadata jobmeta = new MantisJobMetadataImpl.Builder()
            .withJobDefinition(jobDefn)
            .withJobId(jobId)
            .withNextWorkerNumToUse(2)
            .withSubmittedAt(submittedAt)
            .withJobState(JobState.Launched)
            .build();
        IMantisWorkerMetadata workerMetadata = new MantisWorkerMetadataImpl(0,
            1, jobId.getId(),
            1,3, new WorkerPorts(Lists.newArrayList(8000, 9000, 9010, 9020, 9030)), WorkerState.Started,
            "slave","slaveId",startedAt.toEpochMilli(),startedAt.toEpochMilli(),
            startedAt.toEpochMilli(),startedAt.toEpochMilli(),-1,JobCompletedReason.Normal,
            0,0,of("cluster"), Optional.empty());
        // Attach a single stage and a single worker to the job before converting.
        ((MantisJobMetadataImpl) jobmeta).addJobStageIfAbsent(new MantisStageMetadataImpl.Builder()
            .withNumStages(1)
            .withStageNum(1)
            .withNumWorkers(1)
            .withJobId(jobId)
            .withHardConstraints(Lists.newArrayList())
            .withSoftConstraints(Lists.newArrayList())
            .withMachineDefinition(DEFAULT_MACHINE_DEFINITION)
            .build());
        ((MantisJobMetadataImpl) jobmeta).addWorkerMetadata(1, new JobWorker(workerMetadata,eventPublisher));
        // Forward conversion to the legacy writable format.
        MantisJobMetadata oldFormat = DataFormatAdapter.convertMantisJobMetadataToMantisJobMetadataWriteable(jobmeta);
        System.out.println("oldForamt -> " + oldFormat);
        assertEquals(jobId.getId(), oldFormat.getJobId());
        assertEquals(label,oldFormat.getLabels().get(0));
        assertEquals(param,oldFormat.getParameters().get(0));
        assertEquals(clusterName,oldFormat.getName());
        assertEquals(jobSla,oldFormat.getSla());
        assertEquals(1,oldFormat.getNumStages());
        assertEquals(subTimeout,oldFormat.getSubscriptionTimeoutSecs());
        assertEquals(2,oldFormat.getNextWorkerNumberToUse());
        assertEquals("http://" + artifactName,oldFormat.getJarUrl().toString());
        assertEquals(MantisJobState.Launched, oldFormat.getState());
        assertEquals(submittedAt.toEpochMilli(),oldFormat.getSubmittedAt());
        assertEquals("user",oldFormat.getUser());
        // Reverse conversion; compare field by field rather than via equals().
        IMantisJobMetadata reconverted = DataFormatAdapter.convertMantisJobWriteableToMantisJobMetadata(oldFormat, eventPublisher);
        System.out.println("newForamt -> " + reconverted);
        //assertEquals(jobmeta, reconverted);
        // assertTrue(jobmeta.equals(reconverted));
        assertEquals(jobmeta.getArtifactName(),reconverted.getArtifactName());
        assertEquals(jobmeta.getClusterName(),reconverted.getClusterName());
        System.out.println("expected Jobdef " + jobmeta.getJobDefinition());
        System.out.println("actual Jobdef " + reconverted.getJobDefinition());
        assertEquals(jobmeta.getJobDefinition(),reconverted.getJobDefinition());
        assertEquals(jobmeta.getJobId(),reconverted.getJobId());
        assertEquals(jobmeta.getJobJarUrl(),reconverted.getJobJarUrl());
        assertEquals(jobmeta.getLabels().get(0),reconverted.getLabels().get(0));
        assertEquals(jobmeta.getParameters().get(0),reconverted.getParameters().get(0));
        assertEquals(jobmeta.getMinRuntimeSecs(),reconverted.getMinRuntimeSecs());
        assertEquals(jobmeta.getNextWorkerNumberToUse(),reconverted.getNextWorkerNumberToUse());
        assertEquals(jobmeta.getSla().get(),reconverted.getSla().get());
        assertEquals(jobmeta.getSubmittedAtInstant(),reconverted.getSubmittedAtInstant());
        assertEquals(jobmeta.getState(),reconverted.getState());
        assertEquals(jobmeta.getSubscriptionTimeoutSecs(),reconverted.getSubscriptionTimeoutSecs());
        assertEquals(jobmeta.getTotalStages(),reconverted.getTotalStages());
        assertEquals(jobmeta.getUser(),reconverted.getUser());
        // assertEquals(jobmeta.getSchedulingInfo(), reconverted.getSchedulingInfo());
    }
}
| 7,878 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/server/master/domain/JobIdTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.domain;
import static org.junit.Assert.assertEquals;
import java.util.Optional;
import org.junit.Test;
public class JobIdTest {

    /**
     * Verifies that a {@link JobId} round-trips through both of its string forms:
     * {@code toString()} and {@code getId()} must each parse back (via
     * {@link JobId#fromId}) into an equal JobId.
     */
    @Test
    public void testJobId() {
        final JobId jobId = new JobId("clustername", 10);

        // Round-trip through toString(). Comparing Optionals also verifies presence;
        // the previous bare `assert(...)` statements were no-ops unless the JVM ran
        // with -ea, so the presence checks could silently pass.
        final String idString1 = jobId.toString();
        final Optional<JobId> fromId = JobId.fromId(idString1);
        assertEquals(Optional.of(jobId), fromId);

        // Round-trip through getId(); both string representations must parse identically.
        final String idString2 = jobId.getId();
        final Optional<JobId> fromId2 = JobId.fromId(idString2);
        assertEquals(Optional.of(jobId), fromId2);
    }
}
| 7,879 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/server/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/server/master/domain/JobClusterConfigTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.master.domain;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import org.junit.Test;
public class JobClusterConfigTest {
private static final SchedulingInfo DEFAULT_SCHED_INFO = new SchedulingInfo.Builder().numberOfStages(1).singleWorkerStageWithConstraints(new MachineDefinition(1, 10, 10, 10, 2), Lists.newArrayList(), Lists.newArrayList()).build();
@Test
public void happyTest() {
String name = "happyTest";
JobClusterConfig clusterConfig = new JobClusterConfig.Builder()
.withArtifactName("myart")
.withSchedulingInfo(DEFAULT_SCHED_INFO)
.withVersion("0.0.1")
.build();
try {
final JobClusterDefinitionImpl fakeJobCluster = new JobClusterDefinitionImpl.Builder()
.withJobClusterConfig(clusterConfig)
.withName(name)
.withUser("nj")
.withParameters(Lists.newArrayList())
.withIsReadyForJobMaster(true)
.withOwner(new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo"))
.withMigrationConfig(WorkerMigrationConfig.DEFAULT)
.build();
} catch(Exception e) {
fail();
}
}
@Test(expected = Exception.class)
public void noSchedInfoFails() {
String name = "noSchedInfoFails";
JobClusterConfig clusterConfig = new JobClusterConfig.Builder()
.withArtifactName("myart")
.withSchedulingInfo(null)
.withVersion("0.0.1")
.build();
final JobClusterDefinitionImpl fakeJobCluster = new JobClusterDefinitionImpl.Builder()
.withJobClusterConfig(clusterConfig)
.withName(name)
.withParameters(Lists.newArrayList())
.withUser("nj")
.withIsReadyForJobMaster(true)
.withOwner(new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo"))
.withMigrationConfig(WorkerMigrationConfig.DEFAULT)
.build();
}
@Test(expected = Exception.class)
public void noArtifactNameFails() {
String name = "noArtifactNameFails";
JobClusterConfig clusterConfig = new JobClusterConfig.Builder()
.withArtifactName(null)
.withSchedulingInfo(DEFAULT_SCHED_INFO)
.withVersion("0.0.1")
.build();
final JobClusterDefinitionImpl fakeJobCluster = new JobClusterDefinitionImpl.Builder()
.withJobClusterConfig(clusterConfig)
.withName(name)
.withUser("nj")
.withParameters(Lists.newArrayList())
.withIsReadyForJobMaster(true)
.withOwner(new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo"))
.withMigrationConfig(WorkerMigrationConfig.DEFAULT)
.build();
}
@Test
public void noVersionAutogenerate() {
String name = "noArtifactNameFails";
JobClusterConfig clusterConfig = new JobClusterConfig.Builder()
.withArtifactName("myart")
.withSchedulingInfo(DEFAULT_SCHED_INFO)
.build();
final JobClusterDefinitionImpl fakeJobCluster = new JobClusterDefinitionImpl.Builder()
.withJobClusterConfig(clusterConfig)
.withName(name)
.withUser("nj")
.withParameters(Lists.newArrayList())
.withIsReadyForJobMaster(true)
.withOwner(new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo"))
.withMigrationConfig(WorkerMigrationConfig.DEFAULT)
.build();
assertTrue(clusterConfig.getVersion() != null);
}
@Test
public void jobClusterDefnTest() {
String name = "jobClusterDefnTest";
JobClusterConfig clusterConfig = new JobClusterConfig.Builder()
.withArtifactName("myart")
.withSchedulingInfo(DEFAULT_SCHED_INFO)
.withVersion("0.0.1")
.build();
try {
// null cluster config is not allowed
final JobClusterDefinitionImpl fakeJobCluster = new JobClusterDefinitionImpl.Builder()
.withJobClusterConfig(null)
.withName(name)
.withUser("nj")
.withParameters(Lists.newArrayList())
.withIsReadyForJobMaster(true)
.withOwner(new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo"))
.withMigrationConfig(WorkerMigrationConfig.DEFAULT)
.build();
fail();
} catch(Exception e) {
}
try {
// cluster name is not specified
final JobClusterDefinitionImpl fakeJobCluster = new JobClusterDefinitionImpl.Builder()
.withJobClusterConfig(clusterConfig)
.withUser("nj")
.withIsReadyForJobMaster(true)
.withOwner(new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo"))
.withMigrationConfig(WorkerMigrationConfig.DEFAULT)
.build();
fail();
} catch(Exception e) {
}
}
}
| 7,880 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/scheduler/AgentsErrorMonitorTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.scheduler;
import static io.mantisrx.master.scheduler.AgentsErrorMonitorActor.props;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.testkit.javadsl.TestKit;
import io.mantisrx.master.events.LifecycleEventsProto;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import java.util.ArrayList;
import java.util.List;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import rx.functions.Action1;
public class AgentsErrorMonitorTest {

    static ActorSystem system;
    private static TestKit probe;

    @BeforeClass
    public static void setup() {
        system = ActorSystem.create();
        probe = new TestKit(system);
    }

    @AfterClass
    public static void tearDown() {
        TestKit.shutdownActorSystem(system);
        system = null;
    }

    /**
     * HostErrors reports "too many errors" only once the number of terminal worker
     * events within the check window exceeds the configured threshold (3 here).
     */
    @Test
    public void hostErrorTest_disableHost() {
        EnableHostAction enableHostAction = new EnableHostAction();
        AgentsErrorMonitorActor.HostErrors hostErrors =
                new AgentsErrorMonitorActor.HostErrors("host1", enableHostAction, 120000, 3);
        long t1 = 1000;
        LifecycleEventsProto.WorkerStatusEvent workerStatusEvent =
                generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        assertFalse(hostErrors.addAndGetIsTooManyErrors(workerStatusEvent));
        t1 += 100;
        workerStatusEvent = generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        assertFalse(hostErrors.addAndGetIsTooManyErrors(workerStatusEvent));
        t1 += 100;
        workerStatusEvent = generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        assertFalse(hostErrors.addAndGetIsTooManyErrors(workerStatusEvent));
        t1 += 100;
        // no host has been re-enabled yet
        assertEquals(0, enableHostAction.getEnableHostList().size());
        // no of errors is now 4 which is greater than 3
        workerStatusEvent = generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        assertTrue(hostErrors.addAndGetIsTooManyErrors(workerStatusEvent));
    }

    /**
     * After a host crosses the error threshold, a subsequent non-terminal worker event
     * from the same host should trigger the enable-host callback exactly once.
     */
    @Test
    public void hostErrorTest_enableHost() {
        EnableHostAction enableHostAction = new EnableHostAction();
        AgentsErrorMonitorActor.HostErrors hostErrors =
                new AgentsErrorMonitorActor.HostErrors("host1", enableHostAction, 120000, 3);
        long t1 = 1000;
        LifecycleEventsProto.WorkerStatusEvent workerStatusEvent =
                generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        assertFalse(hostErrors.addAndGetIsTooManyErrors(workerStatusEvent));
        t1 += 100;
        workerStatusEvent = generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        assertFalse(hostErrors.addAndGetIsTooManyErrors(workerStatusEvent));
        t1 += 100;
        workerStatusEvent = generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        assertFalse(hostErrors.addAndGetIsTooManyErrors(workerStatusEvent));
        t1 += 100;
        assertEquals(0, enableHostAction.getEnableHostList().size());
        // no of errors is now 4 which is greater than 3
        workerStatusEvent = generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        assertTrue(hostErrors.addAndGetIsTooManyErrors(workerStatusEvent));
        workerStatusEvent = generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Started, t1, "host1");
        assertFalse(hostErrors.addAndGetIsTooManyErrors(workerStatusEvent));
        // 4th event comes in with a non terminal event. This should reenable the host
        assertEquals(1, enableHostAction.getEnableHostList().size());
        assertEquals("host1", enableHostAction.getEnableHostList().get(0));
    }

    /**
     * End-to-end through the actor: four Failed events from one host should register the
     * errors and cause the scheduler's disableVM to be invoked once for that host.
     */
    @Test
    public void basicTest() {
        long too_old_millis = 4000;
        int error_check_window_count = 3;
        long error_check_window_millis = 2000;
        long disableDuration = 1000;
        long t1 = 1000;
        ActorRef errorMonitorActor = system.actorOf(
                props(too_old_millis, error_check_window_count, error_check_window_millis, disableDuration));
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        errorMonitorActor.tell(new AgentsErrorMonitorActor.InitializeAgentsErrorMonitor(schedulerMock), probe.getRef());
        LifecycleEventsProto.WorkerStatusEvent workerStatusEvent =
                generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        errorMonitorActor.tell(workerStatusEvent, probe.getRef());
        t1 += 100;
        workerStatusEvent = generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        errorMonitorActor.tell(workerStatusEvent, probe.getRef());
        t1 += 100;
        workerStatusEvent = generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        errorMonitorActor.tell(workerStatusEvent, probe.getRef());
        t1 += 100;
        errorMonitorActor.tell(new AgentsErrorMonitorActor.HostErrorMapRequest(), probe.getRef());
        AgentsErrorMonitorActor.HostErrorMapResponse hostErrorMapResponse =
                probe.expectMsgClass(AgentsErrorMonitorActor.HostErrorMapResponse.class);
        assertTrue(hostErrorMapResponse.getMap().containsKey("host1"));
        List<Long> errorTsList = hostErrorMapResponse.getMap().get("host1").getErrorTimestampList();
        assertEquals(3, errorTsList.size());
        // 4th error crosses the window count threshold
        workerStatusEvent = generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        errorMonitorActor.tell(workerStatusEvent, probe.getRef());
        errorMonitorActor.tell(new AgentsErrorMonitorActor.HostErrorMapRequest(), probe.getRef());
        hostErrorMapResponse = probe.expectMsgClass(AgentsErrorMonitorActor.HostErrorMapResponse.class);
        assertTrue(hostErrorMapResponse.getMap().containsKey("host1"));
        errorTsList = hostErrorMapResponse.getMap().get("host1").getErrorTimestampList();
        assertEquals(4, errorTsList.size());
        verify(schedulerMock, times(1)).disableVM("host1", disableDuration);
    }

    /**
     * A host that stops producing events should be evicted from the error map on the
     * next (simulated) periodic health check.
     */
    @Test
    public void testOldHostEviction() {
        long too_old_millis = 4000;
        int error_check_window_count = 3;
        long error_check_window_millis = 2000;
        long t1 = 1000;
        ActorRef errorMonitorActor = system.actorOf(
                props(too_old_millis, error_check_window_count, error_check_window_millis, 1000));
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        errorMonitorActor.tell(new AgentsErrorMonitorActor.InitializeAgentsErrorMonitor(schedulerMock), probe.getRef());
        LifecycleEventsProto.WorkerStatusEvent workerStatusEvent =
                generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        errorMonitorActor.tell(workerStatusEvent, probe.getRef());
        t1 += 100;
        workerStatusEvent = generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        errorMonitorActor.tell(workerStatusEvent, probe.getRef());
        t1 += 100;
        workerStatusEvent = generateWorkerEvent("sine-function-1-worker-0-4", WorkerState.Failed, t1, "host1");
        errorMonitorActor.tell(workerStatusEvent, probe.getRef());
        t1 += 100;
        // ensure host 1 is registered in error map
        errorMonitorActor.tell(new AgentsErrorMonitorActor.HostErrorMapRequest(), probe.getRef());
        AgentsErrorMonitorActor.HostErrorMapResponse hostErrorMapResponse =
                probe.expectMsgClass(AgentsErrorMonitorActor.HostErrorMapResponse.class);
        assertTrue(hostErrorMapResponse.getMap().containsKey("host1"));
        // simulate periodic check in the future
        errorMonitorActor.tell(new AgentsErrorMonitorActor.CheckHostHealthMessage(t1 + 100000), probe.getRef());
        // host1 should've been evicted as no new events were seen from it
        errorMonitorActor.tell(new AgentsErrorMonitorActor.HostErrorMapRequest(), probe.getRef());
        hostErrorMapResponse = probe.expectMsgClass(AgentsErrorMonitorActor.HostErrorMapResponse.class);
        assertTrue(hostErrorMapResponse.getMap().isEmpty());
    }

    /** Worker status events that carry no hostname must be ignored by the monitor. */
    @Test
    public void noHostEventIgnoredTest() {
        long too_old_millis = 4000;
        int error_check_window_count = 3;
        long error_check_window_millis = 2000;
        ActorRef errorMonitorActor = system.actorOf(
                props(too_old_millis, error_check_window_count, error_check_window_millis, 1000));
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        errorMonitorActor.tell(new AgentsErrorMonitorActor.InitializeAgentsErrorMonitor(schedulerMock), probe.getRef());
        // constructor overload without a hostname argument
        LifecycleEventsProto.WorkerStatusEvent workerStatusEvent = new LifecycleEventsProto.WorkerStatusEvent(
                LifecycleEventsProto.StatusEvent.StatusEventType.INFO,
                "test message",
                1,
                WorkerId.fromId("sine-function-1-worker-0-4").get(),
                WorkerState.Failed,
                1000);
        errorMonitorActor.tell(workerStatusEvent, probe.getRef());
        errorMonitorActor.tell(new AgentsErrorMonitorActor.HostErrorMapRequest(), probe.getRef());
        AgentsErrorMonitorActor.HostErrorMapResponse hostErrorMapResponse =
                probe.expectMsgClass(AgentsErrorMonitorActor.HostErrorMapResponse.class);
        assertTrue(hostErrorMapResponse.getMap().isEmpty());
    }

    /** Events from workers not yet running on a host (e.g. Launched) must be ignored. */
    @Test
    public void eventFromWorkerNotYetOnHostIgnoredTest() {
        long too_old_millis = 4000;
        int error_check_window_count = 3;
        long error_check_window_millis = 2000;
        ActorRef errorMonitorActor = system.actorOf(
                props(too_old_millis, error_check_window_count, error_check_window_millis, 1000));
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        errorMonitorActor.tell(new AgentsErrorMonitorActor.InitializeAgentsErrorMonitor(schedulerMock), probe.getRef());
        LifecycleEventsProto.WorkerStatusEvent workerStatusEvent = new LifecycleEventsProto.WorkerStatusEvent(
                LifecycleEventsProto.StatusEvent.StatusEventType.INFO,
                "test message",
                1,
                WorkerId.fromId("sine-function-1-worker-0-4").get(),
                WorkerState.Launched,
                "host1",
                1000);
        errorMonitorActor.tell(workerStatusEvent, probe.getRef());
        errorMonitorActor.tell(new AgentsErrorMonitorActor.HostErrorMapRequest(), probe.getRef());
        AgentsErrorMonitorActor.HostErrorMapResponse hostErrorMapResponse =
                probe.expectMsgClass(AgentsErrorMonitorActor.HostErrorMapResponse.class);
        assertTrue(hostErrorMapResponse.getMap().isEmpty());
    }

    /** Builds an INFO-level WorkerStatusEvent for the given worker id, state, timestamp and host. */
    private LifecycleEventsProto.WorkerStatusEvent generateWorkerEvent(String id, WorkerState state, long ts, String host) {
        return new LifecycleEventsProto.WorkerStatusEvent(
                LifecycleEventsProto.StatusEvent.StatusEventType.INFO,
                "test message",
                1,
                WorkerId.fromId(id).get(),
                state,
                host,
                ts);
    }

    /** Records every host passed to it; used as the enable-host callback in HostErrors tests. */
    // static: these helpers never touch the enclosing test instance
    static class EnableHostAction implements Action1<String> {
        List<String> enableHostList = new ArrayList<>();

        public EnableHostAction() {
        }

        @Override
        public void call(String s) {
            enableHostList.add(s);
        }

        public List<String> getEnableHostList() {
            return this.enableHostList;
        }
    }

    /** Records every host passed to it; counterpart of EnableHostAction for disable callbacks. */
    static class DisableHostAction implements Action1<String> {
        List<String> disableHostList = new ArrayList<>();

        public DisableHostAction() {
        }

        @Override
        public void call(String s) {
            disableHostList.add(s);
        }

        public List<String> getDisableHostList() {
            return this.disableHostList;
        }
    }
}
| 7,881 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/scheduler/FakeMantisScheduler.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.scheduler;
import akka.actor.ActorRef;
import com.netflix.fenzo.VirtualMachineCurrentState;
import com.netflix.fenzo.VirtualMachineLease;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.master.jobcluster.job.worker.WorkerHeartbeat;
import io.mantisrx.master.jobcluster.job.worker.WorkerStatus;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.ScheduleRequest;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import io.mantisrx.server.master.scheduler.WorkerLaunched;
import io.mantisrx.server.master.scheduler.WorkerResourceStatus;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
/**
 * A test double for {@link MantisScheduler} that, instead of scheduling on real agents,
 * immediately feeds synthetic lifecycle events (launched, start-initiated, heartbeat,
 * completed) to the job cluster manager actor.
 */
public class FakeMantisScheduler implements MantisScheduler {

    private final ActorRef jobClusterManagerActor;

    public FakeMantisScheduler(final ActorRef jobClusterManagerActor) {
        this.jobClusterManagerActor = jobClusterManagerActor;
    }

    /** Builds a {@link Status} for the request's worker with the given type, message and state. */
    private static Status statusFor(final ScheduleRequest request,
                                    final Status.TYPE type,
                                    final String message,
                                    final MantisJobState state) {
        return new Status(
                request.getWorkerId().getJobId(),
                request.getStageNum(),
                request.getWorkerId().getWorkerIndex(),
                request.getWorkerId().getWorkerNum(),
                type,
                message,
                state);
    }

    @Override
    public void scheduleWorker(final ScheduleRequest scheduleRequest) {
        // 1) pretend the worker landed on a fake host/VM
        final WorkerEvent launchedEvent = new WorkerLaunched(
                scheduleRequest.getWorkerId(),
                scheduleRequest.getStageNum(),
                "host1",
                "vm1",
                scheduleRequest.getPreferredCluster(),
                Optional.empty(),
                new WorkerPorts(Lists.newArrayList(8000, 9000, 9010, 9020, 9030)));
        jobClusterManagerActor.tell(launchedEvent, ActorRef.noSender());

        // 2) fake the worker's "start initiated" status
        final WorkerEvent startInitEvent = new WorkerStatus(
                statusFor(scheduleRequest, Status.TYPE.INFO, "fake Start Initiated", MantisJobState.StartInitiated));
        jobClusterManagerActor.tell(startInitEvent, ActorRef.noSender());

        // 3) fake a heartbeat so the worker appears healthy and Started
        final WorkerEvent heartbeatEvent = new WorkerHeartbeat(
                statusFor(scheduleRequest, Status.TYPE.HEARTBEAT, "fake heartbeat event", MantisJobState.Started));
        jobClusterManagerActor.tell(heartbeatEvent, ActorRef.noSender());
    }

    @Override
    public void unscheduleWorker(final WorkerId workerId, final Optional<String> hostname) {
        // report the worker as COMPLETED so the job actor cleans it up
        final WorkerEvent completedEvent = new WorkerResourceStatus(
                workerId, "fake unschedule worker", WorkerResourceStatus.VMResourceState.COMPLETED);
        jobClusterManagerActor.tell(completedEvent, ActorRef.noSender());
    }

    @Override
    public void unscheduleAndTerminateWorker(final WorkerId workerId, final Optional<String> hostname) {
        // termination is indistinguishable from unscheduling in this fake
        unscheduleWorker(workerId, hostname);
    }

    @Override
    public void updateWorkerSchedulingReadyTime(final WorkerId workerId, final long when) {
        // no-op
    }

    @Override
    public void initializeRunningWorker(final ScheduleRequest scheduleRequest, final String hostname, final String hostID) {
        // no-op
    }

    @Override
    public void rescindOffer(final String offerId) {
        // TBD
    }

    @Override
    public void rescindOffers(final String hostname) {
        // TBD
    }

    @Override
    public void addOffers(final List<VirtualMachineLease> offers) {
        // TBD
    }

    @Override
    public void disableVM(final String hostname, final long durationMillis) throws IllegalStateException {
        // TBD
    }

    @Override
    public void enableVM(final String hostname) {
        // TBD
    }

    @Override
    public List<VirtualMachineCurrentState> getCurrentVMState() {
        // TBD
        return Collections.emptyList();
    }

    @Override
    public void setActiveVmGroups(final List<String> activeVmGroups) {
        // TBD
    }
}
| 7,882 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/jobcluster/SLAEnforcerTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import io.mantisrx.master.jobcluster.JobClusterActor.JobInfo;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.domain.SLA;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.util.List;
import java.util.Set;
import org.joda.time.Instant;
import org.junit.Test;
public class SLAEnforcerTest {

    /** Shorthand for a JobInfo on cluster "cname" with the given job number and state. */
    private static JobInfo job(final int jobNumber, final JobState state, final long submittedAt) {
        return new JobInfo(new JobId("cname", jobNumber), null, submittedAt, null, state, null);
    }

    /** sortJobsByIdDesc should order jobs by ascending job number (despite its name). */
    @Test
    public void testSorting() {
        Instant now = Instant.now();
        List<JobInfo> jobList = Lists.newArrayList(
                job(3, JobState.Accepted, now.getMillis()),
                job(1, JobState.Accepted, now.getMillis()),
                job(4, JobState.Launched, now.getMillis()),
                job(2, JobState.Launched, now.getMillis())
        );
        SLA sla = new SLA(1, 1, null, null);
        SLAEnforcer slaEnforcer = new SLAEnforcer(sla);
        Set<JobInfo> sortJobsByIdDesc = slaEnforcer.sortJobsByIdDesc(jobList);
        String[] expectedOrder = {"cname-1", "cname-2", "cname-3", "cname-4"};
        JobInfo[] jobIdArray = sortJobsByIdDesc.toArray(new JobInfo[0]);
        // Previously the array size was never asserted; a short result could pass silently.
        assertEquals(expectedOrder.length, jobIdArray.length);
        for (int i = 0; i < jobIdArray.length; i++) {
            assertEquals(expectedOrder[i], jobIdArray[i].jobId.getId());
        }
    }

    /** An SLA with min greater than max is invalid and must be rejected by the constructor. */
    @Test
    public void slaValidationTest() {
        int min = 5;
        int max = 2;
        try {
            // Previously passed the literals 5, 2 and ignored the declared locals.
            SLA sla = new SLA(min, max, null, null);
            fail("SLA with min > max should not construct");
        } catch (Exception expected) {
            // expected
        }
    }

    /** enforceSLAMin must reject a negative active-job count. */
    @Test
    public void slaMinInvalidArgTest() {
        int min = 2;
        int max = 0;
        try {
            SLA sla = new SLA(min, max, null, null);
            SLAEnforcer slaEnf = new SLAEnforcer(sla);
            slaEnf.enforceSLAMin(-1, 0);
            fail("negative active job count should be rejected");
        } catch (Exception expected) {
            // expected
        }
    }

    /** With an all-zero or absent SLA there is nothing to enforce for min. */
    @Test
    public void slaMinDefaultsTest() {
        SLA sla = new SLA(0, 0, null, null);
        SLAEnforcer slaEnf = new SLAEnforcer(sla);
        assertEquals(0, slaEnf.enforceSLAMin(2, 0));
        try {
            // a null SLA must also be tolerated
            slaEnf = new SLAEnforcer(null);
            assertEquals(0, slaEnf.enforceSLAMin(2, 0));
        } catch (Exception e) {
            fail("null SLA should not throw: " + e.getMessage());
        }
    }

    /** enforceSLAMin returns the number of additional jobs needed to reach min. */
    @Test
    public void slaMinTest() {
        int min = 2;
        int max = 10;
        SLA sla = new SLA(min, max, null, null);
        SLAEnforcer slaEnf = new SLAEnforcer(sla);
        // min is 2 and active jobs count is 2 no need to launch any jobs
        assertEquals(0, slaEnf.enforceSLAMin(2, 0));
        // min is 2 and active jobs is 1 and launched jobs is 1 no need to launch any more jobs
        assertEquals(0, slaEnf.enforceSLAMin(1, 1));
        // min is 2, active = 1, launched = 0, therefore launch 1 job
        assertEquals(1, slaEnf.enforceSLAMin(1, 0));
    }

    /** With a null SLA or max of 0 there is nothing to enforce for max. */
    @Test
    public void slaMaxDefaultsTest() {
        Instant now = Instant.now();
        int min = 0;
        int max = 0;
        SLA sla = new SLA(min, max, null, null);
        SLAEnforcer slaEnf = new SLAEnforcer(null);
        List<JobInfo> jobList = Lists.newArrayList(
                job(1, JobState.Accepted, now.getMillis()),
                job(2, JobState.Launched, now.getMillis()),
                job(3, JobState.Accepted, now.getMillis()),
                job(4, JobState.Launched, now.getMillis())
        );
        // sla not set nothing to enforce
        try {
            List<JobId> jobsToDelete = slaEnf.enforceSLAMax(jobList);
            assertTrue(jobsToDelete.isEmpty());
        } catch (Exception e) {
            fail("null SLA should not throw: " + e.getMessage());
        }
        slaEnf = new SLAEnforcer(sla);
        jobList = Lists.newArrayList(
                job(1, JobState.Accepted, now.getMillis()),
                job(2, JobState.Launched, now.getMillis()),
                job(3, JobState.Accepted, now.getMillis()),
                job(4, JobState.Launched, now.getMillis())
        );
        // sla max is 0 nothing to enforce
        List<JobId> jobsToDelete = slaEnf.enforceSLAMax(jobList);
        assertTrue(jobsToDelete.isEmpty());
    }

    /** Once max running jobs is satisfied, older accepted jobs are marked for deletion. */
    @Test
    public void slaMaxTest() {
        Instant now = Instant.now();
        SLA sla = new SLA(0, 2, null, null);
        SLAEnforcer slaEnf = new SLAEnforcer(sla);
        List<JobInfo> jobList = Lists.newArrayList(
                job(1, JobState.Accepted, now.getMillis()),
                job(2, JobState.Launched, now.getMillis()),
                job(3, JobState.Accepted, now.getMillis()),
                job(4, JobState.Launched, now.getMillis())
        );
        // 2 active and 2 accepted jobs, sla met at job id 2, hence delete job 1
        List<JobId> jobsToDelete = slaEnf.enforceSLAMax(jobList);
        assertEquals(1, jobsToDelete.size());
        assertEquals("cname-1", jobsToDelete.get(0).getId());
    }

    /** With 3 launched and 1 accepted and max 2, jobs 1 and 2 must both be deleted. */
    @Test
    public void slaMaxTest2() {
        Instant now = Instant.now();
        SLA sla = new SLA(0, 2, null, null);
        SLAEnforcer slaEnf = new SLAEnforcer(sla);
        List<JobInfo> jobList = Lists.newArrayList(
                job(1, JobState.Accepted, now.getMillis()),
                job(2, JobState.Launched, now.getMillis()),
                job(3, JobState.Launched, now.getMillis()),
                job(4, JobState.Launched, now.getMillis())
        );
        // 3 active and 1 accepted jobs, terminate jobs 1 and 2
        List<JobId> jobsToDelete = slaEnf.enforceSLAMax(jobList);
        assertEquals(2, jobsToDelete.size());
        assertTrue(jobsToDelete.contains(new JobId("cname", 1)));
        assertTrue(jobsToDelete.contains(new JobId("cname", 2)));
    }

    /** With 2 launched and 4 accepted and max 2, the three oldest accepted jobs go. */
    @Test
    public void slaMaxTest3() {
        Instant now = Instant.now();
        SLA sla = new SLA(0, 2, null, null);
        SLAEnforcer slaEnf = new SLAEnforcer(sla);
        List<JobInfo> jobList = Lists.newArrayList(
                job(5, JobState.Accepted, now.getMillis()),
                job(1, JobState.Accepted, now.getMillis()),
                job(4, JobState.Launched, now.getMillis()),
                job(2, JobState.Accepted, now.getMillis()),
                job(3, JobState.Accepted, now.getMillis()),
                job(6, JobState.Launched, now.getMillis())
        );
        // 2 active and 4 accepted jobs, terminate jobs 3,2,1
        List<JobId> jobsToDelete = slaEnf.enforceSLAMax(jobList);
        assertEquals(3, jobsToDelete.size());
        assertTrue(jobsToDelete.contains(new JobId("cname", 1)));
        assertTrue(jobsToDelete.contains(new JobId("cname", 2)));
        assertTrue(jobsToDelete.contains(new JobId("cname", 3)));
    }

    /** With 3 launched and 4 accepted and max 2, jobs 1 through 5 must be deleted. */
    @Test
    public void slaMaxTest4() {
        Instant now = Instant.now();
        SLA sla = new SLA(0, 2, null, null);
        SLAEnforcer slaEnf = new SLAEnforcer(sla);
        List<JobInfo> jobList = Lists.newArrayList(
                job(4, JobState.Launched, now.getMillis()),
                job(1, JobState.Accepted, now.getMillis()),
                job(2, JobState.Accepted, now.getMillis()),
                job(6, JobState.Launched, now.getMillis()),
                job(3, JobState.Accepted, now.getMillis()),
                job(5, JobState.Accepted, now.getMillis()),
                job(7, JobState.Launched, now.getMillis())
        );
        // 3 active and 4 accepted jobs, terminate jobs 1 & 2 & 3 & 4 & 5
        List<JobId> jobsToDelete = slaEnf.enforceSLAMax(jobList);
        assertEquals(5, jobsToDelete.size());
        assertTrue(jobsToDelete.contains(new JobId("cname", 1)));
        assertTrue(jobsToDelete.contains(new JobId("cname", 2)));
        assertTrue(jobsToDelete.contains(new JobId("cname", 3)));
        assertTrue(jobsToDelete.contains(new JobId("cname", 4)));
        assertTrue(jobsToDelete.contains(new JobId("cname", 5)));
    }
}
| 7,883 |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.command.InvalidJobException;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.server.master.domain.JobClusterConfig;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.SLA;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import org.junit.Test;
/**
 * Tests for {@link JobDefinitionResolver}: verifies how a submitted {@link JobDefinition}
 * is resolved against the owning job cluster's configurations — which of artifact,
 * version and scheduling info are taken from the request, which are inherited from the
 * cluster, and which combinations are rejected.
 */
public class JobDefinitionResolverTest {

    public static final SLA NO_OP_SLA = new SLA(0, 0, null, null);
    public static final MachineDefinition DEFAULT_MACHINE_DEFINITION = new MachineDefinition(1, 10, 10, 10, 2);
    public static final SchedulingInfo SINGLE_WORKER_SCHED_INFO = new SchedulingInfo.Builder().numberOfStages(1).singleWorkerStageWithConstraints(DEFAULT_MACHINE_DEFINITION, Lists.newArrayList(), Lists.newArrayList()).build();
    public static final SchedulingInfo TWO_WORKER_SCHED_INFO = new SchedulingInfo.Builder().numberOfStages(1).multiWorkerStage(2, DEFAULT_MACHINE_DEFINITION).build();
    public static final JobOwner DEFAULT_JOB_OWNER = new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo");
    public static final String DEFAULT_ARTIFACT_NAME = "myart";
    public static final String DEFAULT_VERSION = "0.0.1";

    /** Creates a cluster definition with no labels/parameters, a no-op SLA and a single-worker stage. */
    private JobClusterDefinitionImpl createFakeJobClusterDefn(String clusterName) {
        return createFakeJobClusterDefn(clusterName, Lists.newArrayList(), Lists.newArrayList(), NO_OP_SLA, SINGLE_WORKER_SCHED_INFO);
    }

    /** Creates a cluster definition with the given labels/parameters, a no-op SLA and a single-worker stage. */
    private JobClusterDefinitionImpl createFakeJobClusterDefn(String clusterName, List<Label> labels, List<Parameter> parameters) {
        return createFakeJobClusterDefn(clusterName, labels, parameters, NO_OP_SLA, SINGLE_WORKER_SCHED_INFO);
    }

    /**
     * Creates a cluster definition whose single (latest) config uses
     * {@link #DEFAULT_ARTIFACT_NAME} / {@link #DEFAULT_VERSION} together with the given
     * scheduling info, labels, parameters and SLA.
     */
    private JobClusterDefinitionImpl createFakeJobClusterDefn(String clusterName, List<Label> labels, List<Parameter> parameters, SLA sla, SchedulingInfo schedulingInfo) {
        JobClusterConfig clusterConfig = new JobClusterConfig.Builder()
                .withArtifactName(DEFAULT_ARTIFACT_NAME)
                .withSchedulingInfo(schedulingInfo)
                .withVersion(DEFAULT_VERSION)
                .build();
        return new JobClusterDefinitionImpl.Builder()
                .withJobClusterConfig(clusterConfig)
                .withName(clusterName)
                .withParameters(parameters)
                .withLabels(labels)
                .withUser("user")
                .withIsReadyForJobMaster(true)
                .withOwner(DEFAULT_JOB_OWNER)
                .withMigrationConfig(WorkerMigrationConfig.DEFAULT)
                .withSla(sla)
                .build();
    }

    /** Builds cluster metadata whose definition carries exactly one label and one parameter. */
    private IJobClusterMetadata singleLabelParamMetadata(String clusterName, Label label, Parameter parameter) {
        List<Label> labels = new ArrayList<>();
        labels.add(label);
        List<Parameter> parameters = new ArrayList<>();
        parameters.add(parameter);
        return new JobClusterMetadataImpl(createFakeJobClusterDefn(clusterName, labels, parameters), 1, false);
    }

    /** Asserts the resolved definition inherited exactly the cluster's single label and parameter. */
    private void assertInheritedLabelAndParam(JobDefinition resolved, Label label, Parameter parameter) {
        assertEquals(1, resolved.getLabels().size());
        assertEquals(label, resolved.getLabels().get(0));
        assertEquals(1, resolved.getParameters().size());
        assertEquals(parameter, resolved.getParameters().get(0));
    }

    @Test
    public void artifactSchedPresentTest() {
        String clusterName = "artifactVersionSchedPresentTest";
        Label label = new Label("l1", "lv1");
        Parameter parameter = new Parameter("paramName", "paramValue");
        IJobClusterMetadata jobClusterMetadata = singleLabelParamMetadata(clusterName, label, parameter);
        String version = "0.0.2";
        String artifactName = "myArt2";
        SchedulingInfo schedulingInfo = TWO_WORKER_SCHED_INFO;
        // Artifact + version + scheduling info all specified: use them verbatim; labels and
        // parameters are inherited because the request left them unspecified.
        try {
            JobDefinition givenJobDefn = new JobDefinition.Builder().withArtifactName(artifactName).withName(clusterName).withSchedulingInfo(schedulingInfo).withVersion(version).build();
            JobDefinition resolved = new JobDefinitionResolver().getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);
            assertEquals(artifactName, resolved.getArtifactName());
            assertEquals(schedulingInfo, resolved.getSchedulingInfo());
            assertEquals(version, resolved.getVersion());
            assertInheritedLabelAndParam(resolved, label, parameter);
        } catch (Exception e) {
            e.printStackTrace();
            fail("unexpected exception: " + e);
        }
        // Only artifact + scheduling info specified: a non-empty version must be generated.
        try {
            JobDefinition givenJobDefn = new JobDefinition.Builder().withArtifactName(artifactName).withName(clusterName).withSchedulingInfo(schedulingInfo).build();
            JobDefinition resolved = new JobDefinitionResolver().getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);
            assertEquals(artifactName, resolved.getArtifactName());
            assertEquals(schedulingInfo, resolved.getSchedulingInfo());
            assertTrue(resolved.getVersion() != null && !resolved.getVersion().isEmpty());
            assertInheritedLabelAndParam(resolved, label, parameter);
        } catch (Exception e) {
            e.printStackTrace();
            fail("unexpected exception: " + e);
        }
    }

    @Test
    public void artifactPresentButSchedAbsentFailsTest() {
        String clusterName = "artifactPresentButSchedAbsentFailsTest";
        Label label = new Label("l1", "lv1");
        Parameter parameter = new Parameter("paramName", "paramValue");
        IJobClusterMetadata jobClusterMetadata = singleLabelParamMetadata(clusterName, label, parameter);
        String version = "0.0.2";
        String artifactName = "myArt2";
        // A new artifact with a new version but no scheduling info must be rejected.
        try {
            JobDefinition givenJobDefn = new JobDefinition.Builder().withArtifactName(artifactName).withName(clusterName).withVersion(version).build();
            new JobDefinitionResolver().getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);
            fail("expected resolution to fail: new artifact without scheduling info");
        } catch (Exception e) {
            // expected
        }
        // A new artifact alone (no version, no scheduling info) must also be rejected.
        try {
            JobDefinition givenJobDefn = new JobDefinition.Builder().withArtifactName(artifactName).withName(clusterName).build();
            new JobDefinitionResolver().getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);
            fail("expected resolution to fail: new artifact without scheduling info");
        } catch (Exception e) {
            // expected
        }
    }

    @Test
    public void versionSchedPresentTest() {
        String clusterName = "versionSchedPresentTest";
        Label label = new Label("l1", "lv1");
        Parameter parameter = new Parameter("paramName", "paramValue");
        IJobClusterMetadata jobClusterMetadata = singleLabelParamMetadata(clusterName, label, parameter);
        String version = "0.0.1";
        List<JobConstraints> constraintsList = new ArrayList<>();
        constraintsList.add(JobConstraints.ExclusiveHost);
        SchedulingInfo schedulingInfo = new SchedulingInfo.Builder().numberOfStages(1).singleWorkerStageWithConstraints(DEFAULT_MACHINE_DEFINITION, Lists.newArrayList(), constraintsList).build();
        // Version + scheduling info specified: the artifact is looked up from the versioned
        // config while the given scheduling info and version are used as-is.
        try {
            JobDefinition givenJobDefn = new JobDefinition.Builder().withName(clusterName).withSchedulingInfo(schedulingInfo).withVersion(version).build();
            JobDefinition resolved = new JobDefinitionResolver().getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);
            assertEquals(DEFAULT_ARTIFACT_NAME, resolved.getArtifactName());
            assertEquals(schedulingInfo, resolved.getSchedulingInfo());
            assertEquals(version, resolved.getVersion());
            assertInheritedLabelAndParam(resolved, label, parameter);
        } catch (Exception e) {
            e.printStackTrace();
            fail("unexpected exception: " + e);
        }
        // Only a (known) version specified: artifact and scheduling info are inherited from
        // that version's config.
        try {
            JobDefinition givenJobDefn = new JobDefinition.Builder().withName(clusterName).withVersion(version).build();
            JobDefinition resolved = new JobDefinitionResolver().getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);
            assertEquals(DEFAULT_ARTIFACT_NAME, resolved.getArtifactName());
            assertEquals(SINGLE_WORKER_SCHED_INFO, resolved.getSchedulingInfo());
            assertEquals(version, resolved.getVersion());
            assertInheritedLabelAndParam(resolved, label, parameter);
        } catch (Exception e) {
            e.printStackTrace();
            fail("unexpected exception: " + e);
        }
    }

    @Test
    public void SchedPresentTest() {
        String clusterName = "SchedPresentTest";
        Label label = new Label("l1", "lv1");
        Parameter parameter = new Parameter("paramName", "paramValue");
        IJobClusterMetadata jobClusterMetadata = singleLabelParamMetadata(clusterName, label, parameter);
        List<JobConstraints> constraintsList = new ArrayList<>();
        constraintsList.add(JobConstraints.ExclusiveHost);
        SchedulingInfo schedulingInfo = new SchedulingInfo.Builder().numberOfStages(1).singleWorkerStageWithConstraints(DEFAULT_MACHINE_DEFINITION, Lists.newArrayList(), constraintsList).build();
        // Only scheduling info specified: artifact and version come from the cluster's latest config.
        try {
            JobDefinition givenJobDefn = new JobDefinition.Builder().withName(clusterName).withSchedulingInfo(schedulingInfo).build();
            JobDefinition resolved = new JobDefinitionResolver().getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);
            assertEquals(DEFAULT_ARTIFACT_NAME, resolved.getArtifactName());
            assertEquals(schedulingInfo, resolved.getSchedulingInfo());
            assertEquals(DEFAULT_VERSION, resolved.getVersion());
            assertInheritedLabelAndParam(resolved, label, parameter);
        } catch (Exception e) {
            e.printStackTrace();
            fail("unexpected exception: " + e);
        }
        // Nothing specified: artifact, scheduling info and version are all inherited
        // from the cluster's latest (default) config.
        try {
            JobDefinition givenJobDefn = new JobDefinition.Builder().withName(clusterName).build();
            JobDefinition resolved = new JobDefinitionResolver().getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);
            assertEquals(DEFAULT_ARTIFACT_NAME, resolved.getArtifactName());
            assertEquals(SINGLE_WORKER_SCHED_INFO, resolved.getSchedulingInfo());
            assertEquals(DEFAULT_VERSION, resolved.getVersion());
            assertInheritedLabelAndParam(resolved, label, parameter);
        } catch (Exception e) {
            e.printStackTrace();
            fail("unexpected exception: " + e);
        }
        // The literal string "null" as version is treated the same as no version at all.
        try {
            JobDefinition givenJobDefn = new JobDefinition.Builder().withName(clusterName).withVersion("null").build();
            JobDefinition resolved = new JobDefinitionResolver().getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);
            assertEquals(DEFAULT_ARTIFACT_NAME, resolved.getArtifactName());
            assertEquals(SINGLE_WORKER_SCHED_INFO, resolved.getSchedulingInfo());
            assertEquals(DEFAULT_VERSION, resolved.getVersion());
            assertInheritedLabelAndParam(resolved, label, parameter);
        } catch (Exception e) {
            e.printStackTrace();
            fail("unexpected exception: " + e);
        }
    }

    @Test
    public void versionNotFoundTest() {
        String clusterName = "versionNotFoundTest";
        Label label = new Label("l1", "lv1");
        Parameter parameter = new Parameter("paramName", "paramValue");
        IJobClusterMetadata jobClusterMetadata = singleLabelParamMetadata(clusterName, label, parameter);
        // The cluster only knows DEFAULT_VERSION (0.0.1); requesting 0.0.2 must fail.
        String version = "0.0.2";
        try {
            JobDefinition givenJobDefn = new JobDefinition.Builder().withName(clusterName).withVersion(version).build();
            new JobDefinitionResolver().getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);
            fail("expected resolution to fail for unknown version " + version);
        } catch (Exception e) {
            // expected
        }
    }

    @Test
    public void lookupJobClusterConfigTest() {
        String clusterName = "lookupJobClusterConfigTest";
        JobClusterConfig clusterConfig1 = new JobClusterConfig.Builder()
                .withArtifactName(DEFAULT_ARTIFACT_NAME)
                .withSchedulingInfo(SINGLE_WORKER_SCHED_INFO)
                .withVersion(DEFAULT_VERSION)
                .build();
        JobClusterConfig clusterConfig2 = new JobClusterConfig.Builder()
                .withArtifactName("artifact2")
                .withSchedulingInfo(TWO_WORKER_SCHED_INFO)
                .withVersion("0.0.2")
                .build();
        List<JobClusterConfig> configList = new ArrayList<>();
        configList.add(clusterConfig1);
        configList.add(clusterConfig2);
        JobClusterDefinitionImpl jobClusterDefinition = new JobClusterDefinitionImpl.Builder()
                .withJobClusterConfigs(configList)
                .withName(clusterName)
                .withParameters(Lists.newArrayList())
                .withLabels(Lists.newArrayList())
                .withUser("user")
                .withIsReadyForJobMaster(true)
                .withOwner(DEFAULT_JOB_OWNER)
                .withMigrationConfig(WorkerMigrationConfig.DEFAULT)
                .withSla(NO_OP_SLA)
                .build();
        IJobClusterMetadata jobClusterMetadata = new JobClusterMetadataImpl.Builder().withJobClusterDefinition(jobClusterDefinition).withLastJobCount(1).withIsDisabled(false).build();
        JobDefinitionResolver resolver = new JobDefinitionResolver();
        // Each known version resolves to its matching config.
        Optional<JobClusterConfig> config = resolver.getJobClusterConfigForVersion(jobClusterMetadata, DEFAULT_VERSION);
        assertTrue(config.isPresent());
        assertEquals(DEFAULT_ARTIFACT_NAME, config.get().getArtifactName());
        assertEquals(DEFAULT_VERSION, config.get().getVersion());
        assertEquals(SINGLE_WORKER_SCHED_INFO, config.get().getSchedulingInfo());
        Optional<JobClusterConfig> config2 = resolver.getJobClusterConfigForVersion(jobClusterMetadata, "0.0.2");
        assertTrue(config2.isPresent());
        assertEquals("artifact2", config2.get().getArtifactName());
        assertEquals("0.0.2", config2.get().getVersion());
        assertEquals(TWO_WORKER_SCHED_INFO, config2.get().getSchedulingInfo());
        // An unknown version yields an empty Optional rather than throwing.
        try {
            Optional<JobClusterConfig> config3 = resolver.getJobClusterConfigForVersion(jobClusterMetadata, "0.0.3");
            assertTrue(!config3.isPresent());
        } catch (Exception e) {
            e.printStackTrace();
            fail("lookup of an unknown version should not throw: " + e);
        }
    }
}
| 7,884 |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster;
import static org.junit.Assert.*;
import io.mantisrx.common.Label;
import io.mantisrx.server.master.domain.JobId;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import org.junit.Test;
/**
 * Tests for {@link JobClusterActor.LabelCache}, which maintains two maps:
 * label -&gt; job ids carrying it ({@code labelJobIdMap}) and job id -&gt; its labels
 * ({@code jobIdToLabelMap}). Verifies both maps stay consistent across add/remove
 * and that label matching works in both AND and OR modes.
 */
public class LabelCacheTest {

    @Test
    public void addLabelTest() {
        JobClusterActor.LabelCache labelCache = new JobClusterActor.LabelCache();
        JobId jId = new JobId("addLabelTest", 1);
        List<Label> labelList = new ArrayList<>();
        Label label1 = new Label("l1", "v1");
        labelList.add(label1);
        labelCache.addJobIdToLabelCache(jId, labelList);
        // Both directions of the mapping must be populated.
        assertTrue(labelCache.labelJobIdMap.containsKey(label1));
        assertTrue(labelCache.jobIdToLabelMap.containsKey(jId));
    }

    @Test
    public void addLabelTest2() {
        JobClusterActor.LabelCache labelCache = new JobClusterActor.LabelCache();
        JobId jId = new JobId("addLabelTest", 1);
        JobId jId2 = new JobId("addLabelTest", 2);
        List<Label> labelList = new ArrayList<>();
        Label label1 = new Label("l1", "v1");
        labelList.add(label1);
        // Two jobs may share the same label.
        labelCache.addJobIdToLabelCache(jId, labelList);
        labelCache.addJobIdToLabelCache(jId2, labelList);
        assertTrue(labelCache.labelJobIdMap.containsKey(label1));
        assertTrue(labelCache.jobIdToLabelMap.containsKey(jId));
        assertTrue(labelCache.jobIdToLabelMap.containsKey(jId2));
    }

    @Test
    public void removeLabelTest() {
        JobClusterActor.LabelCache labelCache = new JobClusterActor.LabelCache();
        JobId jId = new JobId("addLabelTest", 1);
        List<Label> labelList = new ArrayList<>();
        Label label1 = new Label("l1", "v1");
        labelList.add(label1);
        labelCache.addJobIdToLabelCache(jId, labelList);
        assertTrue(labelCache.labelJobIdMap.containsKey(label1));
        assertTrue(labelCache.jobIdToLabelMap.containsKey(jId));
        labelCache.removeJobIdFromLabelCache(jId);
        assertFalse(labelCache.jobIdToLabelMap.containsKey(jId));
        // The label has no other jobs associated with it, so its entry is removed too.
        assertFalse(labelCache.labelJobIdMap.containsKey(label1));
    }

    @Test
    public void removeLabelTest2() {
        JobClusterActor.LabelCache labelCache = new JobClusterActor.LabelCache();
        JobId jId = new JobId("addLabelTest", 1);
        JobId jId2 = new JobId("addLabelTest", 2);
        List<Label> labelList = new ArrayList<>();
        Label label1 = new Label("l1", "v1");
        labelList.add(label1);
        labelCache.addJobIdToLabelCache(jId, labelList);
        labelCache.addJobIdToLabelCache(jId2, labelList);
        assertTrue(labelCache.labelJobIdMap.containsKey(label1));
        assertTrue(labelCache.jobIdToLabelMap.containsKey(jId));
        assertTrue(labelCache.jobIdToLabelMap.containsKey(jId2));
        labelCache.removeJobIdFromLabelCache(jId);
        assertFalse(labelCache.jobIdToLabelMap.containsKey(jId));
        // The label is still referenced by jId2, so its entry must remain.
        assertTrue(labelCache.labelJobIdMap.containsKey(label1));
    }

    @Test
    public void matchingLabelsAndTest() {
        JobClusterActor.LabelCache labelCache = new JobClusterActor.LabelCache();
        JobId jId = new JobId("addLabelTest", 1);
        JobId jId2 = new JobId("addLabelTest", 2);
        JobId jId3 = new JobId("addLabelTest", 3);
        JobId jId4 = new JobId("addLabelTest", 4);
        Label jobTypeLabel = new Label("_mantis.jobType", "source");
        Label originMRELabel = new Label("_mantis.dataOrigin", "mre");
        Label originKafkaLabel = new Label("_mantis.dataOrigin", "kafka");
        List<Label> sourceMREList = new ArrayList<>();
        sourceMREList.add(jobTypeLabel);
        sourceMREList.add(originMRELabel);
        List<Label> sourceKafkaList = new ArrayList<>();
        sourceKafkaList.add(jobTypeLabel);
        sourceKafkaList.add(originKafkaLabel);
        List<Label> sourceList = new ArrayList<>();
        sourceList.add(jobTypeLabel);
        labelCache.addJobIdToLabelCache(jId, sourceMREList);
        labelCache.addJobIdToLabelCache(jId2, sourceMREList);
        labelCache.addJobIdToLabelCache(jId3, sourceMREList);
        // AND match on {source, kafka}: no job carries both labels yet.
        Set<JobId> jobIdsMatchingLabels = labelCache.getJobIdsMatchingLabels(sourceKafkaList, true);
        assertEquals(0, jobIdsMatchingLabels.size());
        // After adding a kafka-sourced job, exactly that job matches.
        labelCache.addJobIdToLabelCache(jId4, sourceKafkaList);
        jobIdsMatchingLabels = labelCache.getJobIdsMatchingLabels(sourceKafkaList, true);
        assertEquals(1, jobIdsMatchingLabels.size());
        assertTrue(jobIdsMatchingLabels.contains(jId4));
        // AND match on just {source}: all four jobs carry the jobType label.
        jobIdsMatchingLabels = labelCache.getJobIdsMatchingLabels(sourceList, true);
        assertEquals(4, jobIdsMatchingLabels.size());
    }

    @Test
    public void matchingLabelsOrTest() {
        JobClusterActor.LabelCache labelCache = new JobClusterActor.LabelCache();
        JobId jId = new JobId("addLabelTest", 1);
        JobId jId2 = new JobId("addLabelTest", 2);
        List<Label> labelList1 = new ArrayList<>();
        List<Label> labelList2 = new ArrayList<>();
        labelList1.add(new Label("l1", "v1"));
        labelList2.add(new Label("l2", "v2"));
        labelCache.addJobIdToLabelCache(jId, labelList1);
        labelCache.addJobIdToLabelCache(jId2, labelList2);
        List<Label> labelListAll = new ArrayList<>();
        labelListAll.addAll(labelList1);
        labelListAll.addAll(labelList2);
        // OR semantics: each job matches via its own single label.
        Set<JobId> jobIdsMatchingLabels = labelCache.getJobIdsMatchingLabels(labelListAll, false);
        assertEquals(2, jobIdsMatchingLabels.size());
        assertTrue(jobIdsMatchingLabels.contains(jId));
        assertTrue(jobIdsMatchingLabels.contains(jId2));
    }
}
| 7,885 |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster;
import static io.mantisrx.master.jobcluster.JobClusterActor.JobInfo;
import static io.mantisrx.master.jobcluster.JobClusterActor.props;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SERVER_ERROR;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DisableJobClusterRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DisableJobClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.EnableJobClusterRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.EnableJobClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobClusterRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLastSubmittedJobIdStreamResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListArchivedWorkersResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobCriteria;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobIdsRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobIdsResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobsRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ListJobsResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ResubmitWorkerResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.ScaleStageResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.SubmitJobRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.SubmitJobResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterArtifactResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterLabelsResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterRequest;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterSLAResponse;
import static io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyResponse;
import static java.util.Optional.empty;
import static java.util.Optional.of;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.testkit.javadsl.TestKit;
import com.netflix.mantis.master.scheduler.TestHelpers;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import io.mantisrx.common.Label;
import io.mantisrx.master.api.akka.route.proto.JobClusterProtoAdapter;
import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventPublisherImpl;
import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl;
import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl;
import io.mantisrx.master.jobcluster.job.CostsCalculator;
import io.mantisrx.master.jobcluster.job.IMantisJobMetadata;
import io.mantisrx.master.jobcluster.job.IMantisStageMetadata;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.master.jobcluster.job.JobTestHelper;
import io.mantisrx.master.jobcluster.job.MantisJobMetadataImpl;
import io.mantisrx.master.jobcluster.job.MantisJobMetadataView;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.master.jobcluster.job.worker.JobWorker;
import io.mantisrx.master.jobcluster.job.worker.WorkerHeartbeat;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.master.jobcluster.job.worker.WorkerTerminate;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterArtifactRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterLabelsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterSLARequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterProto;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.WorkerMigrationConfig.MigrationStrategyEnum;
import io.mantisrx.runtime.command.InvalidJobException;
import io.mantisrx.runtime.descriptor.DeploymentStrategy;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.descriptor.StageDeploymentStrategy;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.Status.TYPE;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.domain.*;
import io.mantisrx.server.master.persistence.IMantisPersistenceProvider;
import io.mantisrx.server.master.persistence.KeyValueBasedPersistenceProvider;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.MantisSchedulerFactory;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import io.mantisrx.server.master.store.FileBasedStore;
import io.mantisrx.server.master.store.NamedJob;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.io.File;
import java.time.Duration;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.mockito.Mockito;
import org.mockito.stubbing.Answer;
import rx.schedulers.Schedulers;
import rx.subjects.BehaviorSubject;
public class JobClusterTest {
// Shared fixtures reused across all cluster CRUD tests below.
public static final SLA NO_OP_SLA = new SLA(0, 0, null, null);
public static final MachineDefinition DEFAULT_MACHINE_DEFINITION = new MachineDefinition(1, 10, 10, 10, 2);
// One stage, one worker, no placement constraints.
public static final SchedulingInfo SINGLE_WORKER_SCHED_INFO = new SchedulingInfo.Builder().numberOfStages(1).singleWorkerStageWithConstraints(DEFAULT_MACHINE_DEFINITION, Lists.newArrayList(), Lists.newArrayList()).build();
// One stage, two workers.
public static final SchedulingInfo TWO_WORKER_SCHED_INFO = new SchedulingInfo.Builder().numberOfStages(1).multiWorkerStage(2, DEFAULT_MACHINE_DEFINITION).build();
public static final JobOwner DEFAULT_JOB_OWNER = new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo");
// Logging-only event publisher; tests do not assert on published events.
final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl());
// Shared actor system, created once in setup() and torn down in tearDown().
static ActorSystem system;
//private static TestKit probe;
// Per-test persistence layer, rebuilt in setupStorageProvider() before each test.
private MantisJobStore jobStore;
private IMantisPersistenceProvider storageProvider;
private static LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl());
private static final String user = "mantis";
@Rule
public TemporaryFolder rootDir = new TemporaryFolder();
private CostsCalculator costsCalculator;
@BeforeClass
public static void setup() {
// Quiet test config: route logs through the TestEventListener and cap
// expectMsg default timeout at 1s so failing expectations surface quickly.
Config config = ConfigFactory.parseString("akka {\n" +
" loggers = [\"akka.testkit.TestEventListener\"]\n" +
" loglevel = \"WARNING\"\n" +
" stdout-loglevel = \"WARNING\"\n" +
" test.single-expect-default = 1000 millis\n" +
"}\n");
system = ActorSystem.create("JobClusterTest", config.withFallback(ConfigFactory.load()));
// Prepare on-disk dirs and master config before any actor is created.
JobTestHelper.createDirsIfRequired();
TestHelpers.setupMasterConfig();
}
@AfterClass
public static void tearDown() {
// Remove files created by the suite, then shut the shared actor system down.
JobTestHelper.deleteAllFiles();
TestKit.shutdownActorSystem(system);
system = null;
}
@Before
public void setupStorageProvider() {
// Fresh file-backed persistence rooted at the per-test TemporaryFolder so
// tests cannot see each other's stored clusters/jobs.
storageProvider = new KeyValueBasedPersistenceProvider(
new FileBasedStore(rootDir.getRoot()),
eventPublisher);
jobStore = new MantisJobStore(storageProvider);
costsCalculator = CostsCalculator.noop();
}
/**
 * Deletes spooled stage files for the given job from {@code dirName}.
 * Files are matched by the naming convention {@code <filePrefix><jobId>-*}.
 *
 * Fix: the original guarded {@code spoolDir != null}, which is always true
 * ({@code new File(...)} never returns null). The actual hazard is
 * {@link File#listFiles} returning null when the directory does not exist or
 * an I/O error occurs, which made the for-each throw an NPE.
 */
private void deleteFiles(String dirName, final String jobId, final String filePrefix) {
    File spoolDir = new File(dirName);
    File[] stageFiles = spoolDir.listFiles((dir, name) -> name.startsWith(filePrefix + jobId + "-"));
    if (stageFiles != null) {
        for (File stageFile : stageFiles) {
            // Best-effort cleanup; a failed delete is ignored, as before.
            stageFile.delete();
        }
    }
}
// Convenience overload: cluster definition with no labels, no-op SLA,
// single-worker scheduling info.
private JobClusterDefinitionImpl createFakeJobClusterDefn(String clusterName) {
return createFakeJobClusterDefn(clusterName, Lists.newArrayList());
}
// Convenience overload: caller-supplied labels, no-op SLA.
private JobClusterDefinitionImpl createFakeJobClusterDefn(String clusterName, List<Label> labels) {
return createFakeJobClusterDefn(clusterName, labels, NO_OP_SLA);
}
// Convenience overload: caller-supplied labels and SLA, single-worker scheduling.
private JobClusterDefinitionImpl createFakeJobClusterDefn(String clusterName, List<Label> labels, SLA sla) {
return createFakeJobClusterDefn(clusterName,labels, sla, SINGLE_WORKER_SCHED_INFO);
}
/**
 * Builds a complete test cluster definition: fixed artifact "myart" at
 * version 0.0.1, default owner/migration config, job-master ready, with the
 * caller-supplied labels, SLA, and scheduling info.
 */
private JobClusterDefinitionImpl createFakeJobClusterDefn(String clusterName, List<Label> labels, SLA sla, SchedulingInfo schedulingInfo) {
    // Artifact + scheduling portion of the definition.
    final JobClusterConfig config = new JobClusterConfig.Builder()
            .withArtifactName("myart")
            .withVersion("0.0.1")
            .withSchedulingInfo(schedulingInfo)
            .build();
    // Cluster-level metadata wrapping that config.
    return new JobClusterDefinitionImpl.Builder()
            .withName(clusterName)
            .withUser(user)
            .withJobClusterConfig(config)
            .withLabels(labels)
            .withParameters(Lists.newArrayList())
            .withSla(sla)
            .withIsReadyForJobMaster(true)
            .withMigrationConfig(WorkerMigrationConfig.DEFAULT)
            .withOwner(DEFAULT_JOB_OWNER)
            .build();
}
// Perpetual single-worker job with the given labels and no deployment strategy.
private JobDefinition createJob(String name, List<Label> labelList) throws InvalidJobException {
return createJob(name, 0, MantisJobDurationType.Perpetual,null, SINGLE_WORKER_SCHED_INFO, labelList, null);
}
// Job with explicit subscription timeout and duration type; no user-provided type.
private JobDefinition createJob(String name2, long subsTimeoutSecs, MantisJobDurationType durationType) throws InvalidJobException {
return createJob(name2, subsTimeoutSecs, durationType, (String) null);
}
// Job with explicit scheduling info; 1s subscription timeout, no labels.
private JobDefinition createJob(String name2, MantisJobDurationType durationType, SchedulingInfo schedulingInfo)
throws InvalidJobException {
return createJob(name2, 1, durationType, null, schedulingInfo, Lists.newArrayList(), null);
}
// Job with explicit scheduling info and deployment strategy.
private JobDefinition createJob(String name2, MantisJobDurationType durationType, SchedulingInfo schedulingInfo,
DeploymentStrategy deploymentStrategy)
throws InvalidJobException {
return createJob(name2, 1, durationType, null, schedulingInfo, Lists.newArrayList(), deploymentStrategy);
}
// Job with explicit artifact name/version (used by artifact-update tests).
private JobDefinition createJob(String name2, MantisJobDurationType durationType, SchedulingInfo schedulingInfo,
String artifactName, String artifactVersion) throws InvalidJobException {
return createJob(name2, 1, durationType, null, schedulingInfo,
Lists.newArrayList(), artifactName, artifactVersion, null);
}
// Job with explicit artifact name/version plus a deployment strategy.
private JobDefinition createJob(String name2, MantisJobDurationType durationType, SchedulingInfo schedulingInfo,
String artifactName, String artifactVersion, DeploymentStrategy deploymentStrategy) throws InvalidJobException {
return createJob(name2, 1, durationType, null, schedulingInfo,
Lists.newArrayList(), artifactName, artifactVersion, deploymentStrategy);
}
// Job with a user-provided SLA type string; single-worker scheduling, no labels.
private JobDefinition createJob(String name2, long subsTimeoutSecs, MantisJobDurationType durationType, String userProvidedType) throws InvalidJobException {
return createJob(name2, subsTimeoutSecs, durationType, userProvidedType, SINGLE_WORKER_SCHED_INFO, Lists.newArrayList(), null);
}
// Delegates to the full builder overload with the default artifact "myart".
private JobDefinition createJob(String name2, long subsTimeoutSecs, MantisJobDurationType durationType,
String userProvidedType, SchedulingInfo schedulingInfo, List<Label> labelList)
throws InvalidJobException {
return createJob(name2, subsTimeoutSecs, durationType, userProvidedType, schedulingInfo, labelList,
"myart", null, null);
}
// Same as above but forwards a deployment strategy.
private JobDefinition createJob(String name2, long subsTimeoutSecs, MantisJobDurationType durationType,
String userProvidedType, SchedulingInfo schedulingInfo, List<Label> labelList,
DeploymentStrategy deploymentStrategy)
throws InvalidJobException {
return createJob(name2, subsTimeoutSecs, durationType, userProvidedType, schedulingInfo, labelList,
"myart", null, deploymentStrategy);
}
/**
 * Full job-definition builder behind every createJob overload. Submits as user
 * "njoshi" with a lossy, transient JobSla carrying the caller's user-provided
 * type. Note: {@code durationType} is accepted but not used by the SLA —
 * preserved as-is from the original helper signature.
 */
private JobDefinition createJob(String name2, long subsTimeoutSecs, MantisJobDurationType durationType,
String userProvidedType, SchedulingInfo schedulingInfo, List<Label> labelList,
String artifactName, String artifactVersion, DeploymentStrategy deploymentStrategy)
throws InvalidJobException {
    final JobSla jobSla =
            new JobSla(0, 0, JobSla.StreamSLAType.Lossy, MantisJobDurationType.Transient, userProvidedType);
    return new JobDefinition.Builder()
            .withName(name2)
            .withUser("njoshi")
            .withArtifactName(artifactName)
            .withVersion(artifactVersion)
            .withSchedulingInfo(schedulingInfo)
            .withDeploymentStrategy(deploymentStrategy)
            .withParameters(Lists.newArrayList())
            .withLabels(labelList)
            .withSubscriptionTimeoutSecs(subsTimeoutSecs)
            .withJobSla(jobSla)
            .build();
}
// Simplest overload: perpetual job, no timeout, no user type.
private JobDefinition createJob(String name2) throws InvalidJobException {
return createJob(name2, 0, MantisJobDurationType.Perpetual, null);
}
// CLUSTER CRUD TESTS ///////////////////////////////////////////////////////////////////////////
@Test
public void testJobClusterCreate() throws Exception {
String name = "testJobClusterCreate";
TestKit probe = new TestKit(system);
MantisSchedulerFactory schedulerMock = mock(MantisSchedulerFactory.class);
MantisJobStore jobStoreMock = mock(MantisJobStore.class);
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(name);
ActorRef jobClusterActor = system.actorOf(props(name, jobStoreMock, schedulerMock, eventPublisher, costsCalculator));
// Initialize the cluster actor and expect a SUCCESS response.
jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
assertEquals(SUCCESS, createResp.responseCode);
// Read the cluster back and verify name, owner, labels, and jar count.
jobClusterActor.tell(new GetJobClusterRequest(name), probe.getRef());
GetJobClusterResponse resp2 = probe.expectMsgClass(GetJobClusterResponse.class);
System.out.println("resp2 " + resp2);
assertEquals(SUCCESS, resp2.responseCode);
assertEquals(name, resp2.getJobCluster().get().getName());
assertEquals("Nick", resp2.getJobCluster().get().getOwner().getName());
assertTrue(resp2.getJobCluster().get().getLabels().isEmpty());
assertEquals(1,resp2.getJobCluster().get().getJars().size());
// Delete the cluster and confirm the store saw exactly one create + one delete.
jobClusterActor.tell(new JobClusterProto.DeleteJobClusterRequest(user, name, probe.getRef()), probe.getRef());
JobClusterProto.DeleteJobClusterResponse resp3 = probe.expectMsgClass(JobClusterProto.DeleteJobClusterResponse.class);
assertEquals(SUCCESS, resp3.responseCode);
assertEquals(jobClusterActor, probe.getLastSender());
//verify(jobStoreMock, times(1)).storeNewJob(any());
verify(jobStoreMock, times(1)).createJobCluster(any());
verify(jobStoreMock, times(1)).deleteJobCluster(name);
probe.getSystem().stop(jobClusterActor);
}
@Test
public void testJobClusterEnable() {
    try {
        TestKit probe = new TestKit(system);
        String clusterName = "testJobClusterEnable";
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        String jobId = clusterName + "-1";
        JobDefinition jobDefn = createJob(clusterName);
        // Archived-job stub returned when the actor looks up the completed job 1.
        IMantisJobMetadata job1 = new MantisJobMetadataImpl.Builder()
                .withJobDefinition(jobDefn)
                .withJobState(JobState.Completed)
                .withJobId(new JobId(clusterName,1))
                .withNextWorkerNumToUse(2)
                .withSubmittedAt(1000)
                .build();
        when(jobStoreMock.getArchivedJob(jobId)).thenReturn(of(job1));
        // min=1 SLA so re-enabling the cluster triggers a replacement job.
        SLA sla = new SLA(1,1,null,null);
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, Lists.newArrayList(),sla);
        ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
        jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
        JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
        assertEquals(SUCCESS, createResp.responseCode);
        // Submit a job and drive it to Launched via worker lifecycle events.
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe,jobClusterActor,jobId,1,new WorkerId(clusterName,jobId,0,1));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Launched);
        // Disable then re-enable the cluster.
        jobClusterActor.tell(new DisableJobClusterRequest(clusterName,user),probe.getRef());
        DisableJobClusterResponse resp = probe.expectMsgClass(DisableJobClusterResponse.class);
        assertTrue(BaseResponse.ResponseCode.SUCCESS.equals(resp.responseCode));
        jobClusterActor.tell(new EnableJobClusterRequest(clusterName,user),probe.getRef());
        EnableJobClusterResponse enableResp = probe.expectMsgClass(EnableJobClusterResponse.class);
        assertTrue(BaseResponse.ResponseCode.SUCCESS.equals(enableResp.responseCode));
        // first job was killed during disable
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Completed);
        // Sla will cause new job to get launched
        String jobId2 = clusterName + "-2";
        boolean accepted = false;
        int cnt = 0;
        // Poll for the SLA-replacement job. BUG FIX: the original loop never
        // incremented cnt, so a job that never appeared spun forever (1s sleep
        // per iteration); now the retry count is actually bounded at 50.
        while(cnt < 50) {
            cnt++;
            jobClusterActor.tell(new GetJobDetailsRequest("nj", JobId.fromId(jobId2).get()), probe.getRef());
            GetJobDetailsResponse detailsResp = probe.expectMsgClass(GetJobDetailsResponse.class);
            if(detailsResp.responseCode.equals(BaseResponse.ResponseCode.SUCCESS)) {
                accepted = true;
                break;
            }
            Thread.sleep(1000);
        }
        assertTrue(accepted);
        // JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Completed);
        // JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 2), jobClusterActor);
        // verify(jobStoreMock, times(1)).createJobCluster(any());
        // verify(jobStoreMock, times(1)).updateJobCluster(any());
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}
@Test
public void testJobClusterUpdateAndDelete() throws Exception {
TestKit probe = new TestKit(system);
List<Label> labels = Lists.newLinkedList();
Label l = new Label("labelname","labelvalue");
labels.add(l);
String clusterName = "testJobClusterUpdateAndDelete";
MantisScheduler schedulerMock = mock(MantisScheduler.class);
MantisJobStore jobStoreMock = mock(MantisJobStore.class);
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, labels);
ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
assertEquals(SUCCESS, createResp.responseCode);
// Build an updated definition differing only by artifact version (0.0.2);
// version must differ or the update is rejected (see the NotUnique test).
JobClusterConfig clusterConfig = new JobClusterConfig.Builder()
.withArtifactName("myart")
.withSchedulingInfo(SINGLE_WORKER_SCHED_INFO)
.withVersion("0.0.2")
.build();
final JobClusterDefinitionImpl updatedJobCluster = new JobClusterDefinitionImpl.Builder()
.withJobClusterConfig(clusterConfig)
.withName(clusterName)
.withParameters(Lists.newArrayList())
.withLabels(labels)
.withUser(user)
.withIsReadyForJobMaster(true)
.withOwner(DEFAULT_JOB_OWNER)
.withMigrationConfig(WorkerMigrationConfig.DEFAULT)
.withSla(NO_OP_SLA)
.build();
jobClusterActor.tell(new UpdateJobClusterRequest(updatedJobCluster, "user"), probe.getRef());
UpdateJobClusterResponse resp = probe.expectMsgClass(UpdateJobClusterResponse.class);
assertEquals(SUCCESS, resp.responseCode);
assertEquals(jobClusterActor, probe.getLastSender());
// Read back and verify the update persisted the label.
jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
GetJobClusterResponse resp3 = probe.expectMsgClass(GetJobClusterResponse.class);
assertEquals(SUCCESS, resp3.responseCode);
assertTrue(resp3.getJobCluster() != null);
System.out.println("Job cluster " + resp3.getJobCluster());
assertEquals(clusterName, resp3.getJobCluster().get().getName());
System.out.println("Updated job cluster " + resp3.getJobCluster());
assertEquals(1, resp3.getJobCluster().get().getLabels().size());
assertEquals("labelname", resp3.getJobCluster().get().getLabels().get(0).getName());
// Delete and verify one create, one update, one delete hit the store.
jobClusterActor.tell(new JobClusterProto.DeleteJobClusterRequest(user, clusterName, probe.getRef()), probe.getRef());
JobClusterProto.DeleteJobClusterResponse resp4 = probe.expectMsgClass(JobClusterProto.DeleteJobClusterResponse.class);
assertEquals(SUCCESS, resp4.responseCode);
assertEquals(jobClusterActor, probe.getLastSender());
verify(jobStoreMock, times(1)).createJobCluster(any());
verify(jobStoreMock, times(1)).updateJobCluster(any());
verify(jobStoreMock, times(1)).deleteJobCluster(clusterName);
}
@Test
public void testJobClusterUpdateFailsIfArtifactNotUnique() throws Exception {
TestKit probe = new TestKit(system);
List<Label> labels = Lists.newLinkedList();
Label l = new Label("labelname","labelvalue");
labels.add(l);
String clusterName = "testJobClusterUpdateFailsIfArtifactNotUnique";
MantisScheduler schedulerMock = mock(MantisScheduler.class);
MantisJobStore jobStoreMock = mock(MantisJobStore.class);
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, labels);
ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
assertEquals(SUCCESS, createResp.responseCode);
// Re-submitting the identical definition (same artifact version) must be
// rejected as a CLIENT_ERROR and must not touch the store's update path.
jobClusterActor.tell(new UpdateJobClusterRequest(fakeJobCluster, "user"), probe.getRef());
UpdateJobClusterResponse resp = probe.expectMsgClass(UpdateJobClusterResponse.class);
assertEquals(CLIENT_ERROR, resp.responseCode);
assertEquals(jobClusterActor, probe.getLastSender());
verify(jobStoreMock, times(1)).createJobCluster(any());
verify(jobStoreMock, times(0)).updateJobCluster(any());
}
@Test
public void testJobClusterDeleteFailsIfJobsActive() throws Exception {
TestKit probe = new TestKit(system);
List<Label> labels = Lists.newLinkedList();
Label l = new Label("labelname","labelvalue");
labels.add(l);
String clusterName = "testJobClusterDeleteFailsIfJobsActive";
MantisScheduler schedulerMock = mock(MantisScheduler.class);
MantisJobStore jobStoreMock = mock(MantisJobStore.class);
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, labels);
ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
assertEquals(SUCCESS, createResp.responseCode);
// Submit a job that stays in Accepted — an active job should block deletion.
final JobDefinition jobDefn = createJob(clusterName,1, MantisJobDurationType.Transient);
String jobId = clusterName + "-1";
JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
// Delete must fail with CLIENT_ERROR and never reach the store.
jobClusterActor.tell(new JobClusterProto.DeleteJobClusterRequest(user, clusterName, probe.getRef()), probe.getRef());
JobClusterProto.DeleteJobClusterResponse resp4 = probe.expectMsgClass(JobClusterProto.DeleteJobClusterResponse.class);
assertEquals(CLIENT_ERROR, resp4.responseCode);
assertEquals(jobClusterActor, probe.getLastSender());
verify(jobStoreMock, times(1)).createJobCluster(any());
verify(jobStoreMock, times(1)).updateJobCluster(any());
verify(jobStoreMock, times(0)).deleteJobCluster(clusterName);
}
@Test
public void testJobClusterDeletePurgesCompletedJobs() throws Exception {
TestKit probe = new TestKit(system);
List<Label> labels = Lists.newLinkedList();
Label l = new Label("labelname","labelvalue");
labels.add(l);
String clusterName = "testJobClusterDeletePurgesCompletedJobs";
MantisScheduler schedulerMock = mock(MantisScheduler.class);
MantisJobStore jobStoreMock = mock(MantisJobStore.class);
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, labels);
ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
assertEquals(SUCCESS, createResp.responseCode);
final JobDefinition jobDefn = createJob(clusterName,1, MantisJobDurationType.Transient);
String jobId = clusterName + "-1";
JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
// Disabling the cluster kills the accepted job, moving it to completed.
jobClusterActor.tell(new DisableJobClusterRequest(clusterName, "user"), probe.getRef());
DisableJobClusterResponse disableResp = probe.expectMsgClass(DisableJobClusterResponse.class);
assertEquals(SUCCESS, disableResp.responseCode);
// Give the kill/archival async work time to finish before deleting.
Thread.sleep(1000);
jobClusterActor.tell(new JobClusterProto.DeleteJobClusterRequest(user, clusterName, probe.getRef()), probe.getRef());
JobClusterProto.DeleteJobClusterResponse resp4 = probe.expectMsgClass(JobClusterProto.DeleteJobClusterResponse.class);
assertEquals(SUCCESS, resp4.responseCode);
assertEquals(jobClusterActor, probe.getLastSender());
// Delete must also purge the completed job record from the store.
verify(jobStoreMock, times(1)).createJobCluster(any());
verify(jobStoreMock, times(2)).updateJobCluster(any());
verify(jobStoreMock, times(1)).deleteJobCluster(clusterName);
verify(jobStoreMock, times(1)).storeCompletedJobForCluster(any(),any());
verify(jobStoreMock, times(1)).deleteJob("testJobClusterDeletePurgesCompletedJobs-1");
}
@Test
public void testJobClusterDisable() throws InterruptedException {
TestKit probe = new TestKit(system);
// Latch flipped by the mock when the completed job is archived.
CountDownLatch storeCompletedCalled = new CountDownLatch(1);
String clusterName = "testJobClusterDisable";
MantisScheduler schedulerMock = mock(MantisScheduler.class);
MantisJobStore jobStoreMock = mock(MantisJobStore.class);
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
assertEquals(SUCCESS, createResp.responseCode);
try {
final JobDefinition jobDefn = createJob(clusterName,1, MantisJobDurationType.Transient);
String jobId = clusterName + "-1";
// Archived-job stub: the actor re-reads the killed job as Completed.
IMantisJobMetadata completedJobMock = new MantisJobMetadataImpl.Builder()
.withJobId(new JobId(clusterName, 1))
.withJobDefinition(jobDefn)
.withJobState(JobState.Completed)
.build();
when(jobStoreMock.getArchivedJob(any())).thenReturn(of(completedJobMock));
doAnswer((Answer) invocation -> {
storeCompletedCalled.countDown();
return null;
}).when(jobStoreMock).storeCompletedJobForCluster(any(),any());
JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
// Disable: the cluster flag flips and the accepted job is terminated.
jobClusterActor.tell(new DisableJobClusterRequest(clusterName,"user"), probe.getRef());
DisableJobClusterResponse disableResp = probe.expectMsgClass(DisableJobClusterResponse.class);
assertEquals(SUCCESS, disableResp.responseCode);
jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
GetJobClusterResponse getJobClusterResp = probe.expectMsgClass(GetJobClusterResponse.class);
assertTrue(getJobClusterResp.getJobCluster().get().isDisabled());
jobClusterActor.tell(new GetJobDetailsRequest(clusterName, JobId.fromId(jobId).get()),probe.getRef());
GetJobDetailsResponse jobDetailsResp = probe.expectMsgClass(GetJobDetailsResponse.class);
assertEquals(SUCCESS, jobDetailsResp.responseCode);
assertEquals(jobId, jobDetailsResp.getJobMetadata().get().getJobId().getId());
assertEquals(JobState.Completed, jobDetailsResp.getJobMetadata().get().getState());
// Verify the expected persistence call counts for the whole flow.
verify(jobStoreMock, times(1)).createJobCluster(any());
verify(jobStoreMock, times(2)).updateJobCluster(any());
verify(jobStoreMock, times(1)).storeNewJob(any());
verify(jobStoreMock, times(1)).updateStage(any());
verify(jobStoreMock,times(2)).updateJob(any());
verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
storeCompletedCalled.await(1, TimeUnit.SECONDS);
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
///////////////////////////////////////////// CLUSTER CRUD END ///////////////////////////////////////////////////////
////////////////////////////// CLUSTER UPDATE FLAVORS ///////////////////////////////////////////////////////////////
@Test
public void testJobClusterSLAUpdate() throws Exception {
TestKit probe = new TestKit(system);
String clusterName = "testJobClusterSLAUpdate";
MantisScheduler schedulerMock = mock(MantisScheduler.class);
MantisJobStore jobStoreMock = mock(MantisJobStore.class);
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
assertEquals(SUCCESS, createResp.responseCode);
// Raise the SLA max from the no-op default to 10 and verify it round-trips.
SLA newSLA = new SLA(0,10,null,null);
UpdateJobClusterSLARequest updateSlaReq = new UpdateJobClusterSLARequest(clusterName, newSLA.getMin(), newSLA.getMax(), "user");
jobClusterActor.tell(updateSlaReq, probe.getRef());
UpdateJobClusterSLAResponse resp = probe.expectMsgClass(UpdateJobClusterSLAResponse.class);
assertEquals(SUCCESS, resp.responseCode);
assertEquals(jobClusterActor, probe.getLastSender());
jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
GetJobClusterResponse resp3 = probe.expectMsgClass(GetJobClusterResponse.class);
assertEquals(SUCCESS, resp3.responseCode);
assertTrue(resp3.getJobCluster() != null);
System.out.println("Job cluster " + resp3.getJobCluster());
assertEquals(clusterName, resp3.getJobCluster().get().getName());
System.out.println("Updated job cluster " + resp3.getJobCluster());
assertEquals(newSLA, DataFormatAdapter.convertToSLA(resp3.getJobCluster().get().getSla()));
verify(jobStoreMock, times(1)).updateJobCluster(any());
verify(jobStoreMock, times(1)).createJobCluster(any());
}
@Test
public void testJobClusterMigrationConfigUpdate() throws Exception {
TestKit probe = new TestKit(system);
String clusterName = "testJobClusterMigrationConfigUpdate";
MantisScheduler schedulerMock = mock(MantisScheduler.class);
MantisJobStore jobStoreMock = mock(MantisJobStore.class);
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
assertEquals(SUCCESS, createResp.responseCode);
// Switch migration strategy from the default to ONE_WORKER and verify it sticks.
WorkerMigrationConfig newConfig = new WorkerMigrationConfig(MigrationStrategyEnum.ONE_WORKER, "{'name':'value'}");
UpdateJobClusterWorkerMigrationStrategyRequest updateMigrationConfigReq = new UpdateJobClusterWorkerMigrationStrategyRequest(clusterName, newConfig, "user");
jobClusterActor.tell(updateMigrationConfigReq, probe.getRef());
UpdateJobClusterWorkerMigrationStrategyResponse resp = probe.expectMsgClass(UpdateJobClusterWorkerMigrationStrategyResponse.class);
assertEquals(SUCCESS, resp.responseCode);
assertEquals(jobClusterActor, probe.getLastSender());
jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
GetJobClusterResponse resp3 = probe.expectMsgClass(GetJobClusterResponse.class);
assertEquals(SUCCESS, resp3.responseCode);
assertTrue(resp3.getJobCluster() != null);
System.out.println("Job cluster " + resp3.getJobCluster());
assertEquals(clusterName, resp3.getJobCluster().get().getName());
System.out.println("Updated job cluster " + resp3.getJobCluster());
assertEquals(MigrationStrategyEnum.ONE_WORKER, resp3.getJobCluster().get().getMigrationConfig().getStrategy());
verify(jobStoreMock, times(1)).updateJobCluster(any());
verify(jobStoreMock, times(1)).createJobCluster(any());
}
@Test
public void testJobClusterArtifactUpdate() throws Exception {
TestKit probe = new TestKit(system);
String clusterName = "testJobClusterArtifactUpdate";
MantisScheduler schedulerMock = mock(MantisScheduler.class);
MantisJobStore jobStoreMock = mock(MantisJobStore.class);
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
assertEquals(SUCCESS, createResp.responseCode);
// New artifact/version (1.0.1 differs from the initial 0.0.1) must append a jar.
UpdateJobClusterArtifactRequest req = new UpdateJobClusterArtifactRequest(clusterName, "a1", "1.0.1", true, "user");
jobClusterActor.tell(req, probe.getRef());
UpdateJobClusterArtifactResponse resp = probe.expectMsgClass(UpdateJobClusterArtifactResponse.class);
assertEquals(SUCCESS, resp.responseCode);
assertEquals(jobClusterActor, probe.getLastSender());
jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
GetJobClusterResponse resp3 = probe.expectMsgClass(GetJobClusterResponse.class);
assertEquals(SUCCESS, resp3.responseCode);
assertTrue(resp3.getJobCluster() != null);
System.out.println("Job cluster " + resp3.getJobCluster());
assertEquals(clusterName, resp3.getJobCluster().get().getName());
System.out.println("Updated job cluster " + resp3.getJobCluster());
// Two jars now: the original plus the newly uploaded one.
assertEquals(2,resp3.getJobCluster().get().getJars().size());
//assertEquals("a1", resp3.getJobCluster().getJobClusterDefinition().getJobClusterConfig().getArtifactName());
assertEquals("1.0.1", resp3.getJobCluster().get().getLatestVersion());
List<NamedJob.Jar> jars = resp3.getJobCluster().get().getJars();
// The newest jar must carry a real upload timestamp.
assertTrue(jars.get(jars.size()-1).getUploadedAt() != -1);
verify(jobStoreMock, times(1)).updateJobCluster(any());
verify(jobStoreMock, times(1)).createJobCluster(any());
}
@Test
public void testJobClusterArtifactUpdateNotUniqueFails() throws Exception {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobClusterArtifactUpdateNotUniqueFails";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);

    // Attempt an artifact update that reuses the version ("0.0.1") the cluster
    // was created with; a non-unique version must be rejected.
    UpdateJobClusterArtifactRequest req = new UpdateJobClusterArtifactRequest(clusterName, "a1", "0.0.1", true, "user");
    jobClusterActor.tell(req, probe.getRef());
    UpdateJobClusterArtifactResponse resp = probe.expectMsgClass(UpdateJobClusterArtifactResponse.class);
    assertEquals(CLIENT_ERROR, resp.responseCode);
    assertEquals(jobClusterActor, probe.getLastSender());

    // Fetch the cluster and confirm nothing changed: still one jar and the original version.
    jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
    GetJobClusterResponse resp3 = probe.expectMsgClass(GetJobClusterResponse.class);
    assertEquals(SUCCESS, resp3.responseCode);
    assertTrue(resp3.getJobCluster().isPresent());
    assertEquals(clusterName, resp3.getJobCluster().get().getName());
    assertEquals(1, resp3.getJobCluster().get().getJars().size());
    assertEquals("0.0.1", resp3.getJobCluster().get().getLatestVersion());

    // The rejected update must not have been persisted; only the initial create was.
    verify(jobStoreMock, times(0)).updateJobCluster(any());
    verify(jobStoreMock, times(1)).createJobCluster(any());
}
@Test
public void testJobClusterArtifactUpdateMultipleTimes() throws Exception {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobClusterArtifactUpdateMultipleTimes";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);

    // First artifact update: version 1.0.1 should be accepted and appended as a second jar.
    UpdateJobClusterArtifactRequest req = new UpdateJobClusterArtifactRequest(clusterName, "a1", "1.0.1", true, "user");
    jobClusterActor.tell(req, probe.getRef());
    UpdateJobClusterArtifactResponse resp = probe.expectMsgClass(UpdateJobClusterArtifactResponse.class);
    assertEquals(SUCCESS, resp.responseCode);
    assertEquals(jobClusterActor, probe.getLastSender());

    jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
    GetJobClusterResponse resp3 = probe.expectMsgClass(GetJobClusterResponse.class);
    assertEquals(SUCCESS, resp3.responseCode);
    assertTrue(resp3.getJobCluster().isPresent());
    assertEquals(clusterName, resp3.getJobCluster().get().getName());
    assertEquals("1.0.1", resp3.getJobCluster().get().getLatestVersion());
    List<NamedJob.Jar> jars = resp3.getJobCluster().get().getJars();
    assertEquals(2, jars.size());

    // Second artifact update: version 1.0.3 should also be accepted, growing the jar list to 3.
    req = new UpdateJobClusterArtifactRequest(clusterName, "a2", "1.0.3", true, "user");
    jobClusterActor.tell(req, probe.getRef());
    resp = probe.expectMsgClass(UpdateJobClusterArtifactResponse.class);
    assertEquals(SUCCESS, resp.responseCode);
    assertEquals(jobClusterActor, probe.getLastSender());

    jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
    resp3 = probe.expectMsgClass(GetJobClusterResponse.class);
    assertEquals(SUCCESS, resp3.responseCode);
    assertTrue(resp3.getJobCluster().isPresent());
    assertEquals(clusterName, resp3.getJobCluster().get().getName());
    assertEquals("1.0.3", resp3.getJobCluster().get().getLatestVersion());
    jars = resp3.getJobCluster().get().getJars();
    assertEquals(3, jars.size());

    // One persisted update per successful artifact change, plus the initial create.
    verify(jobStoreMock, times(2)).updateJobCluster(any());
    verify(jobStoreMock, times(1)).createJobCluster(any());
}
@Test
public void testJobClusterInvalidSLAUpdateIgnored() throws Exception {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobClusterInvalidSLAUpdateIgnored";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);

    // min (2) > max (1) is an invalid SLA; the actor must reject it as a client error.
    UpdateJobClusterSLARequest updateSlaReq = new UpdateJobClusterSLARequest(clusterName, 2, 1, "user");
    jobClusterActor.tell(updateSlaReq, probe.getRef());
    UpdateJobClusterSLAResponse resp = probe.expectMsgClass(UpdateJobClusterSLAResponse.class);
    assertEquals(CLIENT_ERROR, resp.responseCode);
    assertEquals(jobClusterActor, probe.getLastSender());

    // The cluster should be unchanged: the original (0/0) SLA remains in effect.
    jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
    GetJobClusterResponse resp3 = probe.expectMsgClass(GetJobClusterResponse.class);
    assertEquals(SUCCESS, resp3.responseCode);
    assertTrue(resp3.getJobCluster().isPresent());
    assertEquals(clusterName, resp3.getJobCluster().get().getName());
    assertEquals(0, resp3.getJobCluster().get().getSla().getMin());
    assertEquals(0, resp3.getJobCluster().get().getSla().getMax());

    // Nothing was persisted beyond the initial create.
    verify(jobStoreMock, times(1)).createJobCluster(any());
    verify(jobStoreMock, times(0)).updateJobCluster(any());
}
@Test
public void testJobClusterLabelsUpdate() throws Exception {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobClusterLabelsUpdate";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);

    // Initially the cluster carries no labels.
    jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
    GetJobClusterResponse resp3 = probe.expectMsgClass(GetJobClusterResponse.class);
    assertEquals(SUCCESS, resp3.responseCode);
    assertTrue(resp3.getJobCluster().isPresent());
    assertEquals(clusterName, resp3.getJobCluster().get().getName());
    assertEquals(0, resp3.getJobCluster().get().getLabels().size());

    // Apply an update containing a single label.
    List<Label> labels = Lists.newLinkedList();
    Label l = new Label("labelname", "labelvalue");
    labels.add(l);
    UpdateJobClusterLabelsRequest updateLabelsReq = new UpdateJobClusterLabelsRequest(clusterName, labels, "user");
    jobClusterActor.tell(updateLabelsReq, probe.getRef());
    UpdateJobClusterLabelsResponse resp = probe.expectMsgClass(UpdateJobClusterLabelsResponse.class);
    assertEquals(SUCCESS, resp.responseCode);
    assertEquals(jobClusterActor, probe.getLastSender());

    // The cluster should now carry exactly the label we set.
    jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
    resp3 = probe.expectMsgClass(GetJobClusterResponse.class);
    assertEquals(SUCCESS, resp3.responseCode);
    assertTrue(resp3.getJobCluster().isPresent());
    assertEquals(clusterName, resp3.getJobCluster().get().getName());
    assertEquals(1, resp3.getJobCluster().get().getLabels().size());
    assertEquals(l, resp3.getJobCluster().get().getLabels().get(0));

    // One create plus one persisted update.
    verify(jobStoreMock, times(1)).createJobCluster(any());
    verify(jobStoreMock, times(1)).updateJobCluster(any());
}
////////////////////////////////////// CLUSTER UPDATE FLAVORS END ////////////////////////////////////////////////////
////////////////////////////////////// JOB SUBMIT OPERATIONS /////////////////////////////////////////////////////////////
@Test
public void testJobSubmit() {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobSubmit";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        // Submit one transient job, verify it is Accepted, then kill it and
        // confirm the job is archived.
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        JobTestHelper.killJobSendWorkerTerminatedAndVerify(probe, clusterName, new JobId(clusterName, 1), jobClusterActor, new WorkerId(jobId, 0, 1));
        // Give the actor a moment to process the kill before checking persistence calls.
        Thread.sleep(500);
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(1)).updateJobCluster(any());
        verify(jobStoreMock, timeout(2000).times(1)).archiveJob(any());
    } catch (Exception e) {
        // Fail with the cause instead of swallowing it behind a bare fail().
        e.printStackTrace();
        fail("unexpected exception: " + e);
    }
}
@Test
public void testJobSubmitWithNoJarAndSchedInfo() {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobSubmitWithNoJarAndSchedInfo";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        // Submit a job definition that omits both the artifact and scheduling
        // info; both should be inherited from the cluster definition.
        final JobDefinition jobDefn = new JobDefinition.Builder()
                .withName(clusterName)
                .withParameters(Lists.newArrayList())
                .withUser("njoshi")
                .withSubscriptionTimeoutSecs(300)
                .withJobSla(new JobSla(0, 0, JobSla.StreamSLAType.Lossy, MantisJobDurationType.Transient, ""))
                .build();
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        jobClusterActor.tell(new GetJobDetailsRequest("nj", JobId.fromId(jobId).get()), probe.getRef());
        GetJobDetailsResponse detailsResp = probe.expectMsgClass(GetJobDetailsResponse.class);
        // Artifact inherited from the cluster.
        assertEquals("myart", detailsResp.getJobMetadata().get().getArtifactName());
        // Scheduling info inherited from the cluster.
        assertEquals(SINGLE_WORKER_SCHED_INFO, detailsResp.getJobMetadata().get().getSchedulingInfo());
        JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 1), jobClusterActor);
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(1)).updateJobCluster(any());
    } catch (Exception e) {
        e.printStackTrace();
        fail("unexpected exception: " + e);
    }
}
@Test
public void testJobSubmitWithVersionAndNoSchedInfo() {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobSubmitWithVersionAndNoSchedInfo";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);

    // Update the cluster with a second artifact (version 0.0.2) carrying a
    // different scheduling info, so version-specific inheritance can be verified.
    JobClusterConfig clusterConfig = new JobClusterConfig.Builder()
            .withArtifactName("myart2")
            .withSchedulingInfo(TWO_WORKER_SCHED_INFO)
            .withVersion("0.0.2")
            .build();
    final JobClusterDefinitionImpl updatedFakeJobCluster = new JobClusterDefinitionImpl.Builder()
            .withJobClusterConfig(clusterConfig)
            .withName(clusterName)
            .withParameters(Lists.newArrayList())
            .withUser(user)
            .withIsReadyForJobMaster(true)
            .withOwner(DEFAULT_JOB_OWNER)
            .withMigrationConfig(WorkerMigrationConfig.DEFAULT)
            .withSla(NO_OP_SLA)
            .build();
    jobClusterActor.tell(new UpdateJobClusterRequest(updatedFakeJobCluster, "user"), probe.getRef());
    UpdateJobClusterResponse resp = probe.expectMsgClass(UpdateJobClusterResponse.class);
    // Previously this response was never asserted; fail fast if the update itself failed.
    assertEquals(SUCCESS, resp.responseCode);
    jobClusterActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
    GetJobClusterResponse getJobClusterResponse = probe.expectMsgClass(GetJobClusterResponse.class);
    assertEquals(2, getJobClusterResponse.getJobCluster().get().getJars().size());
    try {
        // Submit with version 0.0.2 and no scheduling info: both artifact and
        // scheduling info must come from that version's cluster config.
        final JobDefinition jobDefn = new JobDefinition.Builder()
                .withName(clusterName)
                .withParameters(Lists.newArrayList())
                .withUser("njoshi")
                .withVersion("0.0.2")
                .withSubscriptionTimeoutSecs(300)
                .withJobSla(new JobSla(0, 0, JobSla.StreamSLAType.Lossy, MantisJobDurationType.Transient, ""))
                .build();
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        jobClusterActor.tell(new GetJobDetailsRequest("nj", JobId.fromId(jobId).get()), probe.getRef());
        GetJobDetailsResponse detailsResp = probe.expectMsgClass(GetJobDetailsResponse.class);
        assertEquals("myart2", detailsResp.getJobMetadata().get().getArtifactName());
        assertEquals(TWO_WORKER_SCHED_INFO, detailsResp.getJobMetadata().get().getSchedulingInfo());

        // Submit with the original version 0.0.1 and no scheduling info: the
        // original artifact and scheduling info must be inherited instead.
        final JobDefinition jobDefn2 = new JobDefinition.Builder()
                .withName(clusterName)
                .withParameters(Lists.newArrayList())
                .withUser("njoshi")
                .withVersion("0.0.1")
                .withSubscriptionTimeoutSecs(300)
                .withJobSla(new JobSla(0, 0, JobSla.StreamSLAType.Lossy, MantisJobDurationType.Transient, ""))
                .build();
        String jobId2 = clusterName + "-2";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn2, jobId2);
        jobClusterActor.tell(new GetJobDetailsRequest("nj", JobId.fromId(jobId2).get()), probe.getRef());
        GetJobDetailsResponse detailsResp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
        assertEquals("myart", detailsResp2.getJobMetadata().get().getArtifactName());
        assertEquals(SINGLE_WORKER_SCHED_INFO, detailsResp2.getJobMetadata().get().getSchedulingInfo());
        JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 2), jobClusterActor);
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(3)).updateJobCluster(any());
    } catch (Exception e) {
        e.printStackTrace();
        fail("unexpected exception: " + e);
    }
}
@Ignore
@Test
public void testJobComplete() {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobComplete";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        // Drive the single worker through launched/initiated/started, then complete it.
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, 1, new WorkerId(jobId, 0, 1));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Launched);
        JobTestHelper.sendWorkerCompletedEvent(probe, jobClusterActor, jobId, new WorkerId(jobId, 0, 1));
        // Once the only worker completes, the job transitions to Completed and is archived.
        JobTestHelper.verifyJobStatusWithPolling(probe, jobClusterActor, jobId, JobState.Completed);
        verify(jobStoreMock, timeout(2000).times(1)).archiveJob(any());
    } catch (Exception e) {
        e.printStackTrace();
        fail("unexpected exception: " + e);
    }
}
@Test
public void testJobKillTriggersSLAToLaunchNew() {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobKillTriggersSLAToLaunchNew";
    MantisSchedulerFactory schedulerMockFactory = mock(MantisSchedulerFactory.class);
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    when(schedulerMockFactory.forJob(any())).thenReturn(schedulerMock);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    // min=1/max=1 SLA: the cluster must always keep exactly one job running.
    SLA sla = new SLA(1, 1, null, null);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, Lists.newArrayList(), sla);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMockFactory, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    String jobId = clusterName + "-1";
    WorkerId workerId1 = new WorkerId(clusterName, jobId, 0, 1);
    // When the scheduler is asked to unschedule the worker, echo back a terminate
    // event so the actor can complete the worker's lifecycle.
    doAnswer(invocation -> {
        WorkerEvent terminate = new WorkerTerminate(workerId1, WorkerState.Completed, JobCompletedReason.Killed, System.currentTimeMillis());
        jobClusterActor.tell(terminate, probe.getRef());
        return null;
    }).when(schedulerMock).unscheduleWorker(any(), any());
    try {
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient);
        JobId jId = new JobId(clusterName, 1);
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, 1, new WorkerId(clusterName, jobId, 0, 1));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Launched);
        JobTestHelper.killJobAndVerify(probe, clusterName, jId, jobClusterActor);
        // Allow the SLA enforcer time to react to the kill.
        Thread.sleep(500);
        // Killing the only job should make the SLA enforcer submit a replacement.
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, clusterName + "-2", SUCCESS, JobState.Accepted);
    } catch (Exception e) {
        e.printStackTrace();
        fail("unexpected exception: " + e);
    }
}
// TODO
// TODO @Test
public void testJobSubmitTriggersSLAToKillOld() {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobSubmitTriggersSLAToKillOld";
    MantisSchedulerFactory schedulerMockFactory = mock(MantisSchedulerFactory.class);
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    when(schedulerMockFactory.forJob(any())).thenReturn(schedulerMock);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    // max=1 SLA: submitting a second job should force the first one to complete.
    SLA sla = new SLA(1, 1, null, null);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, Lists.newArrayList(), sla);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMockFactory, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    String jobId = clusterName + "-1";
    WorkerId workerId1 = new WorkerId(clusterName, jobId, 0, 1);
    // Echo a terminate event when the scheduler is asked to unschedule, so the
    // actor can finish the worker lifecycle.
    doAnswer(invocation -> {
        WorkerEvent terminate = new WorkerTerminate(workerId1, WorkerState.Completed, JobCompletedReason.Killed, System.currentTimeMillis());
        jobClusterActor.tell(terminate, probe.getRef());
        return null;
    }).when(schedulerMock).unscheduleWorker(any(), any());
    try {
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient);
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, 1, new WorkerId(clusterName, jobId, 0, 1));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Launched);
        // Submit a second job; the SLA enforcer should kill the first one.
        String jobId2 = clusterName + "-2";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId2);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId2, SUCCESS, JobState.Accepted);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId2, 1, new WorkerId(clusterName, jobId2, 0, 1));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId2, SUCCESS, JobState.Launched);
        // Poll until the first job reaches Completed (avoids timing flakiness).
        assertTrue(JobTestHelper.verifyJobStatusWithPolling(probe, jobClusterActor, jobId, JobState.Completed));
        JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 2), jobClusterActor);
    } catch (Exception e) {
        e.printStackTrace();
        fail("unexpected exception: " + e);
    }
}
//TODO @Test
public void testJobSubmitTriggersSLAToKillOldHandlesErrors() {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobSubmitTriggersSLAToKillOldHandlesErrors";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    // max=1 SLA: a second submit should evict the first job even when archival fails.
    SLA sla = new SLA(1, 1, null, null);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, Lists.newArrayList(), sla);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        // Simulate a store failure while archiving workers; SLA enforcement
        // should still complete the evicted job.
        doThrow(new NullPointerException("NPE archiving worker")).when(jobStoreMock).archiveWorker(any());
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, 1, new WorkerId(clusterName, jobId, 0, 1));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Launched);
        // Second submit triggers the SLA enforcer to complete the first job.
        String jobId2 = clusterName + "-2";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId2);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId2, SUCCESS, JobState.Accepted);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId2, 1, new WorkerId(clusterName, jobId2, 0, 1));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId2, SUCCESS, JobState.Launched);
        // Poll until the first job reaches Completed (avoids timing flakiness).
        assertTrue(JobTestHelper.verifyJobStatusWithPolling(probe, jobClusterActor, jobId, JobState.Completed));
        // Only the second job should remain active.
        jobClusterActor.tell(new ListJobIdsRequest(), probe.getRef());
        ListJobIdsResponse listResp = probe.expectMsgClass(ListJobIdsResponse.class);
        assertEquals(1, listResp.getJobIds().size());
        assertEquals(jobId2, listResp.getJobIds().get(0).getJobId());
    } catch (Exception e) {
        e.printStackTrace();
        fail("unexpected exception: " + e);
    }
}
/**
 * {@see <a href="https://github.com/Netflix/mantis/issues/195">Github issue</a> for more context}
 */
@Ignore
@Test
public void testCronTriggersSLAToKillOld() {
    TestKit probe = new TestKit(system);
    // NOTE(review): previously reused "testJobSubmitTriggersSLAToKillOld" as the
    // cluster name (copy-paste); use a name matching this test so it cannot
    // collide with the other test's cluster.
    String clusterName = "testCronTriggersSLAToKillOld";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    // Cron every second with KEEP_NEW: the cron-submitted job should replace the old one.
    SLA sla = new SLA(1, 1, "0/1 * * * * ?", IJobClusterDefinition.CronPolicy.KEEP_NEW);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, Lists.newArrayList(), sla);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, 1, new WorkerId(clusterName, jobId, 0, 1));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Launched);
        // The cron trigger should submit job -2; poll for it to avoid timing flakiness.
        String jobId2 = clusterName + "-2";
        assertTrue(JobTestHelper.verifyJobStatusWithPolling(probe, jobClusterActor, jobId2, JobState.Accepted));
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId2, 1, new WorkerId(clusterName, jobId2, 0, 1));
        // KEEP_NEW: once the cron job is up, the original job must be completed.
        assertTrue(JobTestHelper.verifyJobStatusWithPolling(probe, jobClusterActor, jobId, JobState.Completed));
    } catch (Exception e) {
        e.printStackTrace();
        fail("unexpected exception: " + e);
    }
}
@Test
public void testJobSubmitWithUnique() {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobSubmitWithUnique";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        // Job with a unique tag ("mytype"): resubmitting the same definition
        // should return the existing job id instead of creating a new job.
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient, "mytype");
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, 1, new WorkerId(jobId, 0, 1));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Launched);
        jobClusterActor.tell(new SubmitJobRequest(clusterName, "user", jobDefn), probe.getRef());
        SubmitJobResponse submitResponse = probe.expectMsgClass(SubmitJobResponse.class);
        // The resubmit returns the id of the already-running job.
        assertTrue(submitResponse.getJobId().isPresent());
        assertEquals(jobId, submitResponse.getJobId().get().getId());
        JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 1), jobClusterActor);
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(1)).updateJobCluster(any());
        // Only one job was ever persisted despite two submits.
        verify(jobStoreMock, times(1)).storeNewJob(any());
    } catch (Exception e) {
        e.printStackTrace();
        fail("unexpected exception: " + e);
    }
}
@Test
public void testJobSubmitWithoutInheritInstance() {
    // Verifies that WITHOUT any inheritInstanceCount deployment flags, a new job keeps its
    // own SchedulingInfo instance counts instead of inheriting from the previous job.
    TestKit probe = new TestKit(system);
    // Fixed: was "testJobSubmitWithInheritInstance", which contradicted this method's name
    // and was shared with three other tests; the file convention is clusterName == test name.
    String clusterName = "testJobSubmitWithoutInheritInstance";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        // First job: default definition with 1 stage (1 worker).
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        // Second job: 3 workers in stage 1, no deployment strategy (no inheritance requested).
        final String jobId2 = clusterName + "-2";
        final SchedulingInfo schedulingInfo = new SchedulingInfo.Builder()
            .numberOfStages(1)
            .multiWorkerStage(3, DEFAULT_MACHINE_DEFINITION)
            .build();
        final JobDefinition jobDefn2Workers = createJob(clusterName, MantisJobDurationType.Transient, schedulingInfo);
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn2Workers, jobId2);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId2, SUCCESS, JobState.Accepted);
        final JobId jobId2Id = JobId.fromId(jobId2).get();
        jobClusterActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId2Id), probe.getRef());
        JobClusterManagerProto.GetJobDetailsResponse detailsResp = probe.expectMsgClass(Duration.ofSeconds(60), JobClusterManagerProto.GetJobDetailsResponse.class);
        assertTrue(detailsResp.getJobMetadata().isPresent());
        assertEquals(jobId2, detailsResp.getJobMetadata().get().getJobId().getId());
        final SchedulingInfo actualSchedulingInfo = detailsResp.getJobMetadata().get().getSchedulingInfo();
        assertEquals(1, actualSchedulingInfo.getStages().size());
        // The submitted count (3) is honored; nothing was inherited from job 1.
        assertEquals(3, actualSchedulingInfo.forStage(1).getNumberOfInstances());
        JobTestHelper.killJobAndVerify(probe, clusterName, jobId2Id, jobClusterActor);
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(2)).updateJobCluster(any());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
@Test
public void testJobSubmitWithInheritInstanceFlagsSingleStage() {
    // Verifies that a stage marked inheritInstanceCount=true takes its instance count from
    // the previous job (1 worker) rather than its own SchedulingInfo (3 workers).
    TestKit probe = new TestKit(system);
    // Fixed: unique cluster name (was shared verbatim with three other inherit-instance tests).
    String clusterName = "testJobSubmitWithInheritInstanceFlagsSingleStage";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        // First job: default definition with 1 stage (1 worker).
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        // Second job asks for 3 workers but flags stage 1 to inherit the running job's count.
        final String jobId2 = clusterName + "-2";
        final SchedulingInfo schedulingInfo = new SchedulingInfo.Builder()
            .numberOfStages(1)
            .multiWorkerStage(3, DEFAULT_MACHINE_DEFINITION)
            .build();
        final DeploymentStrategy deploymentStrategy = DeploymentStrategy.builder()
            .stage(1, StageDeploymentStrategy.builder().inheritInstanceCount(true).build())
            .build();
        final JobDefinition jobDefn2Workers = createJob(
            clusterName, MantisJobDurationType.Transient, schedulingInfo, deploymentStrategy);
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn2Workers, jobId2);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId2, SUCCESS, JobState.Accepted);
        final JobId jobId2Id = JobId.fromId(jobId2).get();
        jobClusterActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId2Id), probe.getRef());
        JobClusterManagerProto.GetJobDetailsResponse detailsResp = probe.expectMsgClass(Duration.ofSeconds(60), JobClusterManagerProto.GetJobDetailsResponse.class);
        assertTrue(detailsResp.getJobMetadata().isPresent());
        assertEquals(jobId2, detailsResp.getJobMetadata().get().getJobId().getId());
        final SchedulingInfo actualSchedulingInfo = detailsResp.getJobMetadata().get().getSchedulingInfo();
        assertEquals(1, actualSchedulingInfo.getStages().size());
        // Inherited: 1 instance from job 1, not the 3 requested in schedulingInfo.
        assertEquals(1, actualSchedulingInfo.forStage(1).getNumberOfInstances());
        JobTestHelper.killJobAndVerify(probe, clusterName, jobId2Id, jobClusterActor);
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(2)).updateJobCluster(any());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
@Test
public void testJobSubmitWithInheritInstanceFlagsMultiStage() {
    // Verifies per-stage inheritance: stages flagged inheritInstanceCount=true (1 and 3)
    // keep the previous job's counts, while unflagged/false stages (2 and 4) use their own.
    TestKit probe = new TestKit(system);
    // Fixed: unique cluster name (was shared verbatim with three other inherit-instance tests).
    String clusterName = "testJobSubmitWithInheritInstanceFlagsMultiStage";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    // First job: 4 stages with 2 workers each. (Previous comment wrongly said "3 stage".)
    final SchedulingInfo schedulingInfo1 = new SchedulingInfo.Builder()
        .numberOfStages(4)
        .multiWorkerStage(2, DEFAULT_MACHINE_DEFINITION)
        .multiWorkerStage(2, DEFAULT_MACHINE_DEFINITION)
        .multiWorkerStage(2, DEFAULT_MACHINE_DEFINITION)
        .multiWorkerStage(2, DEFAULT_MACHINE_DEFINITION)
        .build();
    final JobClusterDefinitionImpl fakeJobCluster =
        createFakeJobClusterDefn(clusterName, Lists.newArrayList(), NO_OP_SLA, schedulingInfo1);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(
        fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp =
        probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        final JobDefinition jobDefn = createJob(clusterName, MantisJobDurationType.Transient, schedulingInfo1);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        // Second job: counts 3/4/5/6 per stage; stages 1 and 3 flagged to inherit, stage 4
        // explicitly NOT inheriting, stage 2 has no flag at all.
        final String jobId2 = clusterName + "-2";
        final SchedulingInfo schedulingInfo2 = new SchedulingInfo.Builder()
            .numberOfStages(4)
            .multiWorkerStage(3, DEFAULT_MACHINE_DEFINITION)
            .multiWorkerStage(4, DEFAULT_MACHINE_DEFINITION)
            .multiWorkerStage(5, DEFAULT_MACHINE_DEFINITION)
            .multiWorkerStage(6, DEFAULT_MACHINE_DEFINITION)
            .build();
        final DeploymentStrategy deploymentStrategy = DeploymentStrategy.builder()
            .stage(1, StageDeploymentStrategy.builder().inheritInstanceCount(true).build())
            .stage(3, StageDeploymentStrategy.builder().inheritInstanceCount(true).build())
            .stage(4, StageDeploymentStrategy.builder().inheritInstanceCount(false).build())
            .build();
        final String artifactV2 = "artVer-2";
        final JobDefinition jobDefn2Workers = createJob(clusterName, MantisJobDurationType.Transient, schedulingInfo2,
            jobDefn.getArtifactName(), artifactV2, deploymentStrategy);
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn2Workers, jobId2);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId2, SUCCESS, JobState.Accepted);
        final JobId jobId2Id = JobId.fromId(jobId2).get();
        jobClusterActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId2Id), probe.getRef());
        JobClusterManagerProto.GetJobDetailsResponse detailsResp =
            probe.expectMsgClass(Duration.ofSeconds(60), JobClusterManagerProto.GetJobDetailsResponse.class);
        assertTrue(detailsResp.getJobMetadata().isPresent());
        assertEquals(jobId2, detailsResp.getJobMetadata().get().getJobId().getId());
        assertEquals(artifactV2, detailsResp.getJobMetadata().get().getJobDefinition().getVersion());
        final SchedulingInfo actualSchedulingInfo = detailsResp.getJobMetadata().get().getSchedulingInfo();
        assertEquals(4, actualSchedulingInfo.getStages().size());
        // Stages 1/3 inherit the old count (2); stages 2/4 apply their new counts (4, 6).
        assertEquals(2, actualSchedulingInfo.forStage(1).getNumberOfInstances());
        assertEquals(4, actualSchedulingInfo.forStage(2).getNumberOfInstances());
        assertEquals(2, actualSchedulingInfo.forStage(3).getNumberOfInstances());
        assertEquals(6, actualSchedulingInfo.forStage(4).getNumberOfInstances());
        JobTestHelper.killJobAndVerify(probe, clusterName, jobId2Id, jobClusterActor);
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(2)).updateJobCluster(any());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
@Test
public void testJobSubmitWithInheritInstanceFlagsScaled() {
    // Verifies that inheritInstanceCount picks up the CURRENT (post-scale) worker count of
    // the running job, not the count it was originally submitted with.
    TestKit probe = new TestKit(system);
    // Fixed: unique cluster name (was shared verbatim with three other inherit-instance tests).
    String clusterName = "testJobSubmitWithInheritInstanceFlagsScaled";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    // First job: 2 scalable stages with 1 worker each. (Previous comment wrongly said "3 stage".)
    final SchedulingInfo schedulingInfo1 = new SchedulingInfo.Builder()
        .numberOfStages(2)
        .multiWorkerStage(1, DEFAULT_MACHINE_DEFINITION, true)
        .multiWorkerStage(1, DEFAULT_MACHINE_DEFINITION, true)
        .build();
    final JobClusterDefinitionImpl fakeJobCluster =
        createFakeJobClusterDefn(clusterName, Lists.newArrayList(), NO_OP_SLA, schedulingInfo1);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(
        fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp =
        probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        final JobDefinition jobDefn = createJob(clusterName, MantisJobDurationType.Transient, schedulingInfo1);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        // Drive both workers to Started so the job reaches Launched and can be scaled.
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, 1, new WorkerId(jobId, 0, 1));
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, 2, new WorkerId(jobId, 0, 2));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Launched);
        // Scale stage 1 from 1 to 2 workers.
        JobTestHelper.scaleStageAndVerify(probe, jobClusterActor, jobId, 1, 2);
        // Second job: requests 3/4 workers but flags both stages to inherit.
        final String jobId2 = clusterName + "-2";
        final SchedulingInfo schedulingInfo2 = new SchedulingInfo.Builder()
            .numberOfStages(2)
            .multiWorkerStage(3, DEFAULT_MACHINE_DEFINITION)
            .multiWorkerStage(4, DEFAULT_MACHINE_DEFINITION)
            .build();
        final DeploymentStrategy deploymentStrategy = DeploymentStrategy.builder()
            .stage(1, StageDeploymentStrategy.builder().inheritInstanceCount(true).build())
            .stage(2, StageDeploymentStrategy.builder().inheritInstanceCount(true).build())
            .build();
        final JobDefinition jobDefn2Workers = createJob(
            clusterName, MantisJobDurationType.Transient, schedulingInfo2, deploymentStrategy);
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn2Workers, jobId2);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId2, SUCCESS, JobState.Accepted);
        final JobId jobId2Id = JobId.fromId(jobId2).get();
        jobClusterActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId2Id), probe.getRef());
        JobClusterManagerProto.GetJobDetailsResponse detailsResp =
            probe.expectMsgClass(Duration.ofSeconds(60), JobClusterManagerProto.GetJobDetailsResponse.class);
        assertTrue(detailsResp.getJobMetadata().isPresent());
        assertEquals(jobId2, detailsResp.getJobMetadata().get().getJobId().getId());
        final SchedulingInfo actualSchedulingInfo = detailsResp.getJobMetadata().get().getSchedulingInfo();
        assertEquals(2, actualSchedulingInfo.getStages().size());
        // Stage 1 inherits the scaled count (2); stage 2 inherits its unscaled count (1).
        assertEquals(2, actualSchedulingInfo.forStage(1).getNumberOfInstances());
        assertEquals(1, actualSchedulingInfo.forStage(2).getNumberOfInstances());
        JobTestHelper.killJobAndVerify(probe, clusterName, jobId2Id, jobClusterActor);
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(2)).updateJobCluster(any());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
@Test
public void testQuickJobSubmit() {
    // Verifies a "quick submit" (null JobDefinition) succeeds by reusing the definition of
    // the previously submitted job.
    TestKit probe = new TestKit(system);
    String clusterName = "testQuickJobSubmit";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        // Quick submit: null definition should clone the previous job's definition.
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, null, clusterName + "-2");
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, clusterName + "-2", SUCCESS, JobState.Accepted);
        JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 1), jobClusterActor);
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(2)).updateJobCluster(any());
    } catch (Exception e) {
        e.printStackTrace();
        // Surface the failure reason in the test report instead of a bare fail().
        fail(e.getMessage());
    }
}
@Test
public void testQuickJobSubmitWithNoSchedInfoInPreviousJob() {
    // Verifies a quick submit still succeeds when the previous job's definition carried no
    // explicit SchedulingInfo (cluster defaults presumably fill the gap -- TODO confirm).
    TestKit probe = new TestKit(system);
    String clusterName = "testQuickJobSubmitWithNoSchedInfoInPreviousJob";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        // First job: definition WITHOUT scheduling info.
        final JobDefinition jobDefn = new JobDefinition.Builder()
            .withName(clusterName)
            .withParameters(Lists.newArrayList())
            .withLabels(Lists.newArrayList())
            .withVersion("0.0.1")
            .withSubscriptionTimeoutSecs(300)
            .withUser("njoshi")
            .withJobSla(new JobSla(0, 0, JobSla.StreamSLAType.Lossy, MantisJobDurationType.Transient, "abc"))
            .build();
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        // Quick submit: null definition reuses the previous job's definition.
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, null, clusterName + "-2");
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, clusterName + "-2", SUCCESS, JobState.Accepted);
        JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 1), jobClusterActor);
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(2)).updateJobCluster(any());
    } catch (Exception e) {
        e.printStackTrace();
        // Surface the failure reason in the test report instead of a bare fail().
        fail(e.getMessage());
    }
}
@Test
public void testJobSubmitWithNoSchedInfoUsesJobClusterValues() {
    // Verifies label inheritance: a job with no labels inherits the cluster's labels; a job
    // that supplies its own labels gets those (plus the mandatory system labels).
    TestKit probe = new TestKit(system);
    String clusterName = "testJobSubmitWithNoSchedInfoUsesJobClusterValues";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    List<Label> clusterLabels = new ArrayList<>();
    Label label = new Label("clabelName", "cLabelValue");
    clusterLabels.add(label);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, clusterLabels);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        // Job 1: no labels of its own.
        final JobDefinition jobDefn = new JobDefinition.Builder()
            .withName(clusterName)
            .withVersion("0.0.1")
            .withSubscriptionTimeoutSecs(0)
            .withUser("njoshi")
            .build();
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        jobClusterActor.tell(new GetJobDetailsRequest("nj", JobId.fromId(jobId).get()), probe.getRef());
        GetJobDetailsResponse detailsResp = probe.expectMsgClass(GetJobDetailsResponse.class);
        assertEquals(SUCCESS, detailsResp.responseCode);
        assertEquals(JobState.Accepted, detailsResp.getJobMetadata().get().getState());
        // Cluster labels plus the mandatory system-added labels.
        assertEquals(clusterLabels.size() + LabelManager.numberOfMandatoryLabels(), detailsResp.getJobMetadata().get().getLabels().size());
        // The cluster's label was inherited.
        assertEquals(1, detailsResp.getJobMetadata().get()
            .getLabels().stream().filter(l -> l.getName().equals("clabelName")).count());
        // Job 2: supplies its own label.
        Label jobLabel = new Label("jobLabel", "jobValue");
        List<Label> jobLabelList = new ArrayList<>();
        jobLabelList.add(jobLabel);
        final JobDefinition jobDefn2 = new JobDefinition.Builder()
            .withName(clusterName)
            .withVersion("0.0.1")
            .withLabels(jobLabelList)
            .withSubscriptionTimeoutSecs(0)
            .withUser("njoshi")
            .build();
        String jobId2 = clusterName + "-2";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn2, jobId2);
        jobClusterActor.tell(new GetJobDetailsRequest("nj", JobId.fromId(jobId2).get()), probe.getRef());
        GetJobDetailsResponse detailsResp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
        assertEquals(SUCCESS, detailsResp2.responseCode);
        assertEquals(JobState.Accepted, detailsResp2.getJobMetadata().get().getState());
        assertEquals(clusterLabels.size() + 2, detailsResp2.getJobMetadata().get().getLabels().size());
        // The job-supplied label is present.
        assertEquals(1, detailsResp2.getJobMetadata().get()
            .getLabels().stream().filter(l -> l.getName().equals(jobLabel.getName())).count());
    } catch (Exception e) {
        e.printStackTrace();
        // Surface the failure reason in the test report instead of a bare fail().
        fail(e.getMessage());
    }
}
@Test
public void testQuickJobSubmitWithNoPreviousHistoryFails() {
    // NOTE(review): despite the "...Fails" name, this asserts the quick submit SUCCEEDS and
    // the job reaches Accepted. The cluster defaults presumably back-fill the definition --
    // confirm the intent and rename the method if the failure path was meant to be tested.
    TestKit probe = new TestKit(system);
    String clusterName = "testQuickJobSubmitWithNoPreviousHistoryFails";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        // Quick submit (null definition) with no previously submitted job in this cluster.
        final JobDefinition jobDefn = null;
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifyStatus(probe, clusterName, jobClusterActor, jobDefn, jobId, SUCCESS);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(1)).updateJobCluster(any());
    } catch (Exception e) {
        e.printStackTrace();
        // Surface the failure reason in the test report instead of a bare fail().
        fail(e.getMessage());
    }
}
@Test
public void testUpdateJobClusterArtifactWithAutoSubmit() {
    // Verifies that updating the cluster artifact with skipSubmit=false auto-submits a new
    // job that (a) carries the new artifact and (b) inherits the running job's instance counts.
    TestKit probe = new TestKit(system);
    try {
        String clusterName = "testUpdateJobClusterArtifactWithAutoSubmit";
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        // When running concurrently with testGetJobDetailsForArchivedJob this stub avoids an NPE.
        when(jobStoreMock.getArchivedJob(anyString())).thenReturn(empty());
        SLA sla = new SLA(1, 1, null, null);
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, Lists.newArrayList(), sla);
        ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
        jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
        JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
        assertEquals(SUCCESS, createResp.responseCode);
        // Submit a job whose stage-1 instance count (3) differs from the cluster default.
        final int job1InstanceCnt = 3;
        final JobDefinition jobDefn = createJob(
            clusterName,
            MantisJobDurationType.Transient,
            new SchedulingInfo.Builder().numberOfStages(1)
                .addStage(fakeJobCluster.getJobClusterConfig().getSchedulingInfo().forStage(1).toBuilder()
                    .numberOfInstances(job1InstanceCnt)
                    .build())
                .build());
        String jobId = clusterName + "-1";
        jobClusterActor.tell(new SubmitJobRequest(clusterName, "user", jobDefn), probe.getRef());
        SubmitJobResponse submitResponse = probe.expectMsgClass(SubmitJobResponse.class);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, 1, new WorkerId(clusterName, jobId, 0, 1));
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, BaseResponse.ResponseCode.SUCCESS, JobState.Accepted);
        // Update the artifact with skipSubmit=false, which should auto-launch job -2.
        String artifact = "newartifact.zip";
        String version = "0.0.2";
        jobClusterActor.tell(new UpdateJobClusterArtifactRequest(clusterName, artifact, version, false, user), probe.getRef());
        UpdateJobClusterArtifactResponse resp = probe.expectMsgClass(UpdateJobClusterArtifactResponse.class);
        String jobId2 = clusterName + "-2";
        assertTrue(JobTestHelper.verifyJobStatusWithPolling(probe, jobClusterActor, jobId2, JobState.Accepted));
        // Move the new job's worker through its lifecycle events.
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId2, 1, new WorkerId(clusterName, jobId2, 0, 1));
        jobClusterActor.tell(new GetJobDetailsRequest("nj", JobId.fromId(jobId2).get()), probe.getRef());
        GetJobDetailsResponse detailsResp = probe.expectMsgClass(Duration.ofSeconds(5), GetJobDetailsResponse.class);
        assertEquals(JobState.Accepted, detailsResp.getJobMetadata().get().getState());
        assertEquals(artifact, detailsResp.getJobMetadata().get().getArtifactName());
        // The auto-submitted job must inherit job 1's instance count for every stage.
        AtomicBoolean hasStage = new AtomicBoolean(false);
        detailsResp.getJobMetadata().get().getSchedulingInfo().getStages().forEach((stageId, stageInfo) -> {
            hasStage.set(true);
            assertEquals(
                job1InstanceCnt,
                detailsResp.getJobMetadata().get().getSchedulingInfo().forStage(stageId).getNumberOfInstances());
        });
        assertTrue(hasStage.get());
        assertTrue(JobTestHelper.verifyJobStatusWithPolling(probe, jobClusterActor, jobId2, JobState.Accepted));
    } catch (InvalidJobException e) {
        e.printStackTrace();
        // Previously the exception was swallowed, so the test passed silently on failure.
        fail(e.getMessage());
    }
}
@Test
public void testJobSubmitFails() {
    // Verifies that when the job store throws on storeNewJob, the submit responds with
    // SERVER_ERROR and no workers are persisted.
    TestKit probe = new TestKit(system);
    try {
        String clusterName = "testJobSubmitFails";
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
        // Force persistence of the new job to fail.
        Mockito.doThrow(Exception.class).when(jobStoreMock).storeNewJob(any());
        ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
        jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
        JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
        assertEquals(SUCCESS, createResp.responseCode);
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient);
        String jobId = clusterName + "-1";
        jobClusterActor.tell(new SubmitJobRequest(clusterName, "user", jobDefn), probe.getRef());
        SubmitJobResponse submitResponse = probe.expectMsgClass(SubmitJobResponse.class);
        assertEquals(SERVER_ERROR, submitResponse.responseCode);
        verify(jobStoreMock, times(1)).createJobCluster(any());
        verify(jobStoreMock, times(1)).updateJobCluster(any());
        // No workers should have been persisted for the failed job.
        verify(jobStoreMock, times(0)).storeNewWorker(any());
        verify(jobStoreMock, times(0)).storeNewWorkers(any(), any());
    } catch (Exception e) {
        // Previously a bare fail() discarded all diagnostics for setup failures.
        e.printStackTrace();
        fail(e.getMessage());
    }
}
////////////////////////////////// JOB SUBMIT OPERATIONS END/////////////////////////////////////////////////////////////
////////////////////////////////// OTHER JOB OPERATIONS //////////////////////////////////////////////////////////////
@Test
public void testGetLastSubmittedJobSubject() {
    // Verifies that the last-submitted-job-id stream emits the id of a newly submitted job.
    TestKit probe = new TestKit(system);
    String clusterName = "testGetLastSubmittedJobSubject";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        jobClusterActor.tell(new GetLastSubmittedJobIdStreamRequest(clusterName), probe.getRef());
        GetLastSubmittedJobIdStreamResponse getLastSubmittedJobIdStreamResponse = probe.expectMsgClass(GetLastSubmittedJobIdStreamResponse.class);
        assertEquals(SUCCESS, getLastSubmittedJobIdStreamResponse.responseCode);
        assertTrue(getLastSubmittedJobIdStreamResponse.getjobIdBehaviorSubject().isPresent());
        BehaviorSubject<JobId> jobIdBehaviorSubject =
            getLastSubmittedJobIdStreamResponse.getjobIdBehaviorSubject().get();
        // Capture the emitted id and assert on the main thread: an AssertionError thrown
        // inside the subscriber runs on a Scheduler thread and would NOT fail the test.
        CountDownLatch jobIdLatch = new CountDownLatch(1);
        final String[] observedJobId = {null};
        jobIdBehaviorSubject.subscribeOn(Schedulers.io()).subscribe((jId) -> {
            observedJobId[0] = jId.getId();
            jobIdLatch.countDown();
        });
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
        // Fixed: await()'s boolean result was ignored (and the timeout was 1000 SECONDS),
        // so a missing emission could never fail the test.
        assertTrue("timed out waiting for last-submitted job id", jobIdLatch.await(30, TimeUnit.SECONDS));
        assertEquals(clusterName + "-1", observedJobId[0]);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
@Test
public void testGetLastSubmittedJobSubjectWithWrongClusterNameFails() {
    // Verifies that requesting the last-submitted-job-id stream with a cluster name that does
    // not match this actor's cluster returns CLIENT_ERROR and no subject, and that the actor
    // still functions normally afterwards.
    TestKit probe = new TestKit(system);
    String clusterName = "testGetLastSubmittedJobSubjectWithWrongClusterNameFails";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
    ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
    jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
    JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
    assertEquals(SUCCESS, createResp.responseCode);
    try {
        // Ask with a name that does not belong to this cluster actor.
        jobClusterActor.tell(new GetLastSubmittedJobIdStreamRequest("randomCluster"), probe.getRef());
        GetLastSubmittedJobIdStreamResponse getLastSubmittedJobIdStreamResponse = probe.expectMsgClass(GetLastSubmittedJobIdStreamResponse.class);
        assertEquals(CLIENT_ERROR, getLastSubmittedJobIdStreamResponse.responseCode);
        assertTrue(!getLastSubmittedJobIdStreamResponse.getjobIdBehaviorSubject().isPresent());
        // A normal submit still works after the rejected request.
        final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient);
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
        JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
    } catch (Exception e) {
        e.printStackTrace();
        // Surface the failure reason in the test report instead of a bare fail().
        fail(e.getMessage());
    }
}
@Test
public void testListArchivedWorkers() {
TestKit probe = new TestKit(system);
String clusterName = "testListArchivedWorkers";
MantisScheduler schedulerMock = mock(MantisScheduler.class);
MantisScheduler scheduler = mock(MantisScheduler.class);
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStore, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
assertEquals(SUCCESS, createResp.responseCode);
String jobId = clusterName + "-1";
try {
final JobDefinition jobDefn = createJob(clusterName,1, MantisJobDurationType.Transient);
JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe,jobClusterActor,jobId,1,new WorkerId(jobId,0,1));
JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Launched);
jobClusterActor.tell(new ResubmitWorkerRequest(jobId,1,user,of("justbecause")),probe.getRef());
ResubmitWorkerResponse resp = probe.expectMsgClass(ResubmitWorkerResponse.class);
assertTrue(BaseResponse.ResponseCode.SUCCESS.equals(resp.responseCode));
jobClusterActor.tell(new ListArchivedWorkersRequest(new JobId(clusterName, 1)),probe.getRef());
ListArchivedWorkersResponse archivedWorkersResponse = probe.expectMsgClass(ListArchivedWorkersResponse.class);
assertEquals(SUCCESS, archivedWorkersResponse.responseCode);
assertEquals(1,archivedWorkersResponse.getWorkerMetadata().size());
IMantisWorkerMetadata archivedWorker = archivedWorkersResponse.getWorkerMetadata().get(0);
assertEquals(1,archivedWorker.getWorkerNumber());
assertEquals(0,archivedWorker.getWorkerIndex());
assertEquals(0, archivedWorker.getResubmitOf());
JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 1), jobClusterActor);
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
fail();
}
}
@Test
public void testZombieWorkerKilledOnMessage() {
String clusterName = "testZombieWorkerKilledOnMessage";
TestKit probe = new TestKit(system);
MantisSchedulerFactory schedulerMockFactory = mock(MantisSchedulerFactory.class);
MantisScheduler schedulerMock = mock(MantisScheduler.class);
when(schedulerMockFactory.forJob(any())).thenReturn(schedulerMock);
MantisJobStore jobStoreMock = mock(MantisJobStore.class);
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMockFactory, eventPublisher, costsCalculator));
jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
assertEquals(SUCCESS, createResp.responseCode);
try {
String jobId = clusterName + "-1";
when(jobStoreMock.getArchivedJob(jobId))
.thenReturn(Optional.of(
new MantisJobMetadataImpl.Builder().withJobDefinition(mock(JobDefinition.class)).build()));
WorkerId workerId = new WorkerId(clusterName, jobId,0,1);
WorkerEvent heartBeat2 = new WorkerHeartbeat(new Status(jobId, 1, workerId.getWorkerIndex(), workerId.getWorkerNum(), TYPE.HEARTBEAT, "", MantisJobState.Started, System.currentTimeMillis()));
jobClusterActor.tell(heartBeat2, probe.getRef());
jobClusterActor.tell(new GetJobClusterRequest(clusterName),probe.getRef());
GetJobClusterResponse resp = probe.expectMsgClass(GetJobClusterResponse.class);
assertEquals(clusterName,resp.getJobCluster().get().getName());
verify(schedulerMock,times(1)).unscheduleAndTerminateWorker(workerId,empty());
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
@Test
public void testZombieWorkerTerminateEventIgnored() {
TestKit probe = new TestKit(system);
String clusterName = "testZombieWorkerTerminateEventIgnored";
MantisSchedulerFactory schedulerMockFactory = mock(MantisSchedulerFactory.class);
MantisScheduler schedulerMock = mock(MantisScheduler.class);
when(schedulerMockFactory.forJob(any())).thenReturn(schedulerMock);
MantisJobStore jobStoreMock = mock(MantisJobStore.class);
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMockFactory, eventPublisher, costsCalculator));
jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
assertEquals(SUCCESS, createResp.responseCode);
try {
String jobId = clusterName + "-1";
WorkerId workerId = new WorkerId(clusterName, jobId,0,1);
JobTestHelper.sendWorkerTerminatedEvent(probe,jobClusterActor,jobId,workerId);
verify(schedulerMock,times(0)).unscheduleAndTerminateWorker(workerId,empty());
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
@Test
public void testResubmitWorker() {
TestKit probe = new TestKit(system);
String clusterName = "testResubmitWorker";
MantisSchedulerFactory schedulerMockFactory = mock(MantisSchedulerFactory.class);
MantisScheduler schedulerMock = mock(MantisScheduler.class);
when(schedulerMockFactory.forJob(any())).thenReturn(schedulerMock);
MantisJobStore jobStoreMock = mock(MantisJobStore.class);
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMockFactory, eventPublisher, costsCalculator));
jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
assertEquals(SUCCESS, createResp.responseCode);
try {
final JobDefinition jobDefn = createJob(clusterName,1, MantisJobDurationType.Transient);
String jobId = clusterName + "-1";
JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe,jobClusterActor,jobId,1,new WorkerId(jobId,0,1));
JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Launched);
jobClusterActor.tell(new ResubmitWorkerRequest(jobId,1,user,of("justbecause")),probe.getRef());
ResubmitWorkerResponse resp = probe.expectMsgClass(ResubmitWorkerResponse.class);
assertTrue(BaseResponse.ResponseCode.SUCCESS.equals(resp.responseCode));
jobClusterActor.tell(new GetJobDetailsRequest("nj", JobId.fromId(jobId).get()), probe.getRef());
GetJobDetailsResponse detailsResp = probe.expectMsgClass(GetJobDetailsResponse.class);
IMantisWorkerMetadata workerMetadata = detailsResp.getJobMetadata().get().getWorkerByIndex(1,0).get().getMetadata();
assertEquals(2,workerMetadata.getWorkerNumber());
assertEquals(1,workerMetadata.getResubmitOf());
assertEquals(1, workerMetadata.getTotalResubmitCount());
JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 1), jobClusterActor);
verify(jobStoreMock, times(1)).createJobCluster(any());
verify(jobStoreMock, times(1)).updateJobCluster(any());
verify(jobStoreMock,times(1)).replaceTerminatedWorker(any(),any());
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
fail();
}
}
    /**
     * Submits a job whose single stage is scalable (1..10 workers, starting at 1)
     * and verifies that a ScaleStageRequest to 2 workers succeeds, persists the new
     * worker, and schedules it. Interaction counts below pin the exact store and
     * scheduler choreography, so the statement order in this test is significant.
     */
    @Test
    public void testScaleStage() {
        TestKit probe = new TestKit(system);
        try {
            String clusterName = "testScaleStage";
            MantisSchedulerFactory schedulerMockFactory = mock(MantisSchedulerFactory.class);
            MantisScheduler schedulerMock = mock(MantisScheduler.class);
            when(schedulerMockFactory.forJob(any())).thenReturn(schedulerMock);
            MantisJobStore jobStoreMock = mock(MantisJobStore.class);
            final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
            ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, schedulerMockFactory, eventPublisher, costsCalculator));
            jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
            JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
            assertEquals(SUCCESS, createResp.responseCode);
            // Scaling strategies; the threshold values are arbitrary for this test.
            Map<StageScalingPolicy.ScalingReason, StageScalingPolicy.Strategy> smap = new HashMap<>();
            smap.put(StageScalingPolicy.ScalingReason.CPU, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.CPU, 0.5, 0.75, null));
            smap.put(StageScalingPolicy.ScalingReason.DataDrop, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.DataDrop, 0.0, 2.0, null));
            // One scalable stage starting with a single worker, min 1 / max 10.
            SchedulingInfo SINGLE_WORKER_SCHED_INFO = new SchedulingInfo.Builder().numberOfStages(1)
                .multiWorkerScalableStageWithConstraints(1,DEFAULT_MACHINE_DEFINITION,Lists.newArrayList(),Lists.newArrayList(),new StageScalingPolicy(1,1,10,1,1,1, smap)).build();
            final JobDefinition jobDefn = createJob(clusterName, 1, MantisJobDurationType.Transient, "USER_TYPE", SINGLE_WORKER_SCHED_INFO, Lists.newArrayList());
            String jobId = clusterName + "-1";
            jobClusterActor.tell(new SubmitJobRequest(clusterName, "user", jobDefn), probe.getRef());
            SubmitJobResponse submitResponse = probe.expectMsgClass(SubmitJobResponse.class);
            // Drive both initially scheduled workers (stage 0 and stage 1) to Started
            // so the job reaches Launched. NOTE(review): stage 0 appears to be the
            // job-master worker — confirm against the job actor's stage layout.
            JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, 0, new WorkerId(clusterName, jobId, 0, 1));
            JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, 1, new WorkerId(clusterName, jobId, 0, 2));
            JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, BaseResponse.ResponseCode.SUCCESS, JobState.Launched);
            // Scale stage 1 from 1 worker up to 2.
            jobClusterActor.tell(new ScaleStageRequest(jobId, 1, 2, user,"No reason"), probe.getRef());
            ScaleStageResponse scaleResp = probe.expectMsgClass(ScaleStageResponse.class);
            System.out.println("scale Resp: " + scaleResp.message);
            assertEquals(SUCCESS, scaleResp.responseCode);
            assertEquals(2,scaleResp.getActualNumWorkers());
            verify(jobStoreMock, times(1)).storeNewJob(any());
            // initial workers are stored in a single batch call
            verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
            // the scaled-up worker is stored individually
            verify(jobStoreMock, times(1)).storeNewWorker(any());
            verify(jobStoreMock, times(6)).updateWorker(any());
            verify(jobStoreMock, times(3)).updateJob(any());
            // 2 initial schedules plus 1 scale-up schedule
            verify(schedulerMock, times(3)).scheduleWorker(any());
        } catch(Exception e) {
            e.printStackTrace();
            fail();
        }
    }
////////////////////////////////// OTHER JOB OPERATIONS //////////////////////////////////////////////////////////////
/////////////////////////// JOB LIST OPERATIONS /////////////////////////////////////////////////////////////////
    /**
     * After a job is killed, GetJobDetails should fall back to the archived-job
     * store and still return the (Completed) job metadata. The archived copy is
     * stubbed on the mocked store before the job is killed.
     */
    @Test
    public void testGetJobDetailsForArchivedJob() {
        TestKit probe = new TestKit(system);
        String clusterName = "testGetJobDetailsForArchivedJob";
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
        ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
        jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
        JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
        assertEquals(SUCCESS, createResp.responseCode);
        String jobId = clusterName + "-1";
        try {
            // Stub what the archive will return once the live job is gone.
            when(jobStoreMock.getArchivedJob(jobId)).thenReturn(of(new MantisJobMetadataImpl.Builder()
                .withJobState(JobState.Completed)
                .withJobId(new JobId(clusterName, 1))
                .withSubmittedAt(1000)
                .withNextWorkerNumToUse(2)
                .build()));
            final JobDefinition jobDefn = createJob(clusterName,1, MantisJobDurationType.Transient);
            JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
            JobTestHelper.getJobDetailsAndVerify(probe, jobClusterActor, jobId, SUCCESS, JobState.Accepted);
            JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 1), jobClusterActor);
            jobClusterActor.tell(new WorkerTerminate(new WorkerId(clusterName + "-1",0,1),WorkerState.Completed,JobCompletedReason.Killed,System.currentTimeMillis()),probe.getRef());
            // Give the actor time to finish archiving the killed job.
            // NOTE(review): timing-based wait — a potential source of flakiness.
            Thread.sleep(1000);
            jobClusterActor.tell(new GetJobDetailsRequest(user,jobId),probe.getRef());
            GetJobDetailsResponse resp = probe.expectMsgClass(GetJobDetailsResponse.class);
            // Details must now come from the archived copy stubbed above.
            assertEquals(SUCCESS,resp.responseCode);
            assertEquals(JobState.Completed, resp.getJobMetadata().get().getState());
            verify(jobStoreMock, times(1)).createJobCluster(any());
            verify(jobStoreMock, times(1)).updateJobCluster(any());
            // verify(jobStoreMock, times(1)).getArchivedJob(jobId);
        } catch (Exception e) {
            e.printStackTrace();
            fail();
        }
    }
@Test
public void testListJobIdsForCluster() throws InvalidJobException {
TestKit probe = new TestKit(system);
String clusterName = "testListJobsForCluster";
MantisScheduler schedulerMock = mock(MantisScheduler.class);
MantisJobStore jobStoreMock = mock(MantisJobStore.class);
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
assertEquals(SUCCESS, createResp.responseCode);
final JobDefinition jobDefn1 = createJob(clusterName);
String jobId = clusterName + "-1";
JobTestHelper.submitJobAndVerifySuccess(probe,clusterName, jobClusterActor, jobDefn1, jobId);
String jobId2 = clusterName + "-2";
JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn1, jobId2);
jobClusterActor.tell(new ListJobIdsRequest(), probe.getRef());
ListJobIdsResponse listResp = probe.expectMsgClass(ListJobIdsResponse.class);
assertEquals(SUCCESS, listResp.responseCode);
assertEquals(2, listResp.getJobIds().size());
boolean foundJob1 = false;
boolean foundJob2 = false;
for(JobClusterProtoAdapter.JobIdInfo jobIdInfo : listResp.getJobIds()) {
if(jobIdInfo.getJobId().equals(jobId)) {
foundJob1 = true;
} else if(jobIdInfo.getJobId().equals(jobId2)) {
foundJob2 = true;
}
}
assertTrue(foundJob1);
assertTrue(foundJob2);
JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 1), jobClusterActor);
jobClusterActor.tell(new ListJobIdsRequest(empty(),
empty(),
of(true),
empty(),
empty(),
empty()), probe.getRef());
ListJobIdsResponse listResp2 = probe.expectMsgClass(ListJobIdsResponse.class);
assertEquals(SUCCESS, listResp2.responseCode);
assertEquals(1, listResp2.getJobIds().size());
// assertFalse(listResp2.getJobIds().contains(JobId.fromId(jobId).get()));
// assertTrue(listResp2.getJobIds().contains(JobId.fromId(jobId2).get()));
foundJob1 = false;
foundJob2 = false;
for(JobClusterProtoAdapter.JobIdInfo jobIdInfo : listResp2.getJobIds()) {
if(jobIdInfo.getJobId().equals(jobId)) {
foundJob1 = true;
} else if(jobIdInfo.getJobId().equals(jobId2)) {
foundJob2 = true;
}
}
assertFalse(foundJob1);
assertTrue(foundJob2);
JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 2), jobClusterActor);
jobClusterActor.tell(new ListJobIdsRequest(), probe.getRef());
ListJobIdsResponse listResp3 = probe.expectMsgClass(ListJobIdsResponse.class);
assertEquals(SUCCESS, listResp3.responseCode);
assertEquals(0, listResp3.getJobIds().size());
// assertFalse(listResp3.getJobIds().contains(JobId.fromId(jobId).get()));
// assertFalse(listResp3.getJobIds().contains(JobId.fromId(jobId2).get()));
foundJob1 = false;
foundJob2 = false;
for(JobClusterProtoAdapter.JobIdInfo jobIdInfo : listResp3.getJobIds()) {
if(jobIdInfo.getJobId().equals(jobId)) {
foundJob1 = true;
} else if(jobIdInfo.getJobId().equals(jobId2)) {
foundJob2 = true;
}
}
assertFalse(foundJob1);
assertFalse(foundJob2);
}
    /**
     * Submits two jobs and verifies ListJobsRequest tracks kills: both jobs at
     * first, one after the first kill (with the filter flag set), none after both
     * are killed.
     */
    @Test
    public void testListJobsForCluster() throws InvalidJobException, InterruptedException {
        TestKit probe = new TestKit(system);
        String clusterName = "testListJobsForCluster";
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
        ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
        jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
        JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
        assertEquals(SUCCESS, createResp.responseCode);
        final JobDefinition jobDefn1 = createJob(clusterName);
        // Submit two jobs and drive one worker of each to Started.
        String jobId = clusterName + "-1";
        JobTestHelper.submitJobAndVerifySuccess(probe,clusterName, jobClusterActor, jobDefn1, jobId);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe,jobClusterActor,jobId,1,new WorkerId(clusterName,jobId,0,1));
        String jobId2 = clusterName + "-2";
        JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn1, jobId2);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe,jobClusterActor,jobId2,1,new WorkerId(clusterName,jobId2,0,1));
        // Unfiltered list should contain both jobs.
        jobClusterActor.tell(new ListJobsRequest(), probe.getRef());
        ListJobsResponse listResp = probe.expectMsgClass(ListJobsResponse.class);
        assertEquals(SUCCESS, listResp.responseCode);
        assertEquals(2, listResp.getJobList().size());
        // Kill job 1, then list with the boolean criteria flag set.
        // NOTE(review): of(true) presumably restricts the listing to active jobs —
        // confirm against the ListJobCriteria parameter it maps to.
        JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 1), jobClusterActor);
        jobClusterActor.tell(new ListJobsRequest(new ListJobCriteria(empty(),
                empty(),
                Lists.newArrayList(),
                Lists.newArrayList(),
                Lists.newArrayList(),
                Lists.newArrayList(),
                of(true),
                empty(),
                empty(),
                empty())), probe.getRef());
        ListJobsResponse listResp2 = probe.expectMsgClass(ListJobsResponse.class);
        assertEquals(SUCCESS, listResp2.responseCode);
        assertEquals(1, listResp2.getJobList().size());
        // Kill job 2 as well; the same filtered request should now be empty.
        JobTestHelper.killJobAndVerify(probe, clusterName, new JobId(clusterName, 2), jobClusterActor);
        jobClusterActor.tell(new ListJobsRequest(new ListJobCriteria(empty(),
                empty(),
                Lists.newArrayList(),
                Lists.newArrayList(),
                Lists.newArrayList(),
                Lists.newArrayList(),
                of(true),
                empty(),
                empty(),
                empty())), probe.getRef());
        ListJobsResponse listResp3 = probe.expectMsgClass(ListJobsResponse.class);
        assertEquals(SUCCESS, listResp3.responseCode);
        assertEquals(0, listResp3.getJobList().size());
    }
@Test
public void testGetLastSubmittedJob() throws Exception {
TestKit probe = new TestKit(system);
String clusterName = "testGetLastSubmittedJob";
final JobDefinition jobDefn1 = createJob(clusterName);
JobId jobId3 = new JobId(clusterName, 3);
JobInfo jInfo3 = new JobInfo(jobId3,jobDefn1, 1000L, null, JobState.Launched, "user1");
JobId jobId4 = new JobId(clusterName, 4);
JobInfo jInfo4 = new JobInfo(jobId4,jobDefn1, 2000L, null, JobState.Launched, "user1");
JobId jobId1 = new JobId(clusterName, 1);
JobClusterDefinitionImpl.CompletedJob cJob1 = new JobClusterDefinitionImpl.CompletedJob(clusterName, jobId1.getId(), "0.0.1", JobState.Completed, 800L, 900L, "user1", new ArrayList<>());
JobId jobId2 = new JobId(clusterName, 2);
JobClusterDefinitionImpl.CompletedJob cJob2 = new JobClusterDefinitionImpl.CompletedJob(clusterName, jobId2.getId(), "0.0.1", JobState.Completed, 900L, 1000L, "user1", new ArrayList<>());
List<JobClusterDefinitionImpl.CompletedJob> completedJobs = new ArrayList<>();
completedJobs.add(cJob1);
completedJobs.add(cJob2);
List<JobInfo> activeList = new ArrayList<>();
activeList.add(jInfo3);
activeList.add(jInfo4);
Optional<JobId> lastJobIdOp = JobListHelper.getLastSubmittedJobId(activeList,completedJobs);
assertTrue(lastJobIdOp.isPresent());
assertEquals(jobId4, lastJobIdOp.get());
}
    /**
     * With only completed jobs, getLastSubmittedJobId should return the completed
     * job with the highest job number.
     * @throws Exception
     */
@Test
public void testGetLastSubmittedJobWithCompletedOnly() throws Exception {
TestKit probe = new TestKit(system);
String clusterName = "testGetLastSubmittedJobWithCompletedOnly";
final JobDefinition jobDefn1 = createJob(clusterName);
JobId jobId1 = new JobId(clusterName, 1);
JobClusterDefinitionImpl.CompletedJob cJob1 = new JobClusterDefinitionImpl.CompletedJob(clusterName, jobId1.getId(), "0.0.1", JobState.Completed, 800L, 900L, "user1", new ArrayList<>());
JobId jobId2 = new JobId(clusterName, 2);
JobClusterDefinitionImpl.CompletedJob cJob2 = new JobClusterDefinitionImpl.CompletedJob(clusterName, jobId2.getId(), "0.0.1", JobState.Completed, 900L, 1000L, "user1", new ArrayList<>());
List<JobClusterDefinitionImpl.CompletedJob> completedJobs = new ArrayList<>();
completedJobs.add(cJob1);
completedJobs.add(cJob2);
List<JobInfo> activeList = new ArrayList<>();
Optional<JobId> lastJobIdOp = JobListHelper.getLastSubmittedJobId(activeList,completedJobs);
assertTrue(lastJobIdOp.isPresent());
assertEquals(jobId2, lastJobIdOp.get());
}
/**
* No Active or completed jobs should return an empty Optional
* @throws Exception
*/
@Test
public void testGetLastSubmittedJobWithNoJobs() throws Exception {
TestKit probe = new TestKit(system);
String clusterName = "testGetLastSubmittedJobWithNoJobs";
final JobDefinition jobDefn1 = createJob(clusterName);
List<JobClusterDefinitionImpl.CompletedJob> completedJobs = new ArrayList<>();
List<JobInfo> activeList = new ArrayList<>();
Optional<JobId> lastJobIdOp = JobListHelper.getLastSubmittedJobId(activeList,completedJobs);
assertFalse(lastJobIdOp.isPresent());
}
    /**
     * Verifies label-based filtering of ListJobsRequest: a single-label query
     * matches only the labelled job, a comma-separated query is OR by default,
     * and passing "and" as the operand requires all labels to match.
     */
    @Test
    public void testListJobWithLabelMatch() {
        TestKit probe = new TestKit(system);
        String clusterName = "testListJobWithLabelMatch";
        try {
            MantisScheduler schedulerMock = mock(MantisScheduler.class);
            MantisJobStore jobStoreMock = mock(MantisJobStore.class);
            final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
            ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreMock, jobDfn -> schedulerMock, eventPublisher, costsCalculator));
            jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
            JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
            assertEquals(SUCCESS, createResp.responseCode);
            // Job 1 labelled l1=l1v1, job 2 labelled l2=l2v2.
            final JobDefinition jobDefn1;
            List<Label> labelList1 = new ArrayList<>();
            labelList1.add(new Label("l1","l1v1"));
            jobDefn1 = createJob(clusterName, labelList1);
            String jobId = clusterName + "-1";
            JobTestHelper.submitJobAndVerifySuccess(probe,clusterName, jobClusterActor, jobDefn1, jobId);
            List<Label> labelList2 = new ArrayList<>();
            labelList2.add(new Label("l2","l2v2"));
            String jobId2 = clusterName + "-2";
            JobDefinition jobDefn2 = createJob(clusterName, labelList2);
            JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn2, jobId2);
            // Query for Label1
            List<Integer> emptyIntList = Lists.newArrayList();
            List<WorkerState.MetaState> workerState = Lists.newArrayList();
            ListJobCriteria criteria1 = new ListJobCriteria(Optional.empty(), Optional.empty(), emptyIntList, emptyIntList, emptyIntList,workerState,Optional.empty(),Optional.empty(),of("l1=l1v1"), Optional.empty());
            jobClusterActor.tell(new ListJobsRequest(criteria1), probe.getRef());
            ListJobsResponse listResp = probe.expectMsgClass(ListJobsResponse.class);
            assertEquals(SUCCESS, listResp.responseCode);
            // Only job1 should be returned
            assertEquals(1, listResp.getJobList().size());
            assertEquals(jobId, listResp.getJobList().get(0).getJobMetadata().getJobId());
            assertTrue(listResp.getJobList().get(0).getStageMetadataList().size() == 1);
            System.out.println("Workers returned : " + listResp.getJobList().get(0).getWorkerMetadataList());
            assertTrue(listResp.getJobList().get(0).getWorkerMetadataList().size() == 1);
            // Query with an OR query for both labels (comma-separated, no operand)
            ListJobCriteria criteria2 = new ListJobCriteria(Optional.empty(), Optional.empty(), emptyIntList, emptyIntList, emptyIntList,workerState,Optional.empty(),Optional.empty(),of("l1=l1v1,l2=l2v2"), Optional.empty());
            jobClusterActor.tell(new ListJobsRequest(criteria2), probe.getRef());
            ListJobsResponse listRes2 = probe.expectMsgClass(ListJobsResponse.class);
            assertEquals(SUCCESS, listRes2.responseCode);
            // Both jobs should be returned
            assertEquals(2, listRes2.getJobList().size());
            assertTrue(jobId.equals(listRes2.getJobList().get(0).getJobMetadata().getJobId()) || jobId.equals(listRes2.getJobList().get(1).getJobMetadata().getJobId()));
            assertTrue(jobId2.equals(listRes2.getJobList().get(0).getJobMetadata().getJobId()) || jobId2.equals(listRes2.getJobList().get(1).getJobMetadata().getJobId()));
            // Query with an AND query for both labels (operand "and")
            ListJobCriteria criteria3 = new ListJobCriteria(Optional.empty(), Optional.empty(), emptyIntList, emptyIntList, emptyIntList,workerState,Optional.empty(),Optional.empty(),of("l1=l1v1,l2=l2v2"), of("and"));
            jobClusterActor.tell(new ListJobsRequest(criteria3), probe.getRef());
            ListJobsResponse listRes3 = probe.expectMsgClass(ListJobsResponse.class);
            assertEquals(SUCCESS, listRes3.responseCode);
            // No jobs should be returned: neither job carries both labels
            assertEquals(0, listRes3.getJobList().size());
        } catch (Exception e) {
            e.printStackTrace();
            fail();
        }
    }
    /**
     * A worker that terminates abnormally must be replaced: the replacement is
     * scheduled, the dead worker is archived, and the job stays Launched.
     * Uses a spied (real) job store so archival actually happens.
     */
    @Test
    public void testLostWorkerGetsReplaced() {
        TestKit probe = new TestKit(system);
        String clusterName = "testLostWorkerGetsReplaced";
        MantisSchedulerFactory schedulerMockFactory = mock(MantisSchedulerFactory.class);
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        when(schedulerMockFactory.forJob(any())).thenReturn(schedulerMock);
        MantisJobStore jobStoreSpied = Mockito.spy(jobStore);
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName);
        ActorRef jobClusterActor = system.actorOf(props(clusterName, jobStoreSpied, schedulerMockFactory, eventPublisher, costsCalculator));
        jobClusterActor.tell(new JobClusterProto.InitializeJobClusterRequest(fakeJobCluster, user, probe.getRef()), probe.getRef());
        JobClusterProto.InitializeJobClusterResponse createResp = probe.expectMsgClass(JobClusterProto.InitializeJobClusterResponse.class);
        assertEquals(SUCCESS, createResp.responseCode);
        try {
            final JobDefinition jobDefn = createJob(clusterName,1, MantisJobDurationType.Transient);
            String jobId = clusterName + "-1";
            JobTestHelper.submitJobAndVerifySuccess(probe, clusterName, jobClusterActor, jobDefn, jobId);
            verify(jobStoreSpied, times(1)).createJobCluster(any());
            verify(jobStoreSpied, times(1)).updateJobCluster(any());
            int stageNo = 1;
            WorkerId workerId = new WorkerId(jobId, 0, 1);
            // Drive worker 1 to Started via launched/initiated/started + heartbeat events.
            JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, stageNo, workerId);
            // The job should now be Launched.
            jobClusterActor.tell(new GetJobDetailsRequest("nj", jobId), probe.getRef());
            GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
            System.out.println("resp " + resp2 + " msg " + resp2.message);
            assertEquals(SUCCESS, resp2.responseCode);
            assertEquals(JobState.Launched, resp2.getJobMetadata().get().getState());
            // Worker 1 terminates abnormally...
            JobTestHelper.sendWorkerTerminatedEvent(probe, jobClusterActor, jobId, workerId);
            // ...and its replacement (worker number 2, same index) comes up.
            WorkerId workerId2_replaced = new WorkerId(jobId, 0, 2);
            JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobClusterActor, jobId, stageNo, workerId2_replaced);
            jobClusterActor.tell(new GetJobDetailsRequest("nj", jobId), probe.getRef());
            GetJobDetailsResponse resp4 = probe.expectMsgClass(GetJobDetailsResponse.class);
            IMantisJobMetadata jobMeta = resp4.getJobMetadata().get();
            Map<Integer, ? extends IMantisStageMetadata> stageMetadata = jobMeta.getStageMetadata();
            IMantisStageMetadata stage = stageMetadata.get(1);
            for (JobWorker worker : stage.getAllWorkers()) {
                System.out.println("worker -> " + worker.getMetadata());
            }
            // one initial schedule plus one for the replacement worker
            verify(schedulerMock, timeout(1_000).times(2)).scheduleWorker(any());
            // the dead worker must have been archived exactly once
            Mockito.verify(jobStoreSpied).archiveWorker(any());
            // The job itself is still listed.
            jobClusterActor.tell(new ListJobsRequest(), probe.getRef());
            ListJobsResponse listResp2 = probe.expectMsgClass(ListJobsResponse.class);
            assertEquals(SUCCESS, listResp2.responseCode);
            assertEquals(1, listResp2.getJobList().size());
            for(MantisJobMetadataView jb : listResp2.getJobList() ) {
                System.out.println("Jb -> " + jb);
            }
        } catch (InvalidJobException e) {
            e.printStackTrace();
            fail();
        } catch (Exception e) {
            e.printStackTrace();
            fail();
        } finally {
            system.stop(jobClusterActor);
        }
    }
    /**
     * Placeholder: expiry of old (completed/archived) jobs is not covered yet.
     * TODO(review): implement, or remove the stub so the suite does not report
     * a vacuously passing test.
     */
    @Test
    public void testExpireOldJobs() {
        //TODO
    }
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster;
import static io.mantisrx.master.jobcluster.LabelManager.SystemLabels.*;
import static org.junit.Assert.assertEquals;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.command.InvalidJobException;
import io.mantisrx.runtime.descriptor.DeploymentStrategy;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import org.junit.Test;
public class LabelManagerTest {

    /** Inserting the auto-resubmit label into a definition with no labels adds exactly one label. */
    @Test
    public void insertResubmitLabelTest() throws InvalidJobException {
        JobDefinition jobDefinition = generateJobDefinition("insertResubmitLabelTest", new ArrayList<>(),
                "art.zip", "1.0");
        JobDefinition updatedJobDefn = LabelManager.insertAutoResubmitLabel(jobDefinition);
        assertEquals(1, updatedJobDefn.getLabels().size());
        Label label = updatedJobDefn.getLabels().get(0);
        assertEquals(MANTIS_IS_RESUBMIT_LABEL.label, label.getName());
    }

    /** The auto-resubmit label must not be duplicated when it is already present. */
    @Test
    public void doNotinsertResubmitLabelIfAlreadyExistsTest() throws InvalidJobException {
        List<Label> labels = new ArrayList<>();
        labels.add(new Label(MANTIS_IS_RESUBMIT_LABEL.label, "true"));
        JobDefinition jobDefinition = generateJobDefinition("DoNotinsertResubmitLabelIfAlreadyExistsTest",
                labels, "art.zip", "1.0");
        JobDefinition updatedJobDefn = LabelManager.insertAutoResubmitLabel(jobDefinition);
        assertEquals(1, updatedJobDefn.getLabels().size());
        Label label = updatedJobDefn.getLabels().get(0);
        assertEquals(MANTIS_IS_RESUBMIT_LABEL.label, label.getName());
    }

    /** insertSystemLabels adds an artifact label carrying the job's artifact name. */
    @Test
    public void insertArtifactLabelTest() throws InvalidJobException {
        String artifactName = "art.zip";
        JobDefinition jobDefinition = generateJobDefinition("insertResubmitLabelTest", new ArrayList<>(),
                artifactName, "1.0");
        JobDefinition updatedJobDefn = LabelManager.insertSystemLabels(jobDefinition, false);
        // artifact + version labels are added
        assertEquals(2, updatedJobDefn.getLabels().size());
        List<Label> labels = updatedJobDefn.getLabels().stream().filter(
                        label -> label.getName().equals(MANTIS_ARTIFACT_LABEL.label))
                .collect(Collectors.toList());
        Label label = labels.get(0);
        assertEquals(MANTIS_ARTIFACT_LABEL.label, label.getName());
        assertEquals(artifactName, label.getValue());
    }

    /** A stale artifact label is replaced with the definition's current artifact name. */
    @Test
    public void replaceArtifactLabelTest() throws InvalidJobException {
        String artifactName = "art1.zip";
        List<Label> labels = new ArrayList<>();
        labels.add(new Label(MANTIS_ARTIFACT_LABEL.label, "art0.zip"));
        JobDefinition jobDefinition = generateJobDefinition("replaceArtifactLabelTest", labels,
                artifactName, "1.0");
        JobDefinition updatedJobDefn = LabelManager.insertSystemLabels(jobDefinition, false);
        assertEquals(2, updatedJobDefn.getLabels().size());
        labels = updatedJobDefn.getLabels().stream().filter(
                        label -> label.getName().equals(MANTIS_ARTIFACT_LABEL.label))
                .collect(Collectors.toList());
        Label label = labels.get(0);
        assertEquals(MANTIS_ARTIFACT_LABEL.label, label.getName());
        assertEquals(artifactName, label.getValue());
    }

    /** insertSystemLabels adds a version label carrying the job's version. */
    @Test
    public void insertVersionLabelTest() throws InvalidJobException {
        String artifactName = "art.zip";
        JobDefinition jobDefinition = generateJobDefinition("insertVersionLabelTest", new ArrayList<>(),
                artifactName, "1.0");
        JobDefinition updatedJobDefn = LabelManager.insertSystemLabels(jobDefinition, false);
        assertEquals(2, updatedJobDefn.getLabels().size());
        List<Label> labels = updatedJobDefn.getLabels().stream().filter(
                        label -> label.getName().equals(MANTIS_VERSION_LABEL.label))
                .collect(Collectors.toList());
        Label label = labels.get(0);
        assertEquals(MANTIS_VERSION_LABEL.label, label.getName());
        assertEquals("1.0", label.getValue());
    }

    /** A stale version label is replaced with the definition's current version. */
    @Test
    public void replaceVersionLabelTest() throws InvalidJobException {
        String artifactName = "art1.zip";
        String v0 = "1.0";
        String v1 = "2.0";
        List<Label> labels = new ArrayList<>();
        labels.add(new Label(MANTIS_VERSION_LABEL.label, v0));
        // FIX: pass v1 rather than a duplicated "2.0" literal so the submitted version
        // and the assertEquals(v1, ...) below can never drift apart.
        JobDefinition jobDefinition = generateJobDefinition("replaceVersionLabelTest", labels,
                artifactName, v1);
        JobDefinition updatedJobDefn = LabelManager.insertSystemLabels(jobDefinition, false);
        assertEquals(2, updatedJobDefn.getLabels().size());
        labels = updatedJobDefn.getLabels().stream().filter(
                        label -> label.getName().equals(MANTIS_VERSION_LABEL.label))
                .collect(Collectors.toList());
        Label label = labels.get(0);
        assertEquals(MANTIS_VERSION_LABEL.label, label.getName());
        assertEquals(v1, label.getValue());
    }

    /**
     * With autoResubmit=true, insertSystemLabels yields exactly the artifact, resubmit and
     * version labels with the expected values (the stale artifact label is replaced).
     */
    @Test
    public void systemLabelTest() throws InvalidJobException {
        String artifactName = "art1.zip";
        List<Label> labels = new ArrayList<>();
        labels.add(new Label(MANTIS_ARTIFACT_LABEL.label, "art0.zip"));
        JobDefinition jobDefinition = generateJobDefinition("systemLabelTest", labels,
                artifactName, "1.0");
        JobDefinition updatedJobDefn = LabelManager.insertSystemLabels(jobDefinition, true);
        assertEquals(3, updatedJobDefn.getLabels().size());
        for (Label l : updatedJobDefn.getLabels()) {
            if (l.getName().equals(MANTIS_ARTIFACT_LABEL.label)) {
                assertEquals(artifactName, l.getValue());
            } else if (l.getName().equals(MANTIS_IS_RESUBMIT_LABEL.label)) {
                assertEquals("true", l.getValue());
            } else {
                // remaining label is the version label
                assertEquals("1.0", l.getValue());
            }
        }
    }

    /**
     * The resource-cluster label is derived from the deployment strategy: present when a
     * resourceClusterId is set, absent otherwise, and it overrides any caller-supplied value.
     */
    @Test
    public void insertResourceClusterLabel() throws InvalidJobException {
        List<Label> labels = new ArrayList<>();
        DeploymentStrategy dS = DeploymentStrategy.builder().resourceClusterId("resc1").build();
        JobDefinition jobDefinition = generateJobDefinitionBuilder(
                "insertResourceClusterLabelTest", labels, "art.zip", "1.0", dS)
                .build();
        assertEquals(1, jobDefinition.getLabels().size());
        Label label = jobDefinition.getLabels().get(0);
        assertEquals(MANTIS_RESOURCE_CLUSTER_NAME_LABEL.label, label.getName());
        assertEquals("resc1", label.getValue());

        // No resourceClusterId in the strategy -> no label.
        DeploymentStrategy dS2 = DeploymentStrategy.builder().build();
        jobDefinition = generateJobDefinitionBuilder(
                "insertResourceClusterLabelTest", labels, "art.zip", "1.0", dS2)
                .build();
        assertEquals(0, jobDefinition.getLabels().size());

        // No deployment strategy at all -> no label.
        jobDefinition = generateJobDefinitionBuilder(
                "insertResourceClusterLabelTest", labels, "art.zip", "1.0", null)
                .build();
        assertEquals(0, jobDefinition.getLabels().size());

        // A caller-supplied resource-cluster label is overridden by the strategy's value.
        List<Label> labels2 = Lists.newArrayList(new Label(MANTIS_RESOURCE_CLUSTER_NAME_LABEL.label, "wrongCluster"));
        jobDefinition = generateJobDefinitionBuilder(
                "insertResourceClusterLabelTest", labels2, "art.zip", "1.0", dS)
                .build();
        assertEquals(1, jobDefinition.getLabels().size());
        label = jobDefinition.getLabels().get(0);
        assertEquals(MANTIS_RESOURCE_CLUSTER_NAME_LABEL.label, label.getName());
        assertEquals("resc1", label.getValue());
    }

    /** Builds a JobDefinition.Builder with a fixed single-worker scheduling setup for these tests. */
    JobDefinition.Builder generateJobDefinitionBuilder(
            String name, List<Label> labelList, String artifactName, String version, DeploymentStrategy deploymentStrategy) {
        return new JobDefinition.Builder()
                .withName(name)
                .withParameters(Lists.newArrayList())
                .withLabels(labelList)
                .withSchedulingInfo(JobClusterTest.SINGLE_WORKER_SCHED_INFO)
                .withArtifactName(artifactName)
                .withVersion(version)
                .withSubscriptionTimeoutSecs(1)
                .withUser("njoshi")
                .withJobSla(new JobSla(0, 0,
                        JobSla.StreamSLAType.Lossy, MantisJobDurationType.Transient, "userType"))
                .withDeploymentStrategy(deploymentStrategy);
    }

    /** Convenience wrapper that builds a JobDefinition without a deployment strategy. */
    JobDefinition generateJobDefinition(String name, List<Label> labelList, String artifactName, String version)
            throws InvalidJobException {
        return generateJobDefinitionBuilder(name, labelList, artifactName, version, null)
                .build();
    }
}
| 7,887 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/jobcluster/JobManagerTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster;
import static java.util.Optional.empty;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.*;
import akka.actor.AbstractActor;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.jobcluster.JobClusterActor.JobInfo;
import io.mantisrx.master.jobcluster.JobClusterActor.JobManager;
import io.mantisrx.master.jobcluster.job.CostsCalculator;
import io.mantisrx.master.jobcluster.job.JobState;
import io.mantisrx.master.jobcluster.job.JobTestHelper;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.scheduler.MantisSchedulerFactory;
import java.io.IOException;
import java.time.Instant;
import java.util.List;
import java.util.Optional;
import org.junit.BeforeClass;
import org.junit.Test;
public class JobManagerTest {

    private static MantisJobStore jobStore;
    private static AbstractActor.ActorContext context;
    private static MantisSchedulerFactory schedulerFactory;
    private static LifecycleEventPublisher publisher;
    private final CostsCalculator costsCalculator = CostsCalculator.noop();

    @BeforeClass
    public static void setup() {
        jobStore = mock(MantisJobStore.class);
        context = mock(AbstractActor.ActorContext.class);
        schedulerFactory = mock(MantisSchedulerFactory.class);
        publisher = mock(LifecycleEventPublisher.class);
        JobTestHelper.createDirsIfRequired();
        TestHelpers.setupMasterConfig();
    }

    /** Accepted -> Started: the job leaves the accepted set and joins the active set. */
    @Test
    public void acceptedToActive() {
        JobClusterActor.JobManager jm = new JobManager("name", context, schedulerFactory, publisher, jobStore, costsCalculator);
        JobId jId1 = new JobId("name", 1);
        JobInfo jInfo1 = new JobInfo(jId1, null, 0, null, JobState.Accepted, "nj");
        assertTrue(jm.markJobAccepted(jInfo1));
        assertEquals(1, jm.acceptedJobsCount());
        assertTrue(jm.markJobStarted(jInfo1));
        assertEquals(0, jm.acceptedJobsCount());
        assertEquals(1, jm.activeJobsCount());
        assertTrue(jm.getAllNonTerminalJobsList().contains(jInfo1));
    }

    /** Accepted -> Completed: the job moves to the completed list and out of non-terminal views. */
    @Test
    public void acceptedToCompleted() {
        JobClusterActor.JobManager jm = new JobManager("name", context, schedulerFactory, publisher, jobStore, costsCalculator);
        JobId jId1 = new JobId("name", 1);
        JobInfo jInfo1 = new JobInfo(jId1, null, 0, null, JobState.Accepted, "nj");
        assertTrue(jm.markJobAccepted(jInfo1));
        assertEquals(1, jm.acceptedJobsCount());
        assertTrue(jm.getCompletedJobsList().size() == 0);
        assertTrue(jm.markCompleted(jId1, empty(), JobState.Completed).isPresent());
        assertEquals(0, jm.acceptedJobsCount());
        assertEquals(1, jm.getCompletedJobsList().size());
        assertEquals(0, jm.activeJobsCount());
        assertFalse(jm.getAllNonTerminalJobsList().contains(jInfo1));
        assertTrue(jm.getCompletedJobsList().size() == 1);
        JobClusterDefinitionImpl.CompletedJob completedJob = jm.getCompletedJobsList().get(0);
        assertEquals(jId1.getId(), completedJob.getJobId());
    }

    /** Accepted -> Terminating: still a non-terminal job but no longer accepted or active. */
    @Test
    public void acceptedToTerminating() {
        JobClusterActor.JobManager jm = new JobManager("name", context, schedulerFactory, publisher, jobStore, costsCalculator);
        JobId jId1 = new JobId("name", 1);
        JobInfo jInfo1 = new JobInfo(jId1, null, 0, null, JobState.Accepted, "nj");
        assertTrue(jm.markJobAccepted(jInfo1));
        assertTrue(jm.getAllNonTerminalJobsList().contains(jInfo1));
        assertEquals(1, jm.acceptedJobsCount());
        assertTrue(jm.markJobTerminating(jInfo1, JobState.Terminating_abnormal));
        assertTrue(jm.getAllNonTerminalJobsList().contains(jInfo1));
        assertEquals(0, jm.acceptedJobsCount());
        assertEquals(0, jm.activeJobsCount());
        Optional<JobInfo> j1 = jm.getJobInfoForNonTerminalJob(jId1);
        assertTrue(j1.isPresent());
        assertEquals(jId1, j1.get().jobId);
    }

    /** A job already terminating cannot be marked started again. */
    @Test
    public void terminatingToActiveIsIgnored() {
        JobClusterActor.JobManager jm = new JobManager("name", context, schedulerFactory, publisher, jobStore, costsCalculator);
        JobId jId1 = new JobId("name", 1);
        JobDefinition jdMock = mock(JobDefinition.class);
        JobInfo jInfo1 = new JobInfo(jId1, jdMock, 0, null, JobState.Accepted, "nj");
        jm.markJobAccepted(jInfo1);
        assertEquals(1, jm.acceptedJobsCount());
        Optional<JobInfo> jInfo1Op = jm.getJobInfoForNonTerminalJob(jId1);
        assertTrue(jInfo1Op.isPresent());
        assertTrue(jm.markJobTerminating(jInfo1Op.get(), JobState.Terminating_abnormal));
        jInfo1Op = jm.getJobInfoForNonTerminalJob(jId1);
        assertTrue(jInfo1Op.isPresent());
        assertFalse(jm.markJobStarted(jInfo1Op.get()));
    }

    /** A terminating job cannot be moved back to Accepted. */
    @Test
    public void activeToAcceptedFails() {
        JobClusterActor.JobManager jm = new JobManager("name", context, schedulerFactory, publisher, jobStore, costsCalculator);
        JobId jId1 = new JobId("name", 1);
        JobInfo jInfo1 = new JobInfo(jId1, null, 0, null, JobState.Accepted, "nj");
        assertTrue(jm.markJobAccepted(jInfo1));
        assertEquals(1, jm.acceptedJobsCount());
        assertTrue(jm.markJobTerminating(jInfo1, JobState.Terminating_abnormal));
        assertFalse(jm.markJobAccepted(jInfo1));
    }

    /** getAcceptedJobsList returns all accepted jobs as an immutable list. */
    @Test
    public void testGetAcceptedJobList() {
        JobClusterActor.JobManager jm = new JobManager("name", context, schedulerFactory, publisher, jobStore, costsCalculator);
        JobId jId1 = new JobId("name", 1);
        JobInfo jInfo1 = new JobInfo(jId1, null, 0, null, JobState.Accepted, "nj");
        assertTrue(jm.markJobAccepted(jInfo1));
        JobId jId2 = new JobId("name", 2);
        JobInfo jInfo2 = new JobInfo(jId2, null, 0, null, JobState.Accepted, "nj");
        assertTrue(jm.markJobAccepted(jInfo2));
        List<JobInfo> acceptedJobList = jm.getAcceptedJobsList();
        assertEquals(2, acceptedJobList.size());
        // order is unspecified; both jobs must be present
        assertTrue(jId1.equals(acceptedJobList.get(0).jobId) || jId1.equals(acceptedJobList.get(1).jobId));
        assertTrue(jId2.equals(acceptedJobList.get(0).jobId) || jId2.equals(acceptedJobList.get(1).jobId));
        try {
            acceptedJobList.remove(0);
            fail();
        } catch (Exception e) {
            // expected: the returned list is immutable (exact exception type not pinned here)
        }
    }

    /** getActiveJobsList returns all started jobs as an immutable list. */
    @Test
    public void testGetActiveJobList() {
        JobClusterActor.JobManager jm = new JobManager("name", context, schedulerFactory, publisher, jobStore, costsCalculator);
        JobId jId1 = new JobId("name", 1);
        JobInfo jInfo1 = new JobInfo(jId1, null, 0, null, JobState.Accepted, "nj");
        assertTrue(jm.markJobAccepted(jInfo1));
        assertTrue(jm.markJobStarted(jInfo1));
        JobId jId2 = new JobId("name", 2);
        JobInfo jInfo2 = new JobInfo(jId2, null, 0, null, JobState.Accepted, "nj");
        assertTrue(jm.markJobAccepted(jInfo2));
        assertTrue(jm.markJobStarted(jInfo2));
        List<JobInfo> acceptedJobList = jm.getAcceptedJobsList();
        assertEquals(0, acceptedJobList.size());
        List<JobInfo> activeJobList = jm.getActiveJobsList();
        assertEquals(2, activeJobList.size());
        // order is unspecified; both jobs must be present
        assertTrue(jId1.equals(activeJobList.get(0).jobId) || jId1.equals(activeJobList.get(1).jobId));
        assertTrue(jId2.equals(activeJobList.get(0).jobId) || jId2.equals(activeJobList.get(1).jobId));
        try {
            activeJobList.remove(0);
            fail();
        } catch (Exception e) {
            // expected: the returned list is immutable (exact exception type not pinned here)
        }
    }

    /**
     * Completed jobs older than the cutoff are purged, and the purge also deletes the job
     * from the store (verified against the mocked MantisJobStore).
     */
    @Test
    public void testPurgeOldJobs() {
        String clusterName = "testPurgeOldJobs";
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        JobClusterActor.JobManager jm = new JobManager(clusterName, context, schedulerFactory, publisher, jobStoreMock, costsCalculator);
        JobId jId1 = new JobId(clusterName, 1);
        JobInfo jInfo1 = new JobInfo(jId1, null, 0, null, JobState.Accepted, "nj");
        assertTrue(jm.markJobAccepted(jInfo1));
        assertTrue(jm.getAllNonTerminalJobsList().contains(jInfo1));
        JobId jId2 = new JobId(clusterName, 2);
        JobInfo jInfo2 = new JobInfo(jId2, null, 1, null, JobState.Accepted, "nj");
        assertTrue(jm.markJobAccepted(jInfo2));
        assertTrue(jm.getAllNonTerminalJobsList().contains(jInfo2));
        assertTrue(jm.getAllNonTerminalJobsList().size() == 2);
        // non-terminal list is sorted newest-first
        assertEquals(jInfo1, jm.getAllNonTerminalJobsList().get(1));
        assertEquals(jInfo2, jm.getAllNonTerminalJobsList().get(0));

        jm.markJobTerminating(jInfo1, JobState.Terminating_abnormal);
        Instant completionInstant = Instant.now().minusSeconds(5);
        jm.markCompleted(jId1, completionInstant.toEpochMilli(), empty(), JobState.Completed);
        assertEquals(1, jm.getCompletedJobsList().size());
        assertEquals(jId1.getId(), jm.getCompletedJobsList().get(0).getJobId());

        // cutoff of now-3s is newer than the now-5s completion time -> jId1 gets purged
        jm.purgeOldCompletedJobs(Instant.now().minusSeconds(3).toEpochMilli());
        assertEquals(0, jm.getCompletedJobsList().size());
        try {
            verify(jobStoreMock, times(1)).deleteCompletedJob(clusterName, jId1.getId());
            verify(jobStoreMock, times(1)).deleteJob(jId1.getId());
        } catch (Exception e) {
            // single catch replaces the previous duplicated IOException/Exception blocks
            e.printStackTrace();
            fail();
        }
    }

    /** Completed jobs are sorted newest-completion-first. */
    @Test
    public void testJobListSortedCorrectly() {
        String clusterName = "testJobListSortedCorrectly";
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        JobClusterActor.JobManager jm = new JobManager(clusterName, context, schedulerFactory, publisher, jobStoreMock, costsCalculator);
        JobId jId1 = new JobId(clusterName, 1);
        JobInfo jInfo1 = new JobInfo(jId1, null, 0, null, JobState.Accepted, "nj");
        assertTrue(jm.markJobAccepted(jInfo1));
        assertTrue(jm.getAllNonTerminalJobsList().contains(jInfo1));
        JobId jId2 = new JobId(clusterName, 2);
        JobInfo jInfo2 = new JobInfo(jId2, null, 1, null, JobState.Accepted, "nj");
        assertTrue(jm.markJobAccepted(jInfo2));
        assertTrue(jm.getAllNonTerminalJobsList().contains(jInfo2));
        assertTrue(jm.getAllNonTerminalJobsList().size() == 2);
        assertEquals(jInfo1, jm.getAllNonTerminalJobsList().get(1));
        assertEquals(jInfo2, jm.getAllNonTerminalJobsList().get(0));

        jm.markJobTerminating(jInfo1, JobState.Terminating_abnormal);
        Instant completionInstant = Instant.now().minusSeconds(5);
        jm.markCompleted(jId1, completionInstant.toEpochMilli(), empty(), JobState.Completed);
        assertEquals(1, jm.getCompletedJobsList().size());
        assertEquals(jId1.getId(), jm.getCompletedJobsList().get(0).getJobId());

        // FIX: terminate the second job (jInfo2) before completing jId2 — the original
        // code re-terminated the already-completed jInfo1 (copy-paste slip).
        jm.markJobTerminating(jInfo2, JobState.Terminating_abnormal);
        completionInstant = Instant.now().minusSeconds(2);
        jm.markCompleted(jId2, completionInstant.toEpochMilli(), empty(), JobState.Completed);
        assertEquals(2, jm.getCompletedJobsList().size());
        // jId2 completed more recently, so it sorts first
        assertEquals(jId2.getId(), jm.getCompletedJobsList().get(0).getJobId());
        assertEquals(jId1.getId(), jm.getCompletedJobsList().get(1).getJobId());
    }
}
| 7,888 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/jobcluster/proto/JobClusterManagerProtoTest.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.proto;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import io.mantisrx.master.api.akka.route.Jackson;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateSchedulingInfoRequest;
import org.junit.Test;
public class JobClusterManagerProtoTest {

    /**
     * Deserializes an UpdateSchedulingInfoRequest JSON payload and verifies that the
     * version string survives and that every stage's machine definition and constraint
     * lists are materialized (empty JSON arrays must not deserialize to null).
     */
    @Test
    public void testDeserialization() throws Exception {
        String json = "{\n"
            + "    \"schedulingInfo\":\n"
            + "    {\n"
            + "        \"stages\":\n"
            + "        {\n"
            + "            \"1\":\n"
            + "            {\n"
            + "                \"numberOfInstances\": 1,\n"
            + "                \"machineDefinition\":\n"
            + "                {\n"
            + "                    \"cpuCores\": 1.0,\n"
            + "                    \"memoryMB\": 4094.0,\n"
            + "                    \"networkMbps\": 128.0,\n"
            + "                    \"diskMB\": 6000.0,\n"
            + "                    \"numPorts\": 1\n"
            + "                },\n"
            + "                \"hardConstraints\":\n"
            + "                [],\n"
            + "                \"softConstraints\":\n"
            + "                [],\n"
            + "                \"scalingPolicy\": null,\n"
            + "                \"scalable\": false\n"
            + "            },\n"
            + "            \"2\":\n"
            + "            {\n"
            + "                \"numberOfInstances\": 1,\n"
            + "                \"machineDefinition\":\n"
            + "                {\n"
            + "                    \"cpuCores\": 1.0,\n"
            + "                    \"memoryMB\": 4096.0,\n"
            + "                    \"networkMbps\": 128.0,\n"
            + "                    \"diskMB\": 6000.0,\n"
            + "                    \"numPorts\": 1\n"
            + "                },\n"
            + "                \"hardConstraints\":\n"
            + "                [],\n"
            + "                \"softConstraints\":\n"
            + "                [],\n"
            + "                \"scalingPolicy\": null,\n"
            + "                \"scalable\": false\n"
            + "            },\n"
            + "            \"3\":\n"
            + "            {\n"
            + "                \"numberOfInstances\": 1,\n"
            + "                \"machineDefinition\":\n"
            + "                {\n"
            + "                    \"cpuCores\": 1.0,\n"
            + "                    \"memoryMB\": 4096.0,\n"
            + "                    \"networkMbps\": 128.0,\n"
            + "                    \"diskMB\": 6000.0,\n"
            + "                    \"numPorts\": 1\n"
            + "                },\n"
            + "                \"hardConstraints\":\n"
            + "                [],\n"
            + "                \"softConstraints\":\n"
            + "                [],\n"
            + "                \"scalingPolicy\": null,\n"
            + "                \"scalable\": false\n"
            + "            }\n"
            + "        }\n"
            + "    },\n"
            + "    \"version\": \"0.0.1-snapshot.202303220002+fdichiara.runtimeV2.d16a200 2023-05-09 09:10:42\"\n"
            + "}";
        UpdateSchedulingInfoRequest result =
            Jackson.fromJSON(json, UpdateSchedulingInfoRequest.class);
        assertNotNull(result);
        // FIX: JUnit's assertEquals takes (expected, actual) — the original had them
        // swapped, which produces misleading failure messages.
        assertEquals(
            "0.0.1-snapshot.202303220002+fdichiara.runtimeV2.d16a200 2023-05-09 09:10:42",
            result.getVersion());
        result.getSchedulingInfo().getStages().values().forEach(stage -> {
            assertNotNull(stage.getMachineDefinition());
            assertNotNull(stage.getHardConstraints());
            assertNotNull(stage.getSoftConstraints());
        });
    }
}
| 7,889 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/jobcluster/job/JobTestMigrationTests.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.testkit.javadsl.TestKit;
import com.netflix.fenzo.VirtualMachineCurrentState;
import com.netflix.fenzo.VirtualMachineLease;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventPublisherImpl;
import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl;
import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.jobcluster.proto.JobProto;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.WorkerMigrationConfig.MigrationStrategyEnum;
import io.mantisrx.runtime.command.InvalidJobException;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.domain.IJobClusterDefinition;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.ScheduleRequest;
import io.mantisrx.server.master.scheduler.WorkerOnDisabledVM;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.time.Instant;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
public class JobTestMigrationTests {

    static ActorSystem system;
    private static final String user = "mantis";
    final LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(
            new AuditEventSubscriberLoggingImpl(),
            new StatusEventSubscriberLoggingImpl(),
            new WorkerEventSubscriberLoggingImpl());

    @BeforeClass
    public static void setup() {
        system = ActorSystem.create();
        TestHelpers.setupMasterConfig();
    }

    @AfterClass
    public static void tearDown() {
        TestKit.shutdownActorSystem(system);
        system = null;
    }

    /**
     * Submits a single-worker job, signals that the worker's VM was disabled and then
     * triggers the heartbeat check that starts migration. The stub scheduler's latches
     * count the expected calls: 2 schedules (initial submit + migration resubmit) and
     * 1 unschedule (the worker being moved off the disabled VM).
     */
    @Test
    public void testWorkerMigration() {
        String clusterName = "testWorkerMigration";
        TestKit probe = new TestKit(system);
        SchedulingInfo sInfo = new SchedulingInfo.Builder()
                .numberOfStages(1)
                .singleWorkerStageWithConstraints(
                        new MachineDefinition(1.0, 1.0, 1.0, 3),
                        Lists.newArrayList(),
                        Lists.newArrayList())
                .build();
        IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(
                clusterName, sInfo, new WorkerMigrationConfig(MigrationStrategyEnum.ONE_WORKER, "{}"));
        // 1 initial submission + 1 migration resubmit; 1 unschedule of the migrated worker.
        CountDownLatch scheduleCDL = new CountDownLatch(2);
        CountDownLatch unscheduleCDL = new CountDownLatch(1);
        try {
            JobDefinition jobDefn = JobTestHelper.generateJobDefinition(clusterName, sInfo);
            MantisScheduler schedulerMock = new DummyScheduler(scheduleCDL, unscheduleCDL);
            MantisJobStore jobStoreMock = mock(MantisJobStore.class);
            MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
                    .withJobId(new JobId(clusterName, 2))
                    .withSubmittedAt(Instant.now())
                    .withJobState(JobState.Accepted)
                    .withNextWorkerNumToUse(1)
                    .withJobDefinition(jobDefn)
                    .build();
            final ActorRef jobActor = system.actorOf(JobActor.props(
                    jobClusterDefn, mantisJobMetaData, jobStoreMock, schedulerMock,
                    eventPublisher, CostsCalculator.noop()));

            jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
            JobProto.JobInitialized initMsg = probe.expectMsgClass(JobProto.JobInitialized.class);
            assertEquals(SUCCESS, initMsg.responseCode);

            String jobId = clusterName + "-2";
            int stageNo = 1;
            WorkerId workerId = new WorkerId(jobId, 0, 1);
            // Drive the worker through Launched/Initiated/Started plus a heartbeat.
            JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId);

            // The single worker has started, so the job should be in Launched state.
            jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
            JobClusterManagerProto.GetJobDetailsResponse resp3 =
                    probe.expectMsgClass(JobClusterManagerProto.GetJobDetailsResponse.class);
            assertEquals(SUCCESS, resp3.responseCode);
            assertEquals(JobState.Launched, resp3.getJobMetadata().get().getState());

            // Flag the worker's VM as disabled, then trigger the heartbeat check that
            // starts the migration of the first worker.
            jobActor.tell(new WorkerOnDisabledVM(workerId), probe.getRef());
            jobActor.tell(new JobProto.CheckHeartBeat(), probe.getRef());

            // Heartbeat the replacement worker so it becomes healthy.
            WorkerId migratedWorkerId1 = new WorkerId(jobId, 0, 2);
            JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, migratedWorkerId1);

            scheduleCDL.await(1, TimeUnit.SECONDS);
            unscheduleCDL.await(1, TimeUnit.SECONDS);
        } catch (Exception e) {
            // single catch replaces the previous duplicated InvalidJobException/Exception blocks
            e.printStackTrace();
            fail();
        }
    }

    /**
     * Stub scheduler that only counts scheduleWorker/unscheduleWorker invocations via the
     * supplied latches; every other callback is a no-op. Declared static: it never touches
     * the enclosing test instance, so it should not hold a hidden reference to it.
     */
    static class DummyScheduler implements MantisScheduler {
        private final CountDownLatch schedL;
        private final CountDownLatch unschedL;

        public DummyScheduler(CountDownLatch scheduleCDL, CountDownLatch unscheduleCDL) {
            schedL = scheduleCDL;
            unschedL = unscheduleCDL;
        }

        @Override
        public void scheduleWorker(ScheduleRequest scheduleRequest) {
            System.out.println("----------------------> schedule Worker Called");
            schedL.countDown();
        }

        @Override
        public void unscheduleWorker(WorkerId workerId, Optional<String> hostname) {
            unschedL.countDown();
        }

        @Override
        public void unscheduleAndTerminateWorker(WorkerId workerId, Optional<String> hostname) {
            // no-op
        }

        @Override
        public void updateWorkerSchedulingReadyTime(WorkerId workerId, long when) {
            // no-op
        }

        @Override
        public void initializeRunningWorker(ScheduleRequest scheduleRequest, String hostname, String hostID) {
            // no-op
        }

        @Override
        public void rescindOffer(String offerId) {
            // no-op
        }

        @Override
        public void rescindOffers(String hostname) {
            // no-op
        }

        @Override
        public void addOffers(List<VirtualMachineLease> offers) {
            // no-op
        }

        @Override
        public void disableVM(String hostname, long durationMillis) throws IllegalStateException {
            // no-op
        }

        @Override
        public void enableVM(String hostname) {
            // no-op
        }

        @Override
        public List<VirtualMachineCurrentState> getCurrentVMState() {
            return null;
        }

        @Override
        public void setActiveVmGroups(List<String> activeVmGroups) {
            // no-op
        }
    }

    public static void main(String[] args) {
    }
}
| 7,890 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/jobcluster/job/WorkerResubmitRateLimiterTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import static org.junit.Assert.*;
import io.mantisrx.server.core.domain.WorkerId;
import java.util.List;
import org.junit.Test;
public class WorkerResubmitRateLimiterTest {
@Test
public void ctorTest() {
    // A custom interval spec "5:10:15" yields the ladder [0, 5, 10, 15] with a 5s expiry.
    WorkerResubmitRateLimiter limiter = new WorkerResubmitRateLimiter("5:10:15", 5);
    assertEquals(5, limiter.getExpireResubmitDelaySecs());
    long[] intervals = limiter.getResubmitIntervalSecs();
    long[] expected = {0, 5, 10, 15};
    assertEquals(expected.length, intervals.length);
    for (int i = 0; i < expected.length; i++) {
        assertEquals(expected[i], intervals[i]);
    }
}
@Test
public void ctorTest_nointervalgiven() {
    // An empty interval spec falls back to the default ladder [0, 5, 10, 20].
    WorkerResubmitRateLimiter limiter = new WorkerResubmitRateLimiter("", 5);
    assertEquals(5, limiter.getExpireResubmitDelaySecs());
    long[] intervals = limiter.getResubmitIntervalSecs();
    long[] expected = {0, 5, 10, 20};
    assertEquals(expected.length, intervals.length);
    for (int i = 0; i < expected.length; i++) {
        assertEquals(expected[i], intervals[i]);
    }
    // A non-positive expiry delay must be rejected by the constructor.
    for (int badDelay : new int[]{0, -1}) {
        try {
            new WorkerResubmitRateLimiter("", badDelay);
            fail();
        } catch (Exception e) {
            // expected — invalid expiry delay
        }
    }
}
@Test
public void addWorkerTest() {
    // Successive resubmits of the same worker are delayed by 0s, 5s, 10s, 15s,
    // then capped at the last configured interval.
    WorkerResubmitRateLimiter limiter = new WorkerResubmitRateLimiter("5:10:15", 5);
    int stage = 1;
    long base = System.currentTimeMillis();
    WorkerId worker = new WorkerId("TestJob-1", 0, 1);
    long[] expectedOffsetsMillis = {0, 5_000, 10_000, 15_000, 15_000};
    for (long offset : expectedOffsetsMillis) {
        assertEquals(base + offset, limiter.getWorkerResubmitTime(worker, stage, base));
    }
}
@Test
public void addMultipleWorkerTest() {
WorkerResubmitRateLimiter wrrl = new WorkerResubmitRateLimiter("5:10:15", 5);
int stageNum = 1;
long currTime = System.currentTimeMillis();
WorkerId workerId = new WorkerId("TestJob-1", 0, 1);
WorkerId workerId2 = new WorkerId("TestJob-1", 1, 2);
long resubmitTime = wrrl.getWorkerResubmitTime(workerId, stageNum, currTime);
assertEquals(currTime, resubmitTime);
resubmitTime = wrrl.getWorkerResubmitTime(workerId, stageNum, currTime);
assertEquals(currTime + 5000, resubmitTime);
resubmitTime = wrrl.getWorkerResubmitTime(workerId2, stageNum, currTime);
assertEquals(currTime, resubmitTime);
resubmitTime = wrrl.getWorkerResubmitTime(workerId, stageNum, currTime);
assertEquals(currTime + 10000, resubmitTime);
resubmitTime = wrrl.getWorkerResubmitTime(workerId2, stageNum, currTime);
assertEquals(currTime + 5000, resubmitTime);
resubmitTime = wrrl.getWorkerResubmitTime(workerId, stageNum, currTime);
assertEquals(currTime + 15000, resubmitTime);
resubmitTime = wrrl.getWorkerResubmitTime(workerId, stageNum, currTime);
assertEquals(currTime + 15000, resubmitTime);
}
@Test
public void expireOldEntryTest() {
WorkerResubmitRateLimiter wrrl = new WorkerResubmitRateLimiter("5:10:15", 5);
int stageNum = 1;
long currTime = System.currentTimeMillis();
WorkerId workerId = new WorkerId("TestJob-1", 0, 1);
WorkerId workerId2 = new WorkerId("TestJob-1", 1, 2);
long resubmitTime = wrrl.getWorkerResubmitTime(workerId, stageNum, currTime);
List<WorkerResubmitRateLimiter.ResubmitRecord> resubmitRecords = wrrl.getResubmitRecords();
assertTrue(resubmitRecords.size() == 1);
currTime += 4_000;
resubmitTime = wrrl.getWorkerResubmitTime(workerId2, stageNum, currTime);
resubmitRecords = wrrl.getResubmitRecords();
assertEquals(2, resubmitRecords.size());
// Move time now to 6 seconds which is greater than expiry time of 5
currTime += 2000;
// This should expire worker id 1 but not 2
wrrl.expireResubmitRecords(currTime);
resubmitRecords = wrrl.getResubmitRecords();
assertEquals(1, resubmitRecords.size());
assertEquals(stageNum + "_" + workerId2.getWorkerIndex(), resubmitRecords.get(0).getWorkerKey());
}
}
| 7,891 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/jobcluster/job/JobTestTimeout.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//package io.mantisrx.master.jobcluster.job;
//
//import static org.junit.Assert.assertFalse;
//import static org.junit.Assert.assertTrue;
//import static org.junit.Assert.fail;
//
//import java.time.Instant;
//
//import org.junit.AfterClass;
//import org.junit.BeforeClass;
//import org.junit.Test;
//
//import com.google.common.collect.Lists;
//
//import akka.actor.ActorSystem;
//import akka.testkit.javadsl.TestKit;
//import io.mantisrx.master.jobcluster.job.JobActor.SubscriptionTracker;
//import io.mantisrx.runtime.JobOwner;
//import io.mantisrx.runtime.MachineDefinition;
//import io.mantisrx.runtime.WorkerMigrationConfig;
//import io.mantisrx.runtime.descriptor.SchedulingInfo;
//import io.mantisrx.server.master.domain.IJobClusterDefinition;
//import io.mantisrx.server.master.domain.JobClusterConfig;
//import io.mantisrx.server.master.domain.JobClusterDefinitionImpl;
//import io.mantisrx.server.master.persistence.IMantisStorageProvider;
//import io.mantisrx.server.master.persistence.MantisJobStore;
//import io.mantisrx.server.master.persistence.SimpleCachedFileStorageProvider;
//
//public class JobTestTimeout {
//
// static ActorSystem system;
// private static TestKit probe;
// private static String name;
// private static MantisJobStore jobStore;
// private static IMantisStorageProvider storageProvider;
// private static final String user = "mantis";
// private static IJobClusterDefinition jobClusterDefn ;
//
// @BeforeClass
// public static void setup() {
// system = ActorSystem.create();
//
// system = ActorSystem.create();
// probe = new TestKit(system);
// name = "testCluster";
//
// JobClusterConfig clusterConfig = new JobClusterConfig.Builder()
// .withArtifactName("myart")
// .withParameters(Lists.newArrayList())
// .withSchedulingInfo(new SchedulingInfo.Builder().numberOfStages(1).singleWorkerStageWithConstraints(new MachineDefinition(0, 0, 0, 0, 0), Lists.newArrayList(), Lists.newArrayList()).build())
// .withSubscriptionTimeoutSecs(0)
// .withVersion("0.0.1")
//
// .build();
//
// jobClusterDefn = new JobClusterDefinitionImpl.Builder()
// .withJobClusterConfig(clusterConfig)
// .withName(name)
//
// .withSubscriptionTimeoutSecs(0)
// .withUser(user)
// .withIsReadyForJobMaster(true)
// .withOwner(new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo"))
// .withMigrationConfig(WorkerMigrationConfig.DEFAULT)
//
// .build();
//
//
// storageProvider = new SimpleCachedFileStorageProvider();
// jobStore = new MantisJobStore(storageProvider);
//
//
// }
//
// @AfterClass
// public static void tearDown() {
// ((SimpleCachedFileStorageProvider)storageProvider).deleteAllFiles();
// TestKit.shutdownActorSystem(system);
// system = null;
// }
//
//
//
//
// @Test
// public void testHasTimedout() {
// long subsTimeout = 30;
// long minRuntime = 5;
// long maxRuntime = Long.MAX_VALUE;
// SubscriptionTracker st = new SubscriptionTracker(subsTimeout,minRuntime, maxRuntime);
// Instant now = Instant.now();
// st.onJobStart(now);
// // less than min runtime will not timeout
// assertFalse(st.shouldTerminate(now.plusSeconds(3)));
//
// // equal to min runtime will not timeout
// assertFalse(st.shouldTerminate(now.plusSeconds(5)));
//
// // greater than min runtime and but subscription time out not hit
// assertFalse(st.shouldTerminate(now.plusSeconds(7)));
//
// // if it is subscribed then min runtime does not matter
// st.onSubscribe();
// assertFalse(st.shouldTerminate(now.plusSeconds(7)));
//
// st.onUnSubscribe(now.plusSeconds(10));
// // subs timeout timer will now start
// // timeout will happen at t + 10 + 30 seconds
// assertFalse(st.shouldTerminate(now.plusSeconds(32)));
//
// assertTrue(st.shouldTerminate(now.plusSeconds(40)));
//
// assertTrue(st.shouldTerminate(now.plusSeconds(42)));
// }
//
// @Test
// public void testMinRuntimeGreater() {
// long subsTimeout = 30;
// long minRuntime = 40;
// long maxRuntime = Long.MAX_VALUE;
// SubscriptionTracker st = new SubscriptionTracker(subsTimeout, minRuntime, maxRuntime);
// Instant now = Instant.now();
// st.onJobStart(now);
// // less than min runtime will not timeout
// assertFalse(st.shouldTerminate(now.plusSeconds(35)));
//
// // equal to min runtime will not timeout
// assertFalse(st.shouldTerminate(now.plusSeconds(40)));
//
// // greater than min runtime and subscription time out hit
// assertTrue(st.shouldTerminate(now.plusSeconds(47)));
//
// // if it is subscribed then min runtime does not matter
// st.onSubscribe();
// assertFalse(st.shouldTerminate(now.plusSeconds(47)));
//
// st.onUnSubscribe(now.plusSeconds(50));
// // subs timeout timer will now start
// // timeout will happen at t + 50 + 30 seconds
// assertFalse(st.shouldTerminate(now.plusSeconds(62)));
//
// assertTrue(st.shouldTerminate(now.plusSeconds(80)));
//
// assertTrue(st.shouldTerminate(now.plusSeconds(82)));
// }
//
// @Test
// public void testHasNoTimeoutSet() {
// long subsTimeout = Long.MAX_VALUE;
// long minRuntime = 0;
// long maxRuntime = Long.MAX_VALUE;
// SubscriptionTracker st = new SubscriptionTracker(subsTimeout, minRuntime, maxRuntime);
// Instant now = Instant.now();
// st.onJobStart(now);
// // less than min runtime will not timeout
// assertFalse(st.shouldTerminate(now.plusSeconds(3)));
//
// // equal to min runtime will not timeout
// assertFalse(st.shouldTerminate(now.plusSeconds(5)));
//
// // greater than min runtime and but subscription time out not hit
// assertFalse(st.shouldTerminate(now.plusSeconds(7)));
//
// // if it is subscribed then min runtime does not matter
// st.onSubscribe();
// assertFalse(st.shouldTerminate(now.plusSeconds(7)));
//
// st.onUnSubscribe(now.plusSeconds(10));
// // subs timeout timer will now start
// // timeout will happen at t + 10 + 30 seconds
// assertFalse(st.shouldTerminate(now.plusSeconds(32)));
//
// assertFalse(st.shouldTerminate(now.plusSeconds(40)));
//
// assertFalse(st.shouldTerminate(now.plusSeconds(42)));
// }
//
// @Test
// public void testHasMinRuntimeTimeoutSetOnly() {
// long subsTimeout = 30;
// long minRuntime = 5;
// long maxRuntime = Long.MAX_VALUE;
// // If subs timeout is not explicitly set it is set to the default of 30
// SubscriptionTracker st = new SubscriptionTracker(subsTimeout, minRuntime, maxRuntime);
// Instant now = Instant.now();
// st.onJobStart(now);
// // less than min runtime will not timeout
// assertFalse(st.shouldTerminate(now.plusSeconds(3)));
//
// // equal to min runtime will not timeout
// assertFalse(st.shouldTerminate(now.plusSeconds(5)));
//
// // greater than min runtime and but subscription time out not hit
// assertFalse(st.shouldTerminate(now.plusSeconds(7)));
//
// // if it is subscribed then min runtime does not matter
// st.onSubscribe();
// assertFalse(st.shouldTerminate(now.plusSeconds(7)));
//
// st.onUnSubscribe(now.plusSeconds(10));
// // subs timeout timer will now start
// // timeout will happen at t + 10 + 30 seconds
// assertFalse(st.shouldTerminate(now.plusSeconds(32)));
//
// assertTrue(st.shouldTerminate(now.plusSeconds(40)));
//
// assertTrue(st.shouldTerminate(now.plusSeconds(42)));
// }
//
// @Test
// public void testHasMaxRuntimeTimeout() {
// long subsTimeout = 30;
// long minRuntime = 5;
// long maxRuntime = 40;
// // If subs timeout is not explicitly set it is set to the default of 30
// SubscriptionTracker st = new SubscriptionTracker(subsTimeout, minRuntime, maxRuntime);
// Instant now = Instant.now();
// st.onJobStart(now);
// // less than min runtime will not timeout
// assertFalse(st.shouldTerminate(now.plusSeconds(3)));
//
// // equal to min runtime will not timeout
// assertFalse(st.shouldTerminate(now.plusSeconds(5)));
//
// // greater than min runtime and but subscription time out not hit
// assertFalse(st.shouldTerminate(now.plusSeconds(7)));
//
// // if it is subscribed then min runtime does not matter
// st.onSubscribe();
// assertFalse(st.shouldTerminate(now.plusSeconds(7)));
//
// // max runtime exceeded
//
// assertFalse(st.shouldTerminate(now.plusSeconds(42)));
// }
//
// public void testMaxLessThanMinRuntime() {
// long subsTimeout = 30;
// long minRuntime = 5;
// long maxRuntime = 4;
// // If subs timeout is not explicitly set it is set to the default of 30
// try {
// SubscriptionTracker st = new SubscriptionTracker(subsTimeout, minRuntime, maxRuntime);
// fail();
// } catch (IllegalArgumentException e) {
//
// }
//
//
// }
//}
| 7,892 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/jobcluster/job/JobScaleUpDownTests.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.testkit.javadsl.TestKit;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventPublisherImpl;
import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl;
import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.jobcluster.proto.JobClusterProto;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.runtime.descriptor.StageScalingPolicy.ScalingReason;
import io.mantisrx.runtime.descriptor.StageScalingPolicy.Strategy;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.core.WorkerAssignments;
import io.mantisrx.server.core.WorkerHost;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.persistence.exceptions.InvalidJobException;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import rx.schedulers.Schedulers;
import rx.subjects.BehaviorSubject;
public class JobScaleUpDownTests {
    // Shared actor system for every test in this class; created in setup(), shut down in tearDown().
    static ActorSystem system;
    // Logging-only lifecycle event publisher handed to the job actors under test.
    final LifecycleEventPublisher lifecycleEventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl());
    /** Creates the shared actor system and initializes the master configuration once for the class. */
    @BeforeClass
    public static void setup() {
        system = ActorSystem.create();
        TestHelpers.setupMasterConfig();
    }
    /** Shuts down the shared actor system after all tests in the class have run. */
    @AfterClass
    public static void tearDown() {
        TestKit.shutdownActorSystem(system);
        system = null;
    }
////////////////////////Scale up Tests ////////////////////////////////////
@Test
public void testJobScaleUp() throws Exception, InvalidJobException, io.mantisrx.runtime.command.InvalidJobException {
final TestKit probe = new TestKit(system);
Map<ScalingReason, Strategy> smap = new HashMap<>();
smap.put(ScalingReason.CPU, new Strategy(ScalingReason.CPU, 0.5, 0.75, null));
smap.put(ScalingReason.DataDrop, new Strategy(ScalingReason.DataDrop, 0.0, 2.0, null));
SchedulingInfo sInfo = new SchedulingInfo.Builder()
.numberOfStages(1)
.multiWorkerScalableStageWithConstraints(1,
new MachineDefinition(1.0,1.0,1.0,3),
Lists.newArrayList(),
Lists.newArrayList(),
new StageScalingPolicy(1, 0, 10, 1, 1, 0, smap))
.build();
String clusterName = "testJobScaleUp";
MantisScheduler schedulerMock = mock(MantisScheduler.class);
MantisJobStore jobStoreMock = mock(MantisJobStore.class);
ActorRef jobActor = JobTestHelper.submitSingleStageScalableJob(system,probe, clusterName, sInfo, schedulerMock, jobStoreMock, lifecycleEventPublisher);
// send scale up request
jobActor.tell(new JobClusterManagerProto.ScaleStageRequest(clusterName+"-1", 1, 2, "", ""), probe.getRef());
JobClusterManagerProto.ScaleStageResponse scaleResp = probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class);
System.out.println("ScaleupResp " + scaleResp.message);
assertEquals(SUCCESS, scaleResp.responseCode);
assertEquals(2,scaleResp.getActualNumWorkers());
verify(jobStoreMock, times(1)).storeNewJob(any());
// initial worker
verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
//scale up worker
verify(jobStoreMock, times(1)).storeNewWorker(any());
verify(jobStoreMock, times(6)).updateWorker(any());
verify(jobStoreMock, times(3)).updateJob(any());
// initial worker + job master and scale up worker
verify(schedulerMock, times(3)).scheduleWorker(any());
}
@Test
public void testJobScaleDown() throws Exception, InvalidJobException, io.mantisrx.runtime.command.InvalidJobException {
final TestKit probe = new TestKit(system);
Map<ScalingReason, Strategy> smap = new HashMap<>();
smap.put(ScalingReason.CPU, new Strategy(ScalingReason.CPU, 0.5, 0.75, null));
smap.put(ScalingReason.DataDrop, new Strategy(ScalingReason.DataDrop, 0.0, 2.0, null));
SchedulingInfo sInfo = new SchedulingInfo.Builder()
.numberOfStages(1)
.multiWorkerScalableStageWithConstraints(2,
new MachineDefinition(1.0,1.0,1.0,3),
Lists.newArrayList(),
Lists.newArrayList(),
new StageScalingPolicy(1, 0, 10, 1, 1, 0, smap))
.build();
String clusterName = "testJobScaleUp";
MantisScheduler schedulerMock = mock(MantisScheduler.class);
MantisJobStore jobStoreMock = mock(MantisJobStore.class);
ActorRef jobActor = JobTestHelper.submitSingleStageScalableJob(system,probe, clusterName, sInfo, schedulerMock, jobStoreMock, lifecycleEventPublisher);
// send scale down request
jobActor.tell(new JobClusterManagerProto.ScaleStageRequest(clusterName+"-1",1, 1, "", ""), probe.getRef());
JobClusterManagerProto.ScaleStageResponse scaleResp = probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class);
System.out.println("ScaleDownResp " + scaleResp.message);
assertEquals(SUCCESS, scaleResp.responseCode);
assertEquals(1,scaleResp.getActualNumWorkers());
verify(jobStoreMock, times(1)).storeNewJob(any());
// initial worker
verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
// 9 for worker events + 1 for scale down
verify(jobStoreMock, times(10)).updateWorker(any());
verify(jobStoreMock, times(3)).updateJob(any());
// 1 scale down
verify(schedulerMock, times(1)).unscheduleAndTerminateWorker(any(), any());
// 1 job master + 2 workers
verify(schedulerMock, times(3)).scheduleWorker(any());
}
private void validateHost(Map<Integer, WorkerHost> hosts, int workerIdx, int workerNum, MantisJobState workerState) {
assertTrue(hosts.containsKey(workerNum));
assertEquals(hosts.get(workerNum).getHost(), "host1");
assertEquals(hosts.get(workerNum).getState(), workerState);
assertEquals(hosts.get(workerNum).getMetricsPort(), 8000);
assertEquals(hosts.get(workerNum).getWorkerIndex(), workerIdx);
assertEquals(hosts.get(workerNum).getWorkerNumber(), workerNum);
assertEquals(hosts.get(workerNum).getPort(), Collections.singletonList(9020));
}
    // TODO fix for timing issues
    //@Test
    /**
     * Disabled (flaky — see TODO above): subscribes to the job's scheduling-info
     * BehaviorSubject and verifies the exact sequence of 11 scheduling changes emitted
     * across scale-up, worker loss, worker replacement, scale-down, and job kill.
     * Each emitted event is round-tripped through JSON to mimic a remote subscriber.
     */
    public void testSchedulingInfo() throws Exception {
        // One count per expected scheduling change (11 total, enumerated in the switch below).
        CountDownLatch latch = new CountDownLatch(11);
        List<JobSchedulingInfo> schedulingChangesList = new CopyOnWriteArrayList<>();
        final TestKit probe = new TestKit(system);
        Map<ScalingReason, Strategy> smap = new HashMap<>();
        smap.put(ScalingReason.CPU, new Strategy(ScalingReason.CPU, 0.5, 0.75, null));
        smap.put(ScalingReason.DataDrop, new Strategy(ScalingReason.DataDrop, 0.0, 2.0, null));
        SchedulingInfo sInfo = new SchedulingInfo.Builder()
            .numberOfStages(1)
            .multiWorkerScalableStageWithConstraints(1,
                new MachineDefinition(1.0,1.0,1.0,3),
                Lists.newArrayList(),
                Lists.newArrayList(),
                new StageScalingPolicy(1, 0, 10, 1, 1, 0, smap))
            .build();
        String clusterName = "testSchedulingInfo";
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        CountDownLatch worker1Started = new CountDownLatch(1);
        ActorRef jobActor = JobTestHelper.submitSingleStageScalableJob(system,probe, clusterName, sInfo, schedulerMock, jobStoreMock, lifecycleEventPublisher);
        JobId jobId = new JobId(clusterName, 1);
        // Ask the job actor for its scheduling-info subject and subscribe to it.
        JobClusterManagerProto.GetJobSchedInfoRequest getJobSchedInfoRequest = new JobClusterManagerProto.GetJobSchedInfoRequest(jobId);
        jobActor.tell(getJobSchedInfoRequest, probe.getRef());
        JobClusterManagerProto.GetJobSchedInfoResponse resp = probe.expectMsgClass(JobClusterManagerProto.GetJobSchedInfoResponse.class);
        assertEquals(SUCCESS, resp.responseCode);
        assertTrue(resp.getJobSchedInfoSubject().isPresent());
        ObjectMapper mapper = new ObjectMapper();
        BehaviorSubject<JobSchedulingInfo> jobSchedulingInfoBehaviorSubject = resp.getJobSchedInfoSubject().get();
        // Serialize each event to JSON and back, then collect it; this both exercises the
        // wire format and snapshots every scheduling change for the assertions below.
        jobSchedulingInfoBehaviorSubject.doOnNext((js) -> {
            System.out.println("Got --> " + js.toString());
        })
        .map((e) -> {
            try {
                return mapper.writeValueAsString(e);
            } catch (JsonProcessingException e1) {
                e1.printStackTrace();
                return "{\"error\":" + e1.getMessage() + "}";
            }
        })
        .map((js) -> {
            try {
                return mapper.readValue(js,JobSchedulingInfo.class);
            } catch (IOException e) {
                e.printStackTrace();
                return null;
            }
        })
        .filter((j) -> j!=null)
        .doOnNext((js) -> {
            // Map<Integer, WorkerAssignments> workerAssignments = js.getWorkerAssignments();
            // WorkerAssignments workerAssignments1 = workerAssignments.get(1);
            // assertEquals(1, workerAssignments1.getNumWorkers());
            // Map<Integer, WorkerHost> hosts = workerAssignments1.getHosts();
            // // make sure worker number 1 exists
            // assertTrue(hosts.containsKey(1));
        })
        .doOnCompleted(() -> {
            System.out.println("SchedulingInfo completed");
            System.out.println(schedulingChangesList.size() + " Sched changes received");
        })
        .observeOn(Schedulers.io())
        .subscribe((js) -> {
            latch.countDown();
            schedulingChangesList.add(js);
        });
        // send scale up request
        jobActor.tell(new JobClusterManagerProto.ScaleStageRequest(jobId.getId(), 1, 2, "", ""), probe.getRef());
        JobClusterManagerProto.ScaleStageResponse scaleResp = probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class);
        System.out.println("ScaleupResp " + scaleResp.message);
        assertEquals(SUCCESS, scaleResp.responseCode);
        assertEquals(2,scaleResp.getActualNumWorkers());
        // Drive the new worker (number 3) through Launched -> StartInitiated -> Started.
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe,jobActor,jobId.getId(),1,new WorkerId(jobId.getId(),1,3));
        // worker gets lost
        JobTestHelper.sendWorkerTerminatedEvent(probe,jobActor,jobId.getId(),new WorkerId(jobId.getId(),1,3));
        // Send replacement worker messages
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe,jobActor,jobId.getId(),1,new WorkerId(jobId.getId(),1,4));
        // scale down
        jobActor.tell(new JobClusterManagerProto.ScaleStageRequest(jobId.getId(),1, 1, "", ""), probe.getRef());
        JobClusterManagerProto.ScaleStageResponse scaleDownResp = probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class);
        System.out.println("ScaleDownResp " + scaleDownResp.message);
        assertEquals(SUCCESS, scaleDownResp.responseCode);
        assertEquals(1,scaleDownResp.getActualNumWorkers());
        // kill job
        jobActor.tell(new JobClusterProto.KillJobRequest(jobId,"killed", JobCompletedReason.Killed, "test", probe.getRef()),probe.getRef());
        probe.expectMsgClass(JobClusterProto.KillJobResponse.class);
        for (JobSchedulingInfo jobSchedulingInfo : schedulingChangesList) {
            System.out.println(jobSchedulingInfo);
        }
        /*
        SchedulingChange [jobId=testSchedulingInfo-1,
        workerAssignments={
            0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
            1=WorkerAssignments [stage=1, numWorkers=1, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}]}]
        SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
            0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
            1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}]}]
        SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
            0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
            1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]],
                3=WorkerHost [state=Launched, workerIndex=1, host=host1, port=[9020]]}]}]
        SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
            0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
            1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]],
                3=WorkerHost [state=StartInitiated, workerIndex=1, host=host1, port=[9020]]}]}]
        SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
            0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
            1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]],
                3=WorkerHost [state=Started, workerIndex=1, host=host1, port=[9020]]}]}]
        SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
            0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
            1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}]}]
        SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
            0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
            1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]],
                4=WorkerHost [state=Launched, workerIndex=1, host=host1, port=[9020]]}]}]
        SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
            0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
            1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]],
                4=WorkerHost [state=StartInitiated, workerIndex=1, host=host1, port=[9020]]}]}]
        SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
            0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
            1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]],
                4=WorkerHost [state=Started, workerIndex=1, host=host1, port=[9020]]}]}]
        SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
            0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
            1=WorkerAssignments [stage=1, numWorkers=1, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}
            ]}]
        */
        latch.await(1000, TimeUnit.SECONDS);
        System.out.println("---->Verifying scheduling changes " + schedulingChangesList.size());
        assertEquals(11, schedulingChangesList.size());
        // Walk the captured changes in order; each case asserts the exact worker assignments
        // expected after the corresponding lifecycle event (see the commented trace above).
        for(int i = 0;i < schedulingChangesList.size(); i++) {
            JobSchedulingInfo js = schedulingChangesList.get(i);
            // jobid is correct
            assertEquals(jobId.getId(),js.getJobId());
            Map<Integer, WorkerAssignments> workerAssignments = js.getWorkerAssignments();
            //has info about stage 1
            System.out.println("WorkerAssignments -> " + workerAssignments);
            //assertTrue(workerAssignments.containsKey(1));
            switch(i) {
            case 0:
                WorkerAssignments wa0 = workerAssignments.get(1);
                assertEquals(1, wa0.getNumWorkers());
                Map<Integer, WorkerHost> hosts0 = wa0.getHosts();
                // make sure worker number 2 exists
                validateHost(hosts0, 0, 2, MantisJobState.Started);
                break;
            // scale up by 1
            case 1:
                WorkerAssignments wa1 = workerAssignments.get(1);
                assertEquals(2, wa1.getNumWorkers());
                Map<Integer, WorkerHost> hosts1 = wa1.getHosts();
                assertEquals(1, hosts1.size());
                // first update has only numWorkers updated but the new worker is still in Accepted state, so no host entry for it
                validateHost(hosts1, 0, 2, MantisJobState.Started);
                assertFalse(hosts1.containsKey(3));
                break;
            case 2:
                WorkerAssignments wa2 = workerAssignments.get(1);
                assertEquals(2, wa2.getNumWorkers());
                Map<Integer, WorkerHost> hosts2 = wa2.getHosts();
                assertEquals(2, hosts2.size());
                // next update should have both numWorkers and the new worker in Launched state
                validateHost(hosts2, 0, 2, MantisJobState.Started);
                validateHost(hosts2, 1, 3, MantisJobState.Launched);
                break;
            case 3:
                WorkerAssignments wa3 = workerAssignments.get(1);
                assertEquals(2, wa3.getNumWorkers());
                Map<Integer, WorkerHost> hosts3 = wa3.getHosts();
                assertEquals(2, hosts3.size());
                // this update is for new worker in StartInit state
                validateHost(hosts3, 0, 2, MantisJobState.Started);
                validateHost(hosts3, 1, 3, MantisJobState.StartInitiated);
                break;
            case 4:
                WorkerAssignments wa4 = workerAssignments.get(1);
                assertEquals(2, wa4.getNumWorkers());
                Map<Integer, WorkerHost> hosts4 = wa4.getHosts();
                assertEquals(2, hosts4.size());
                // this update is for new worker in Started state
                validateHost(hosts4, 0, 2, MantisJobState.Started);
                validateHost(hosts4, 1, 3, MantisJobState.Started);
                break;
            case 5:
                // worker 3 is lost and should be resubmitted
                WorkerAssignments wa5 = workerAssignments.get(1);
                assertEquals(2, wa5.getNumWorkers());
                Map<Integer, WorkerHost> hosts5 = wa5.getHosts();
                assertEquals(1, hosts5.size());
                validateHost(hosts5, 0, 2, MantisJobState.Started);
                assertFalse(hosts5.containsKey(3));
                break;
            case 6:
                // worker 3 is replaced by worker num 4
                WorkerAssignments wa6 = workerAssignments.get(1);
                assertEquals(2, wa6.getNumWorkers());
                Map<Integer, WorkerHost> hosts6 = wa6.getHosts();
                // this update should have both numWorkers and the new worker in Launched state
                assertEquals(2, hosts6.size());
                validateHost(hosts6, 0, 2, MantisJobState.Started);
                validateHost(hosts6, 1, 4, MantisJobState.Launched);
                break;
            case 7:
                WorkerAssignments wa7 = workerAssignments.get(1);
                assertEquals(2, wa7.getNumWorkers());
                Map<Integer, WorkerHost> hosts7 = wa7.getHosts();
                // update for new worker in StartInit state
                assertEquals(2, hosts7.size());
                validateHost(hosts7, 0, 2, MantisJobState.Started);
                validateHost(hosts7, 1, 4, MantisJobState.StartInitiated);
                break;
            case 8:
                WorkerAssignments wa8 = workerAssignments.get(1);
                assertEquals(2, wa8.getNumWorkers());
                Map<Integer, WorkerHost> hosts8 = wa8.getHosts();
                // update for new worker in Started state
                assertEquals(2, hosts8.size());
                validateHost(hosts8, 0, 2, MantisJobState.Started);
                validateHost(hosts8, 1, 4, MantisJobState.Started);
                break;
            case 9:
                // scale down, worker 4 should be gone now and numWorkers set to 1
                WorkerAssignments wa9 = workerAssignments.get(1);
                assertEquals(1, wa9.getNumWorkers());
                Map<Integer, WorkerHost> hosts9 = wa9.getHosts();
                assertTrue(hosts9.containsKey(2));
                assertEquals(1, hosts9.size());
                validateHost(hosts9, 0, 2, MantisJobState.Started);
                break;
            case 10:
                // job has been killed
                assertTrue(workerAssignments.isEmpty());
                break;
            default:
                fail();
            }
        }
        //
        //        verify(jobStoreMock, times(1)).storeNewJob(any());
        //        // initial worker
        //        verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
        //
        //        //scale up worker
        //        verify(jobStoreMock, times(1)).storeNewWorker(any());
        //
        //       // verify(jobStoreMock, times(17)).updateWorker(any());
        //
        //        verify(jobStoreMock, times(3)).updateJob(any());
        //
        //        // initial worker + job master and scale up worker + resubmit
        //        verify(schedulerMock, times(4)).scheduleWorker(any());
        //
        //        verify(schedulerMock, times(4)).unscheduleAndTerminateWorker(any(), any());
    }
/**
 * A scalable stage configured with an empty strategy map must reject a
 * scale-up request with a CLIENT_ERROR and leave the worker count untouched.
 */
@Test
public void testJobScaleUpFailsIfNoScaleStrategy() throws Exception {
    final TestKit testProbe = new TestKit(system);
    // Empty strategy map -> autoscaling is effectively disabled for this stage.
    final Map<ScalingReason, Strategy> noStrategies = new HashMap<>();
    final SchedulingInfo schedulingInfo = new SchedulingInfo.Builder()
            .numberOfStages(1)
            .multiWorkerScalableStageWithConstraints(
                    1,
                    new MachineDefinition(1.0, 1.0, 1.0, 3),
                    Lists.newArrayList(),
                    Lists.newArrayList(),
                    new StageScalingPolicy(1, 0, 10, 1, 1, 0, noStrategies))
            .build();
    final String clusterName = "testJobScaleUpFailsIfNoScaleStrategy";
    final MantisScheduler scheduler = mock(MantisScheduler.class);
    final MantisJobStore store = mock(MantisJobStore.class);
    final ActorRef jobActor = JobTestHelper.submitSingleStageScalableJob(
            system, testProbe, clusterName, schedulingInfo, scheduler, store, lifecycleEventPublisher);

    // Ask stage 1 to go from 1 to 2 workers; the request must be rejected.
    jobActor.tell(
            new JobClusterManagerProto.ScaleStageRequest(clusterName + "-1", 1, 2, "", ""),
            testProbe.getRef());
    final JobClusterManagerProto.ScaleStageResponse scaleResp =
            testProbe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class);
    System.out.println("ScaleupResp " + scaleResp.message);
    assertEquals(CLIENT_ERROR, scaleResp.responseCode);
    assertEquals(0, scaleResp.getActualNumWorkers());

    // Persistence: job + initial worker stored, but no scale-up worker was created.
    verify(store, times(1)).storeNewJob(any());
    verify(store, times(1)).storeNewWorkers(any(), any());
    verify(store, times(0)).storeNewWorker(any());
    verify(store, times(3)).updateWorker(any());
    verify(store, times(3)).updateJob(any());
    // Scheduling: only the initial worker was ever scheduled.
    verify(scheduler, times(1)).scheduleWorker(any());
}
/**
 * A scale-up request beyond the policy's max must be rejected when min == max
 * (the stage cannot grow past its fixed size).
 *
 * <p>Fix: the cluster name was copy-pasted from the previous test
 * ("testJobScaleUpFailsIfNoScaleStrategy"), which made the two tests share a
 * cluster name; it now matches this test to keep fixtures isolated.
 */
@Test
public void testJobScaleUpFailsIfMinEqualsMax() throws Exception {
    final TestKit probe = new TestKit(system);
    Map<ScalingReason, Strategy> smap = new HashMap<>();
    // min == max == 1: the stage is scalable in shape but pinned to one worker.
    SchedulingInfo sInfo = new SchedulingInfo.Builder()
            .numberOfStages(1)
            .multiWorkerScalableStageWithConstraints(1,
                    new MachineDefinition(1.0, 1.0, 1.0, 3),
                    Lists.newArrayList(),
                    Lists.newArrayList(),
                    new StageScalingPolicy(1, 1, 1, 1, 1, 0, smap))
            .build();
    String clusterName = "testJobScaleUpFailsIfMinEqualsMax";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    ActorRef jobActor = JobTestHelper.submitSingleStageScalableJob(
            system, probe, clusterName, sInfo, schedulerMock, jobStoreMock, lifecycleEventPublisher);

    // Request 3 workers for stage 1; must fail because the stage is pinned at 1.
    jobActor.tell(new JobClusterManagerProto.ScaleStageRequest(clusterName + "-1", 1, 3, "", ""), probe.getRef());
    JobClusterManagerProto.ScaleStageResponse scaleResp =
            probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class);
    System.out.println("ScaleupResp " + scaleResp.message);
    assertEquals(CLIENT_ERROR, scaleResp.responseCode);
    assertEquals(0, scaleResp.getActualNumWorkers());

    verify(jobStoreMock, times(1)).storeNewJob(any());
    // initial worker
    verify(jobStoreMock, times(1)).storeNewWorkers(any(), any());
    // no scale up worker happened
    verify(jobStoreMock, times(0)).storeNewWorker(any());
    verify(jobStoreMock, times(3)).updateWorker(any());
    verify(jobStoreMock, times(3)).updateJob(any());
    // initial worker only
    verify(schedulerMock, times(1)).scheduleWorker(any());
}
/** A policy with min &lt; max and at least one strategy reports itself enabled. */
@Test
public void stageScalingPolicyTest() {
    final Map<ScalingReason, Strategy> strategies = new HashMap<>();
    strategies.put(ScalingReason.CPU, new Strategy(ScalingReason.CPU, 0.5, 0.75, null));
    // stage 1, min 0, max 10, step 1/1, cooldown 300s
    final StageScalingPolicy policy = new StageScalingPolicy(1, 0, 10, 1, 1, 300, strategies);
    assertTrue(policy.isEnabled());
}
/** Without any scaling strategies the policy must report itself disabled. */
@Test
public void stageScalingPolicyNoStrategyTest() {
    // Empty strategy map: nothing can ever trigger a scaling action.
    final Map<ScalingReason, Strategy> strategies = new HashMap<>();
    final StageScalingPolicy policy = new StageScalingPolicy(1, 0, 10, 1, 1, 300, strategies);
    assertFalse(policy.isEnabled());
}
/** min == max pins the stage size, so the policy is disabled even with a strategy. */
@Test
public void stageScalingPolicyMinEqMaxTest() {
    final Map<ScalingReason, Strategy> strategies = new HashMap<>();
    strategies.put(ScalingReason.CPU, new Strategy(ScalingReason.CPU, 0.5, 0.75, null));
    // min and max both 10: there is no room to scale in either direction.
    final StageScalingPolicy policy = new StageScalingPolicy(1, 10, 10, 1, 1, 300, strategies);
    assertFalse(policy.isEnabled());
}
/**
 * min &gt; max is normalized by the constructor: max is raised to min,
 * and the resulting policy is still enabled.
 */
@Test
public void stageScalingPolicyMinGreaterThanMaxTest() {
    final Map<ScalingReason, Strategy> strategies = new HashMap<>();
    strategies.put(ScalingReason.CPU, new Strategy(ScalingReason.CPU, 0.5, 0.75, null));
    // min 10 > max 1 on purpose, to exercise the normalization path.
    final StageScalingPolicy policy = new StageScalingPolicy(1, 10, 1, 1, 1, 300, strategies);
    assertTrue(policy.isEnabled());
    // max will be set equal to min
    assertEquals(10, policy.getMax());
}
}
| 7,893 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/jobcluster/job/JobTestLifecycle.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SERVER_ERROR;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS;
import static java.util.Optional.empty;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.PoisonPill;
import akka.testkit.javadsl.TestKit;
import com.netflix.mantis.master.scheduler.TestHelpers;
import io.mantisrx.master.events.*;
import io.mantisrx.master.jobcluster.job.JobActor.WorkerNumberGenerator;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.master.jobcluster.job.worker.JobWorker;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterProto;
import io.mantisrx.master.jobcluster.proto.JobProto;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.command.InvalidJobException;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.core.domain.JobMetadata;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.domain.IJobClusterDefinition;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.persistence.IMantisPersistenceProvider;
import io.mantisrx.server.master.persistence.KeyValueBasedPersistenceProvider;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.ScheduleRequest;
import io.mantisrx.server.master.store.FileBasedStore;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.io.IOException;
import java.net.URL;
import java.time.Instant;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.InOrder;
import org.mockito.Mockito;
public class JobTestLifecycle {
// Shared actor system for every test in this class; created in setup(), shut down in tearDown().
static ActorSystem system;
// File-backed job store; used by tests that want real persistence instead of a mock.
private static MantisJobStore jobStore;
private static IMantisPersistenceProvider storageProvider;
// Lifecycle event publisher wired to logging-only audit/status/worker subscribers.
private static LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(new AuditEventSubscriberLoggingImpl(), new StatusEventSubscriberLoggingImpl(), new WorkerEventSubscriberLoggingImpl());
// No-op cost calculator; these tests do not assert on job costs.
private final CostsCalculator costsCalculator = CostsCalculator.noop();
// Default user name for requests that require one. NOTE(review): appears unused in the visible methods.
private static final String user = "mantis";
/** One-time setup: actor system, master config, and a file-backed job store. */
@BeforeClass
public static void setup() {
    system = ActorSystem.create();
    // Master config must be installed before any actor reads settings.
    TestHelpers.setupMasterConfig();
    storageProvider = new KeyValueBasedPersistenceProvider(new FileBasedStore(), eventPublisher);
    jobStore = new MantisJobStore(storageProvider);
}
/** One-time teardown: remove files written by the file-based store, then stop the actor system. */
@AfterClass
public static void tearDown() {
    JobTestHelper.deleteAllFiles();
    TestKit.shutdownActorSystem(system);
    system = null;
}
/**
 * A job actor that has not been sent {@code InitJob} must reject a details
 * request with CLIENT_ERROR instead of serving state.
 *
 * <p>Fix: the catch block previously only printed the exception, which let an
 * unexpected failure pass the test silently; it now fails the test.
 */
@Test
public void testJobSubmitWithoutInit() {
    final TestKit probe = new TestKit(system);
    String clusterName = "testJobSubmitCluster";
    IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName);
    JobDefinition jobDefn;
    try {
        jobDefn = JobTestHelper.generateJobDefinition(clusterName);
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
                .withJobId(new JobId(clusterName, 1))
                .withSubmittedAt(Instant.now())
                .withJobState(JobState.Accepted)
                .withNextWorkerNumToUse(1)
                .withJobDefinition(jobDefn)
                .build();
        final ActorRef jobActor = system.actorOf(JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreMock, schedulerMock, eventPublisher, costsCalculator));
        String jobId = clusterName + "-1";
        // Deliberately no InitJob message before querying the actor.
        jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
        GetJobDetailsResponse resp = probe.expectMsgClass(GetJobDetailsResponse.class);
        System.out.println(resp.message);
        assertEquals(CLIENT_ERROR, resp.responseCode);
    } catch (Exception e) {
        e.printStackTrace();
        // Do not swallow: an exception here means the test is broken.
        fail(e.getMessage());
    }
}
/**
 * Happy path: init the job actor, drive its single worker through
 * Launched -> StartInitiated -> heartbeat, and verify the job moves from
 * Accepted to Launched with the expected persistence interactions.
 */
@Test
public void testJobSubmit() {
    final TestKit probe = new TestKit(system);
    String clusterName = "testJobSubmitCluster";
    IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName);
    JobDefinition jobDefn;
    try {
        jobDefn = JobTestHelper.generateJobDefinition(clusterName);
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        // Job starts in Accepted; the actor is expected to drive the transition to Launched.
        MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
            .withJobId(new JobId(clusterName,1))
            .withSubmittedAt(Instant.now())
            .withJobState(JobState.Accepted)
            .withNextWorkerNumToUse(1)
            .withJobDefinition(jobDefn)
            .build();
        final ActorRef jobActor = system.actorOf(JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreMock, schedulerMock, eventPublisher, costsCalculator));
        jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
        JobProto.JobInitialized initMsg = probe.expectMsgClass(JobProto.JobInitialized.class);
        assertEquals(SUCCESS, initMsg.responseCode);
        String jobId = clusterName + "-1";
        jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
        GetJobDetailsResponse resp = probe.expectMsgClass(GetJobDetailsResponse.class);
        System.out.println("resp " + resp + " msg " + resp.message);
        assertEquals(SUCCESS, resp.responseCode);
        // Before any worker events arrive the job is still Accepted.
        assertEquals(JobState.Accepted,resp.getJobMetadata().get().getState());
        assertTrue(resp.getJobMetadata().get().getStageMetadata(1).isPresent());
        // send launched event
        WorkerId workerId = new WorkerId(jobId, 0, 1);
        int stageNum = 1;
        JobTestHelper.sendWorkerLaunchedEvent(probe, jobActor, workerId, stageNum);
        JobTestHelper.sendStartInitiatedEvent(probe, jobActor, stageNum, workerId);
        // send heartbeat
        JobTestHelper.sendHeartBeat(probe, jobActor, jobId, stageNum, workerId);
        // check job status again
        jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
        GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
        System.out.println("resp " + resp2 + " msg " + resp2.message);
        assertEquals(SUCCESS, resp2.responseCode);
        // The single worker has started, so the job is now Launched.
        assertEquals(JobState.Launched,resp2.getJobMetadata().get().getState());
        verify(jobStoreMock, times(1)).storeNewJob(any());
        verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
        // 3 worker updates — presumably one per event (launched, start-initiated, started); TODO confirm.
        verify(jobStoreMock, times(3)).updateWorker(any());
        verify(jobStoreMock, times(3)).updateJob(any());
    } catch (InvalidJobException e) {
        e.printStackTrace();
        fail();
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}
/**
 * Submits a perpetual single-worker job and verifies both the Accepted -> Launched
 * transition and the exact {@link ScheduleRequest} handed to the scheduler
 * (durability type, machine definition, ports).
 */
@Test
public void testJobSubmitPerpetual() {
    final TestKit probe = new TestKit(system);
    String clusterName = "testJobSubmitPerpetual";
    IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName);
    JobDefinition jobDefn;
    try {
        MachineDefinition machineDefinition = new MachineDefinition(1.0, 1.0, 1.0, 1.0, 3);
        SchedulingInfo schedInfo = new SchedulingInfo.Builder()
            .numberOfStages(1)
            .singleWorkerStageWithConstraints(machineDefinition,
                Lists.newArrayList(),
                Lists.newArrayList()).build();
        // Perpetual SLA: the job is expected to run indefinitely.
        jobDefn = new JobDefinition.Builder()
            .withName(clusterName)
            .withParameters(Lists.newArrayList())
            .withLabels(Lists.newArrayList())
            .withSchedulingInfo(schedInfo)
            .withArtifactName("myart")
            .withSubscriptionTimeoutSecs(30)
            .withUser("njoshi")
            .withNumberOfStages(schedInfo.getStages().size())
            .withJobSla(new JobSla(0, 0, null, MantisJobDurationType.Perpetual, null))
            .build();
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
            .withJobId(new JobId(clusterName,1))
            .withSubmittedAt(Instant.now())
            .withJobState(JobState.Accepted)
            .withNextWorkerNumToUse(1)
            .withJobDefinition(jobDefn)
            .build();
        final ActorRef jobActor = system.actorOf(JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreMock, schedulerMock, eventPublisher, costsCalculator));
        jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
        JobProto.JobInitialized initMsg = probe.expectMsgClass(JobProto.JobInitialized.class);
        assertEquals(SUCCESS, initMsg.responseCode);
        String jobId = clusterName + "-1";
        jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
        GetJobDetailsResponse resp = probe.expectMsgClass(GetJobDetailsResponse.class);
        System.out.println("resp " + resp + " msg " + resp.message);
        assertEquals(SUCCESS, resp.responseCode);
        assertEquals(JobState.Accepted,resp.getJobMetadata().get().getState());
        assertTrue(resp.getJobMetadata().get().getStageMetadata(1).isPresent());
        // send launched event
        WorkerId workerId = new WorkerId(jobId, 0, 1);
        int stageNum = 1;
        JobTestHelper.sendWorkerLaunchedEvent(probe, jobActor, workerId, stageNum);
        JobTestHelper.sendStartInitiatedEvent(probe, jobActor, stageNum, workerId);
        // send heartbeat
        JobTestHelper.sendHeartBeat(probe, jobActor, jobId, stageNum, workerId);
        // check job status again
        jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
        GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
        System.out.println("resp " + resp2 + " msg " + resp2.message);
        assertEquals(SUCCESS, resp2.responseCode);
        assertEquals(JobState.Launched,resp2.getJobMetadata().get().getState());
        verify(jobStoreMock, times(1)).storeNewJob(any());
        verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
        verify(jobStoreMock, times(3)).updateWorker(any());
        verify(jobStoreMock, times(3)).updateJob(any());
        verify(schedulerMock,times(1)).scheduleWorker(any());
        // Reconstruct the exact schedule request the actor should have produced.
        JobMetadata jobMetadata = new JobMetadata(jobId, new URL("http://myart" +
            ""),1,"njoshi",schedInfo,Lists.newArrayList(),0,10, 0);
        ScheduleRequest expectedScheduleRequest = new ScheduleRequest(workerId,
            1,4, jobMetadata,MantisJobDurationType.Perpetual,machineDefinition,Lists.newArrayList(),Lists.newArrayList(),0,empty());
        verify(schedulerMock).scheduleWorker(expectedScheduleRequest);
    } catch (InvalidJobException e) {
        e.printStackTrace();
        fail();
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}
/**
 * When persisting the new job fails (store throws IOException), initialization
 * must answer SERVER_ERROR, and subsequent requests must get CLIENT_ERROR
 * because the actor never became ready.
 *
 * <p>Fix: the cluster name was "testJobSubmitPersistenceFails", inconsistent
 * with the test's own name; it now matches for easier debugging of fixtures.
 */
@Test
public void testJobSubmitInitalizationFails() {
    final TestKit probe = new TestKit(system);
    String clusterName = "testJobSubmitInitalizationFails";
    IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName);
    JobDefinition jobDefn;
    try {
        jobDefn = JobTestHelper.generateJobDefinition(clusterName);
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        // Force the persistence failure that this test is about.
        Mockito.doThrow(IOException.class).when(jobStoreMock).storeNewJob(any());
        MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
                .withJobId(new JobId(clusterName, 1))
                .withSubmittedAt(Instant.now())
                .withJobState(JobState.Accepted)
                .withNextWorkerNumToUse(1)
                .withJobDefinition(jobDefn)
                .build();
        final ActorRef jobActor = system.actorOf(JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreMock, schedulerMock, eventPublisher, costsCalculator));
        jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
        JobProto.JobInitialized initMsg = probe.expectMsgClass(JobProto.JobInitialized.class);
        assertEquals(SERVER_ERROR, initMsg.responseCode);
        System.out.println(initMsg.message);
        String jobId = clusterName + "-1";
        // The actor failed to initialize, so a details request is a client error.
        jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
        GetJobDetailsResponse resp = probe.expectMsgClass(GetJobDetailsResponse.class);
        System.out.println("resp " + resp + " msg " + resp.message);
        assertEquals(CLIENT_ERROR, resp.responseCode);
    } catch (InvalidJobException e) {
        e.printStackTrace();
        fail();
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}
/**
 * A single-stage job with two workers stays Accepted until BOTH workers have
 * started, and only then transitions to Launched.
 */
@Test
public void testJobSubmitWithMultipleWorkers() {
    final TestKit probe = new TestKit(system);
    String clusterName = "testJobSubmitWithMultipleWorkersCluster";
    IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName);
    JobDefinition jobDefn;
    try {
        SchedulingInfo sInfo = new SchedulingInfo.Builder().numberOfStages(1).multiWorkerStageWithConstraints(2, new MachineDefinition(1.0,1.0,1.0,3), Lists.newArrayList(), Lists.newArrayList()).build();
        jobDefn = JobTestHelper.generateJobDefinition(clusterName, sInfo);
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        // Note: this test uses job instance 2 of the cluster.
        MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
            .withJobId(new JobId(clusterName,2))
            .withSubmittedAt(Instant.now())
            .withJobState(JobState.Accepted)
            .withNextWorkerNumToUse(1)
            .withJobDefinition(jobDefn)
            .build();
        final ActorRef jobActor = system.actorOf(JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreMock, schedulerMock, eventPublisher, costsCalculator));
        jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
        JobProto.JobInitialized initMsg = probe.expectMsgClass(JobProto.JobInitialized.class);
        assertEquals(SUCCESS, initMsg.responseCode);
        String jobId = clusterName + "-2";
        jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
        GetJobDetailsResponse resp = probe.expectMsgClass(GetJobDetailsResponse.class);
        System.out.println("resp " + resp + " msg " + resp.message);
        assertEquals(SUCCESS, resp.responseCode);
        assertEquals(JobState.Accepted,resp.getJobMetadata().get().getState());
        int stageNo = 1;
        // send launched event
        WorkerId workerId = new WorkerId(jobId, 0, 1);
        // send heartbeat
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId);
        // check job status again
        jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
        GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
        System.out.println("resp " + resp2 + " msg " + resp2.message);
        assertEquals(SUCCESS, resp2.responseCode);
        // Only 1 worker has started.
        assertEquals(JobState.Accepted,resp2.getJobMetadata().get().getState());
        // send launched event
        WorkerId workerId2 = new WorkerId(jobId, 1, 2);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId2);
        // check job status again
        jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
        GetJobDetailsResponse resp3 = probe.expectMsgClass(GetJobDetailsResponse.class);
        System.out.println("resp " + resp3 + " msg " + resp3.message);
        assertEquals(SUCCESS, resp3.responseCode);
        // 2 worker have started so job should be started.
        assertEquals(JobState.Launched,resp3.getJobMetadata().get().getState());
        verify(jobStoreMock, times(1)).storeNewJob(any());
        verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
        // 6 updates — presumably 3 lifecycle events per worker x 2 workers; TODO confirm.
        verify(jobStoreMock, times(6)).updateWorker(any());
        verify(jobStoreMock, times(3)).updateJob(any());
    } catch (InvalidJobException e) {
        e.printStackTrace();
        fail();
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}
/**
 * A two-stage scalable job (2 + 3 workers) stays Accepted until every worker
 * across all stages has started, and then transitions to Launched.
 */
@Test
public void testJobSubmitWithMultipleStagesAndWorkers() {
    final TestKit probe = new TestKit(system);
    String clusterName = "testJobSubmitWithMultipleStagesAndWorkers";
    IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName);
    JobDefinition jobDefn;
    try {
        Map<StageScalingPolicy.ScalingReason, StageScalingPolicy.Strategy> smap = new HashMap<>();
        smap.put(StageScalingPolicy.ScalingReason.Memory, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.Memory, 0.1, 0.6, null));
        SchedulingInfo.Builder builder = new SchedulingInfo.Builder()
            .numberOfStages(2)
            .multiWorkerScalableStageWithConstraints(
                2,
                new MachineDefinition(1, 1.24, 0.0, 1, 1),
                null, null,
                new StageScalingPolicy(1, 1, 3, 1, 1, 60, smap)
            )
            .multiWorkerScalableStageWithConstraints(
                3,
                new MachineDefinition(1, 1.24, 0.0, 1, 1),
                null, null,
                new StageScalingPolicy(1, 1, 3, 1, 1, 60, smap)
            );
        SchedulingInfo sInfo = builder.build();
        System.out.println("SchedulingInfo " + sInfo);
        jobDefn = JobTestHelper.generateJobDefinition(clusterName, sInfo);
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
            .withJobId(new JobId(clusterName,1))
            .withSubmittedAt(Instant.now())
            .withJobState(JobState.Accepted)
            .withNextWorkerNumToUse(1)
            .withJobDefinition(jobDefn)
            .build();
        final ActorRef jobActor = system.actorOf(JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreMock, schedulerMock, eventPublisher, costsCalculator));
        jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
        JobProto.JobInitialized initMsg = probe.expectMsgClass(JobProto.JobInitialized.class);
        assertEquals(SUCCESS, initMsg.responseCode);
        String jobId = clusterName + "-1";
        jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
        GetJobDetailsResponse resp = probe.expectMsgClass(GetJobDetailsResponse.class);
        System.out.println("resp " + resp + " msg " + resp.message);
        assertEquals(SUCCESS, resp.responseCode);
        assertEquals(JobState.Accepted,resp.getJobMetadata().get().getState());
        // NOTE(review): stage indices in this test start at 0 while SchedulingInfo
        // stages are keyed from 1 — presumably the helper keys events by WorkerId,
        // not stage number; confirm against JobTestHelper.
        int stageNo = 0;
        // send launched event
        WorkerId workerId = new WorkerId(jobId, 0, 1);
        // send heartbeat
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId);
        // check job status again
        jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
        GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
        System.out.println("resp " + resp2 + " msg " + resp2.message);
        assertEquals(SUCCESS, resp2.responseCode);
        // Only 1 worker has started.
        assertEquals(JobState.Accepted,resp2.getJobMetadata().get().getState());
        // send launched events for the rest of the workers
        int nextWorkerNumber = 1;
        int stage = 0;
        Iterator<Map.Entry<Integer, StageSchedulingInfo>> it = sInfo.getStages().entrySet().iterator();
        while(it.hasNext()) {
            Map.Entry<Integer, StageSchedulingInfo> integerStageSchedulingInfoEntry = it.next();
            StageSchedulingInfo stageSchedulingInfo = integerStageSchedulingInfoEntry.getValue();
            System.out.println("Workers -> " + stageSchedulingInfo.getNumberOfInstances() + " in stage " + stage);
            for(int i=0; i<stageSchedulingInfo.getNumberOfInstances(); i++) {
                WorkerId wId = new WorkerId(jobId, i, nextWorkerNumber++);
                System.out.println("Sending events for worker --> " + wId + " Stage " + stage);
                JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stage, wId);
            }
            stage++;
        }
        // check job status again
        jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
        GetJobDetailsResponse resp3 = probe.expectMsgClass(GetJobDetailsResponse.class);
        System.out.println("resp " + resp3 + " msg " + resp3.message);
        assertEquals(SUCCESS, resp3.responseCode);
        // All workers have started so the job should be Launched.
        assertEquals(JobState.Launched,resp3.getJobMetadata().get().getState());
        verify(jobStoreMock, times(1)).storeNewJob(any());
        verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
        verify(jobStoreMock, times(19)).updateWorker(any());
        verify(jobStoreMock, times(3)).updateJob(any());
    } catch (InvalidJobException e) {
        e.printStackTrace();
        fail();
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}
/**
 * After both workers of a two-worker stage start, ListWorkersRequest must
 * return both worker metadata entries.
 *
 * <p>Fix: the job under test is instance 2 ({@code clusterName-2}), but the
 * ListWorkersRequest was built with {@code new JobId(clusterName, 1)}; it now
 * targets the job that was actually created.
 */
@Test
public void testListActiveWorkers() {
    final TestKit probe = new TestKit(system);
    String clusterName = "testListActiveWorkers";
    IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName);
    JobDefinition jobDefn;
    try {
        SchedulingInfo sInfo = new SchedulingInfo.Builder().numberOfStages(1).multiWorkerStageWithConstraints(2, new MachineDefinition(1.0,1.0,1.0,3), Lists.newArrayList(), Lists.newArrayList()).build();
        jobDefn = JobTestHelper.generateJobDefinition(clusterName, sInfo);
        MantisScheduler schedulerMock = mock(MantisScheduler.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
            .withJobId(new JobId(clusterName,2))
            .withSubmittedAt(Instant.now())
            .withJobState(JobState.Accepted)
            .withNextWorkerNumToUse(1)
            .withJobDefinition(jobDefn)
            .build();
        final ActorRef jobActor = system.actorOf(JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreMock, schedulerMock, eventPublisher, costsCalculator));
        jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
        JobProto.JobInitialized initMsg = probe.expectMsgClass(JobProto.JobInitialized.class);
        assertEquals(SUCCESS, initMsg.responseCode);
        String jobId = clusterName + "-2";
        jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
        GetJobDetailsResponse resp = probe.expectMsgClass(GetJobDetailsResponse.class);
        System.out.println("resp " + resp + " msg " + resp.message);
        assertEquals(SUCCESS, resp.responseCode);
        assertEquals(JobState.Accepted,resp.getJobMetadata().get().getState());
        int stageNo = 1;
        // Drive worker 1 through launched/start-initiated/started.
        WorkerId workerId = new WorkerId(jobId, 0, 1);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId);
        // With only one of two workers started the job is still Accepted.
        jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
        GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
        System.out.println("resp " + resp2 + " msg " + resp2.message);
        assertEquals(SUCCESS, resp2.responseCode);
        assertEquals(JobState.Accepted,resp2.getJobMetadata().get().getState());
        // Drive worker 2 through the same lifecycle.
        WorkerId workerId2 = new WorkerId(jobId, 1, 2);
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId2);
        // Both workers started -> job is Launched.
        jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
        GetJobDetailsResponse resp3 = probe.expectMsgClass(GetJobDetailsResponse.class);
        System.out.println("resp " + resp3 + " msg " + resp3.message);
        assertEquals(SUCCESS, resp3.responseCode);
        assertEquals(JobState.Launched,resp3.getJobMetadata().get().getState());
        // List workers for the job that was actually created (instance 2).
        jobActor.tell(new JobClusterManagerProto.ListWorkersRequest(new JobId(clusterName, 2)),probe.getRef());
        JobClusterManagerProto.ListWorkersResponse listWorkersResponse = probe.expectMsgClass(JobClusterManagerProto.ListWorkersResponse.class);
        assertEquals(2, listWorkersResponse.getWorkerMetadata().size());
        int cnt = 0;
        for(IMantisWorkerMetadata workerMeta : listWorkersResponse.getWorkerMetadata()) {
            if(workerMeta.getWorkerNumber() == 1 || workerMeta.getWorkerNumber() == 2) {
                cnt ++;
            }
        }
        // Both returned entries are the two workers we started.
        assertEquals(2, cnt);
        verify(jobStoreMock, times(1)).storeNewJob(any());
        verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
        verify(jobStoreMock, times(6)).updateWorker(any());
        verify(jobStoreMock, times(3)).updateJob(any());
    } catch (InvalidJobException e) {
        e.printStackTrace();
        fail();
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}
/**
 * Killing a job unschedules/terminates its worker, persists the final job
 * updates, and lets the actor be stopped cleanly.
 *
 * <p>Fix: replaced the fixed {@code Thread.sleep(1000)} with Mockito's
 * {@code timeout(1000)} verification (already statically imported), which
 * returns as soon as the async call happens instead of always waiting a second.
 */
@Test
public void testkill() throws Exception {
    final TestKit probe = new TestKit(system);
    String clusterName = "testKillCluster";
    IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName);
    JobDefinition jobDefn = JobTestHelper.generateJobDefinition(clusterName);
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
            .withJobId(new JobId(clusterName, 3))
            .withSubmittedAt(Instant.now())
            .withJobState(JobState.Accepted)
            .withNextWorkerNumToUse(1)
            .withJobDefinition(jobDefn)
            .build();
    final ActorRef jobActor = system.actorOf(JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreMock, schedulerMock, eventPublisher, costsCalculator));
    jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
    probe.expectMsgClass(JobProto.JobInitialized.class);
    probe.watch(jobActor);
    JobId jId = new JobId(clusterName, 3);
    jobActor.tell(new JobClusterProto.KillJobRequest(
            jId, "test reason", JobCompletedReason.Normal, "nj", probe.getRef()), probe.getRef());
    probe.expectMsgClass(JobClusterProto.KillJobResponse.class);
    JobTestHelper.sendWorkerTerminatedEvent(probe, jobActor, jId.getId(), new WorkerId(jId.getId(), 0, 1));
    // Wait (up to 1s) for the asynchronous termination instead of sleeping a fixed second.
    verify(schedulerMock, timeout(1000).times(1)).unscheduleAndTerminateWorker(any(), any());
    verify(schedulerMock, times(1)).scheduleWorker(any());
    verify(jobStoreMock, times(1)).storeNewJob(any());
    verify(jobStoreMock, times(1)).storeNewWorkers(any(), any());
    verify(jobStoreMock, times(2)).updateJob(any());
    jobActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
    probe.expectTerminated(jobActor);
}
@Test
public void testHeartBeatEnforcement() {
final TestKit probe = new TestKit(system);
String clusterName= "testHeartBeatEnforcementCluster";
IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName);
JobDefinition jobDefn;
try {
SchedulingInfo sInfo = new SchedulingInfo.Builder().numberOfStages(1).multiWorkerStageWithConstraints(2, new MachineDefinition(1.0,1.0,1.0,3), Lists.newArrayList(), Lists.newArrayList()).build();
jobDefn = JobTestHelper.generateJobDefinition(clusterName, sInfo);
MantisScheduler schedulerMock = mock(MantisScheduler.class);
MantisJobStore jobStoreMock = mock(MantisJobStore.class);
MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
.withJobId(new JobId(clusterName,2))
.withSubmittedAt(Instant.now())
.withJobState(JobState.Accepted)
.withNextWorkerNumToUse(1)
.withJobDefinition(jobDefn)
.build();
final ActorRef jobActor = system.actorOf(JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreMock, schedulerMock, eventPublisher, costsCalculator));
jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
JobProto.JobInitialized initMsg = probe.expectMsgClass(JobProto.JobInitialized.class);
assertEquals(SUCCESS, initMsg.responseCode);
String jobId = clusterName + "-2";
jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
//jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
GetJobDetailsResponse resp = probe.expectMsgClass(GetJobDetailsResponse.class);
System.out.println("resp " + resp + " msg " + resp.message);
assertEquals(SUCCESS, resp.responseCode);
assertEquals(JobState.Accepted,resp.getJobMetadata().get().getState());
int stageNo = 1;
WorkerId workerId = new WorkerId(jobId, 0, 1);
// send Launched, Initiated and heartbeat
JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId);
// check job status again
jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
//jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
System.out.println("resp " + resp2 + " msg " + resp2.message);
assertEquals(SUCCESS, resp2.responseCode);
// Only 1 worker has started.
assertEquals(JobState.Accepted,resp2.getJobMetadata().get().getState());
// send launched event
WorkerId workerId2 = new WorkerId(jobId, 1, 2);
JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId2);
// check job status again
jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
//jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
GetJobDetailsResponse resp3 = probe.expectMsgClass(GetJobDetailsResponse.class);
System.out.println("resp " + resp3 + " msg " + resp3.message);
assertEquals(SUCCESS, resp3.responseCode);
// 2 worker have started so job should be started.
assertEquals(JobState.Launched,resp3.getJobMetadata().get().getState());
JobTestHelper.sendHeartBeat(probe,jobActor,jobId,1,workerId2);
JobTestHelper.sendHeartBeat(probe,jobActor,jobId,1,workerId);
// check hb status in the future where we expect all last HBs to be stale.
Instant now = Instant.now();
jobActor.tell(new JobProto.CheckHeartBeat(now.plusSeconds(240)), probe.getRef());
Thread.sleep(1000);
// 2 original submissions and 2 resubmits because of HB timeouts
verify(schedulerMock, times(4)).scheduleWorker(any());
// 2 kills due to resubmits
verify(schedulerMock, times(2)).unscheduleAndTerminateWorker(any(), any());
//assertEquals(jobActor, probe.getLastSender());
} catch (InvalidJobException e) {
// TODO Auto-generated catch block
e.printStackTrace();
fail();
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
//
@Test
public void testLostWorkerGetsReplaced() {
final TestKit probe = new TestKit(system);
String clusterName= "testLostWorkerGetsReplaced";
IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName);
ActorRef jobActor = null;
JobDefinition jobDefn;
try {
SchedulingInfo sInfo = new SchedulingInfo.Builder().numberOfStages(1).multiWorkerStageWithConstraints(2, new MachineDefinition(1.0,1.0,1.0,3), Lists.newArrayList(), Lists.newArrayList()).build();
jobDefn = JobTestHelper.generateJobDefinition(clusterName, sInfo);
MantisScheduler schedulerMock = mock(MantisScheduler.class);
//MantisJobStore jobStoreMock = mock(MantisJobStore.class);
MantisJobStore jobStoreSpied = Mockito.spy(jobStore);
MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
.withJobId(new JobId(clusterName,2))
.withSubmittedAt(Instant.now())
.withJobState(JobState.Accepted)
.withNextWorkerNumToUse(1)
.withJobDefinition(jobDefn)
.build();
jobActor = system.actorOf(JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreSpied, schedulerMock, eventPublisher, costsCalculator));
jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
JobProto.JobInitialized initMsg = probe.expectMsgClass(JobProto.JobInitialized.class);
assertEquals(SUCCESS, initMsg.responseCode);
String jobId = clusterName + "-2";
jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
//jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
GetJobDetailsResponse resp = probe.expectMsgClass(GetJobDetailsResponse.class);
System.out.println("resp " + resp + " msg " + resp.message);
assertEquals(SUCCESS, resp.responseCode);
assertEquals(JobState.Accepted,resp.getJobMetadata().get().getState());
int stageNo = 1;
// send launched event
WorkerId workerId = new WorkerId(jobId, 0, 1);
// send heartbeat
JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId);
// check job status again
jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
//jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
System.out.println("resp " + resp2 + " msg " + resp2.message);
assertEquals(SUCCESS, resp2.responseCode);
// Only 1 worker has started.
assertEquals(JobState.Accepted,resp2.getJobMetadata().get().getState());
// send launched event
WorkerId workerId2 = new WorkerId(jobId, 1, 2);
JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId2);
// check job status again
jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
GetJobDetailsResponse resp3 = probe.expectMsgClass(GetJobDetailsResponse.class);
System.out.println("resp " + resp3 + " msg " + resp3.message);
assertEquals(SUCCESS, resp3.responseCode);
// 2 worker have started so job should be started.
assertEquals(JobState.Launched,resp3.getJobMetadata().get().getState());
// worker 2 gets terminated abnormally
JobTestHelper.sendWorkerTerminatedEvent(probe, jobActor, jobId, workerId2);
// replaced worker comes up and sends events
WorkerId workerId2_replaced = new WorkerId(jobId, 1, 3);
JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId2_replaced);
jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", jobId), probe.getRef());
GetJobDetailsResponse resp4 = probe.expectMsgClass(GetJobDetailsResponse.class);
IMantisJobMetadata jobMeta = resp4.getJobMetadata().get();
Map<Integer, ? extends IMantisStageMetadata> stageMetadata = jobMeta.getStageMetadata();
IMantisStageMetadata stage = stageMetadata.get(1);
for (JobWorker worker : stage.getAllWorkers()) {
System.out.println("worker -> " + worker.getMetadata());
}
// 2 initial schedules and 1 replacement
verify(schedulerMock, timeout(1_000).times(3)).scheduleWorker(any());
// archive worker should get called once for the dead worker
// verify(jobStoreMock, timeout(1_000).times(1)).archiveWorker(any());
Mockito.verify(jobStoreSpied).archiveWorker(any());
//assertEquals(jobActor, probe.getLastSender());
} catch (InvalidJobException e) {
// TODO Auto-generated catch block
e.printStackTrace();
fail();
} catch (Exception e) {
e.printStackTrace();
fail();
} finally {
system.stop(jobActor);
}
}
@Test
public void workerNumberGeneratorInvalidArgsTest() {
try {
WorkerNumberGenerator wng = new WorkerNumberGenerator(-1, 10);
fail();
} catch(Exception e) {
}
try {
WorkerNumberGenerator wng = new WorkerNumberGenerator(0, 0);
fail();
} catch(Exception e) {
}
}
@Test
public void workerNumberGeneratorTest() {
MantisJobMetadataImpl mantisJobMetaMock = mock(MantisJobMetadataImpl.class);
MantisJobStore jobStoreMock = mock(MantisJobStore.class);
int incrementStep = 10;
WorkerNumberGenerator wng = new WorkerNumberGenerator(0, incrementStep);
for(int i=1; i<incrementStep; i++) {
assertEquals(i, wng.getNextWorkerNumber(mantisJobMetaMock, jobStoreMock));
}
try {
verify(mantisJobMetaMock,times(1)).setNextWorkerNumberToUse(incrementStep, jobStoreMock);
// verify(jobStoreMock, times(1)).updateJob(any());
} catch ( Exception e) {
e.printStackTrace();
fail();
}
}
@Test
public void workerNumberGeneratorWithNonZeroLastUsedTest() {
MantisJobMetadataImpl mantisJobMetaMock = mock(MantisJobMetadataImpl.class);
MantisJobStore jobStoreMock = mock(MantisJobStore.class);
int incrementStep = 10;
int lastNumber = 7;
WorkerNumberGenerator wng = new WorkerNumberGenerator(lastNumber, incrementStep);
for(int i=lastNumber+1; i<incrementStep; i++) {
assertEquals(i, wng.getNextWorkerNumber(mantisJobMetaMock, jobStoreMock));
}
try {
verify(mantisJobMetaMock,times(1)).setNextWorkerNumberToUse(lastNumber + incrementStep, jobStoreMock);
//verify(jobStoreMock,times(1)).updateJob(any());
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
@Test
public void workerNumberGeneratorTest2() {
MantisJobMetadataImpl mantisJobMetaMock = mock(MantisJobMetadataImpl.class);
MantisJobStore jobStoreMock = mock(MantisJobStore.class);
WorkerNumberGenerator wng = new WorkerNumberGenerator();
for(int i=1; i<20; i++) {
assertEquals(i, wng.getNextWorkerNumber(mantisJobMetaMock, jobStoreMock));
}
try {
InOrder inOrder = Mockito.inOrder(mantisJobMetaMock);
inOrder.verify(mantisJobMetaMock).setNextWorkerNumberToUse(10, jobStoreMock);
inOrder.verify(mantisJobMetaMock).setNextWorkerNumberToUse(20, jobStoreMock);
//verify(jobStoreMock, times(2)).updateJob(any());
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
@Test
public void workerNumberGeneratorUpdatesStoreTest2() {
//MantisJobMetadataImpl mantisJobMetaMock = mock(MantisJobMetadataImpl.class);
JobDefinition jobDefnMock = mock(JobDefinition.class);
MantisJobMetadataImpl mantisJobMeta = new MantisJobMetadataImpl(JobId.fromId("job-1").get(),
Instant.now().toEpochMilli(),Instant.now().toEpochMilli(), jobDefnMock, JobState.Accepted,
0, 20, 20);
MantisJobStore jobStoreMock = mock(MantisJobStore.class);
WorkerNumberGenerator wng = new WorkerNumberGenerator();
for(int i=1; i<20; i++) {
assertEquals(i, wng.getNextWorkerNumber(mantisJobMeta, jobStoreMock));
}
try {
//InOrder inOrder = Mockito.inOrder(mantisJobMetaMock);
//inOrder.verify(mantisJobMetaMock).setNextWorkerNumberToUse(10, jobStoreMock);
//inOrder.verify(mantisJobMetaMock).setNextWorkerNumberToUse(20, jobStoreMock);
verify(jobStoreMock, times(2)).updateJob(any());
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
    // Exercises getNextWorkerNumber when the store's updateJob throws IOException.
    // NOTE(review): this test swallows the exception and makes no assertion, so it
    // passes whether the generator propagates or absorbs the failure — consider
    // asserting the intended behavior explicitly.
    @Test
    public void workerNumberGeneratorExceptionUpdatingJobTest() {
        MantisJobMetadataImpl mantisJobMetaMock = mock(MantisJobMetadataImpl.class);
        MantisJobStore jobStoreMock = mock(MantisJobStore.class);
        WorkerNumberGenerator wng = new WorkerNumberGenerator();
        try {
            Mockito.doThrow(IOException.class).when(jobStoreMock).updateJob(any());
            wng.getNextWorkerNumber(mantisJobMetaMock, jobStoreMock);
        } catch(Exception e) {
            e.printStackTrace();
        }
    }
}
| 7,894 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/jobcluster/job/MantisJobMetadataViewTest.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import io.mantisrx.master.api.akka.payloads.PayloadUtils;
import io.mantisrx.server.master.domain.DataFormatAdapter;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.SerializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
import io.mantisrx.shaded.com.fasterxml.jackson.datatype.jdk8.Jdk8Module;
import java.util.Collections;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * JSON serialization round-trip tests for {@code MantisJobMetadataView}.
 */
public class MantisJobMetadataViewTest {
    private static final Logger logger = LoggerFactory.getLogger(MantisJobMetadataViewTest.class);

    // Shared mapper; ObjectMapper is thread-safe once configured, so it is
    // configured exactly once in the static initializer below.
    private static final ObjectMapper mapper = new ObjectMapper();

    static {
        mapper
            .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
            .configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false)
            .registerModule(new Jdk8Module());
    }

    /** Deserializing then re-serializing the sample view yields the same JSON. */
    @Test
    public void testJsonReadWrite() throws Exception {
        String metadata = PayloadUtils.getStringFromResource("persistence/job_metadata_view.json");
        MantisJobMetadataView metadataView = mapper.readValue(metadata, MantisJobMetadataView.class);

        SimpleFilterProvider filterProvider = new SimpleFilterProvider();
        filterProvider.setFailOnUnknownId(false);
        String output = mapper.writer(filterProvider).writeValueAsString(metadataView);
        // The sample resource carries no terminatedAt field, so the serialized
        // default ("terminatedAt":-1) is stripped before comparing; whitespace
        // differences are ignored.
        assertEquals(metadata.replaceAll("\\s", ""), output.replace("\"terminatedAt\":-1,", ""));
    }

    /** A view constructed with an explicit terminatedAt serializes that value. */
    @Test
    public void testJsonWithTerminatedAt() throws Exception {
        String metadata = PayloadUtils.getStringFromResource("persistence/job_metadata_view.json");
        MantisJobMetadataView metadataView = mapper.readValue(metadata, MantisJobMetadataView.class);
        FilterableMantisJobMetadataWritable jobMetadata =
            (FilterableMantisJobMetadataWritable) metadataView.getJobMetadata();
        jobMetadata.addJobStageIfAbsent(metadataView.getStageMetadataList().get(0));
        IMantisJobMetadata iJobMetadata = DataFormatAdapter.convertMantisJobWriteableToMantisJobMetadata(jobMetadata,
            null);
        MantisJobMetadataView metadataViewWithTerminate = new MantisJobMetadataView(iJobMetadata, 999,
            Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), false);

        SimpleFilterProvider filterProvider = new SimpleFilterProvider();
        filterProvider.setFailOnUnknownId(false);
        String output = mapper.writer(filterProvider).writeValueAsString(metadataViewWithTerminate);
        // contains() replaces indexOf(...) > 0, which would (incorrectly) fail
        // if the match happened to start at index 0.
        assertTrue(output.contains("\"terminatedAt\":999"));
    }
}
| 7,895 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/jobcluster | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/jobcluster/job/JobTestHelper.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.testkit.javadsl.TestKit;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.jobcluster.job.worker.WorkerHeartbeat;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.master.jobcluster.job.worker.WorkerStatus;
import io.mantisrx.master.jobcluster.job.worker.WorkerTerminate;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.SubmitJobRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterProto;
import io.mantisrx.master.jobcluster.proto.JobProto;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.command.InvalidJobException;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.Status.TYPE;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.domain.IJobClusterDefinition;
import io.mantisrx.server.master.domain.JobClusterConfig;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import io.mantisrx.server.master.scheduler.WorkerLaunched;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.io.File;
import java.time.Duration;
import java.time.Instant;
import java.util.Optional;
import javax.annotation.Nullable;
import org.junit.Test;
public class JobTestHelper {
private final static String SPOOL_DIR = "/tmp/MantisSpool";
private final static String ARCHIVE_DIR = "/tmp/MantisArchive";
public static void createDirsIfRequired() {
File spoolDir = new File(SPOOL_DIR);
File namedJobsDir = new File(SPOOL_DIR + "/" + "namedJobs");
File archiveDir = new File(ARCHIVE_DIR);
if (!spoolDir.exists()) {
spoolDir.mkdir();
}
if (!archiveDir.exists()) {
archiveDir.mkdir();
}
if (!namedJobsDir.exists()) {
namedJobsDir.mkdir();
}
}
public static void deleteAllFiles() {
try {
File spoolDir = new File(SPOOL_DIR);
File archiveDir = new File(ARCHIVE_DIR);
deleteDir(spoolDir);
deleteDir(archiveDir);
} catch (Exception e) {
}
}
private static void deleteDir(File dir) {
if (dir != null) {
for (File file : dir.listFiles()) {
if (file.isDirectory()) {
deleteDir(file);
} else {
boolean delete = file.delete();
}
}
}
}
    /** Builds a job cluster definition for {@code name} using the default worker migration config. */
    public static IJobClusterDefinition generateJobClusterDefinition(String name, SchedulingInfo schedInfo) {
        return generateJobClusterDefinition(name, schedInfo, WorkerMigrationConfig.DEFAULT);
    }
public static IJobClusterDefinition generateJobClusterDefinition(String name, SchedulingInfo schedInfo, WorkerMigrationConfig migrationConfig) {
JobClusterConfig clusterConfig = new JobClusterConfig.Builder()
.withArtifactName("myart")
.withSchedulingInfo(schedInfo)
.withVersion("0.0.1")
.build();
return new JobClusterDefinitionImpl.Builder()
.withJobClusterConfig(clusterConfig)
.withName(name)
.withUser("user")
.withParameters(Lists.newArrayList())
.withIsReadyForJobMaster(true)
.withOwner(new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo"))
.withMigrationConfig(migrationConfig)
.build();
}
    /** Builds a job cluster definition with a single-stage, single-worker scheduling info of zero-sized machines. */
    public static IJobClusterDefinition generateJobClusterDefinition(String name) {
        return generateJobClusterDefinition(name, new SchedulingInfo.Builder().numberOfStages(1).singleWorkerStageWithConstraints(new MachineDefinition(0, 0, 0, 0, 0), Lists.newArrayList(), Lists.newArrayList()).build());
    }
    /**
     * Builds a perpetual JobDefinition for the cluster with the given scheduling
     * info, a fixed artifact and user, and no parameters or labels.
     *
     * @throws InvalidJobException if the builder rejects the definition
     */
    public static JobDefinition generateJobDefinition(String clusterName, SchedulingInfo schedInfo) throws InvalidJobException {
        return new JobDefinition.Builder()
                .withName(clusterName)
                .withParameters(Lists.newArrayList())
                .withLabels(Lists.newArrayList())
                .withSchedulingInfo(schedInfo)
                .withArtifactName("myart")
                .withSubscriptionTimeoutSecs(0)
                .withUser("njoshi")
                .withNumberOfStages(schedInfo.getStages().size())
                .withJobSla(new JobSla(0, 0, null, MantisJobDurationType.Perpetual, null))
                .build();
    }
    /** Builds a perpetual JobDefinition with a default single-stage, single-worker scheduling info. */
    public static JobDefinition generateJobDefinition(String clusterName) throws InvalidJobException {
        return generateJobDefinition(clusterName, new SchedulingInfo.Builder().numberOfStages(1).singleWorkerStageWithConstraints(new MachineDefinition(1.0, 1.0, 1.0, 1.0, 3), Lists.newArrayList(), Lists.newArrayList()).build());
    }
    /** Triggers the job actor's heartbeat-staleness check as of the given instant. */
    public static void sendCheckHeartBeat(final TestKit probe, final ActorRef jobActor, Instant now) {
        jobActor.tell(new JobProto.CheckHeartBeat(now), probe.getRef());
    }
    /** Sends a worker heartbeat stamped with the current wall-clock time. */
    public static void sendHeartBeat(final TestKit probe, final ActorRef jobActor, String jobId, int stageNo, WorkerId workerId2) {
        sendHeartBeat(probe, jobActor, jobId, stageNo, workerId2, System.currentTimeMillis());
    }
    /**
     * Drives a worker through its full startup sequence: LAUNCHED, then
     * START_INITIATED, then STARTED.
     */
    // NOTE(review): jobId is unused here (the WorkerId carries the job id);
    // kept for signature compatibility with existing callers.
    public static void sendLaunchedInitiatedStartedEventsToWorker(final TestKit probe, final ActorRef jobActor, String jobId,
                                                                  int stageNo, WorkerId workerId2) {
        JobTestHelper.sendWorkerLaunchedEvent(probe, jobActor, workerId2, stageNo);
        JobTestHelper.sendStartInitiatedEvent(probe, jobActor, stageNo, workerId2);
        // send started
        JobTestHelper.sendStartedEvent(probe, jobActor, stageNo, workerId2);
    }
// public static void sendLaunchedInitiatedStartedEventsToWorker(final TestKit probe, final ActorRef jobActor, String jobId,
// int stageNo, WorkerId workerId2) {
// sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, stageNo, workerId2, System.currentTimeMillis() + 1000);
// }
    /** Sends a worker heartbeat (Started state) stamped with the given epoch-millis time. */
    public static void sendHeartBeat(final TestKit probe, final ActorRef jobActor, String jobId, int stageNo, WorkerId workerId2, long time) {
        WorkerEvent heartBeat2 = new WorkerHeartbeat(new Status(jobId, stageNo, workerId2.getWorkerIndex(), workerId2.getWorkerNum(), TYPE.HEARTBEAT, "", MantisJobState.Started, time));
        jobActor.tell(heartBeat2, probe.getRef());
    }
    /** Sends an abnormal termination (Failed/Lost) event for the worker. */
    // NOTE(review): jobId is unused (the WorkerId carries the job id); kept for
    // signature compatibility with existing callers.
    public static void sendWorkerTerminatedEvent(final TestKit probe, final ActorRef jobActor, String jobId, WorkerId workerId2) {
        WorkerEvent workerTerminated = new WorkerTerminate(workerId2, WorkerState.Failed, JobCompletedReason.Lost);
        jobActor.tell(workerTerminated, probe.getRef());
    }
    /** Sends a normal completion (Completed/Normal) event for the worker. */
    public static void sendWorkerCompletedEvent(final TestKit probe, final ActorRef jobActor, String jobId, WorkerId workerId2) {
        WorkerEvent workerCompleted = new WorkerTerminate(workerId2, WorkerState.Completed, JobCompletedReason.Normal);
        jobActor.tell(workerCompleted, probe.getRef());
    }
public static void sendStartInitiatedEvent(final TestKit probe, final ActorRef jobActor, final int stageNum, WorkerId workerId) {
WorkerEvent startInitEvent = new WorkerStatus(new Status(
workerId.getJobId(),
stageNum,
workerId.getWorkerIndex(),
workerId.getWorkerNum(),
TYPE.INFO,
"test START_INITIATED event",
MantisJobState.StartInitiated
));
jobActor.tell(startInitEvent, probe.getRef());
}
public static void sendStartedEvent(final TestKit probe, final ActorRef jobActor, final int stageNum, WorkerId workerId) {
WorkerEvent startedEvent = new WorkerStatus(new Status(
workerId.getJobId(),
stageNum,
workerId.getWorkerIndex(),
workerId.getWorkerNum(),
TYPE.INFO,
"test STARTED event",
MantisJobState.Started
));
jobActor.tell(startedEvent, probe.getRef());
}
    /** Sends an InitJob message (with the self-destruct/true flag) to the actor. */
    public static void sendJobInitializeEvent(final TestKit probe, final ActorRef jobClusterActor) {
        JobProto.InitJob initJobEvent = new JobProto.InitJob(probe.getRef(), true);
        jobClusterActor.tell(initJobEvent, probe.getRef());
    }
    /** Sends a WorkerLaunched event placing the worker on host1/vm1 with a fixed set of ports. */
    public static void sendWorkerLaunchedEvent(final TestKit probe, final ActorRef jobActor, WorkerId workerId2, int stageNo) {
        WorkerEvent launchedEvent2 = new WorkerLaunched(workerId2, stageNo, "host1", "vm1", Optional.empty(), Optional.empty(), new WorkerPorts(Lists.newArrayList(8000, 9000, 9010, 9020, 9030)));
        jobActor.tell(launchedEvent2, probe.getRef());
    }
    /** Sends a KillJobRequest for the job and asserts a SUCCESS response. */
    // NOTE(review): clusterName is unused (the JobId identifies the job); kept
    // for signature compatibility with existing callers.
    public static void killJobAndVerify(final TestKit probe, String clusterName, JobId jobId, ActorRef jobClusterActor) {
        jobClusterActor.tell(new JobClusterProto.KillJobRequest(jobId, "test reason", JobCompletedReason.Normal, "nj", probe.getRef()), probe.getRef());
        JobClusterManagerProto.KillJobResponse killJobResp = probe.expectMsgClass(JobClusterManagerProto.KillJobResponse.class);
        assertEquals(SUCCESS, killJobResp.responseCode);
    }
    /**
     * Sends a KillJobRequest, then the worker-terminated event the kill flow
     * waits for, and finally asserts the kill response was SUCCESS.
     */
    public static void killJobSendWorkerTerminatedAndVerify(final TestKit probe, String clusterName, JobId jobId, ActorRef jobClusterActor, WorkerId workerId) {
        jobClusterActor.tell(new JobClusterProto.KillJobRequest(jobId, "test reason", JobCompletedReason.Normal, "nj", probe.getRef()), probe.getRef());
        JobClusterManagerProto.KillJobResponse killJobResp = probe.expectMsgClass(JobClusterManagerProto.KillJobResponse.class);
        // Deliver the terminate event before asserting, so the actor can finish
        // its kill sequence.
        sendWorkerTerminatedEvent(probe, jobClusterActor, jobId.getId(), workerId);
        assertEquals(SUCCESS, killJobResp.responseCode);
    }
public static void getJobDetailsAndVerify(final TestKit probe, ActorRef jobClusterActor, String jobId, BaseResponse.ResponseCode expectedRespCode, JobState expectedState) {
jobClusterActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", JobId.fromId(jobId).get()), probe.getRef());
JobClusterManagerProto.GetJobDetailsResponse detailsResp = probe.expectMsgClass(Duration.ofSeconds(60), JobClusterManagerProto.GetJobDetailsResponse.class);
if (expectedRespCode == SUCCESS) {
assertEquals(SUCCESS, detailsResp.responseCode);
assertTrue(detailsResp.getJobMetadata().isPresent());
assertEquals(jobId, detailsResp.getJobMetadata().get().getJobId().getId());
assertEquals(expectedState, detailsResp.getJobMetadata().get().getState());
} else {
assertEquals(expectedRespCode, detailsResp.responseCode);
assertFalse(detailsResp.getJobMetadata().isPresent());
}
}
public static boolean verifyJobStatusWithPolling(final TestKit probe, final ActorRef actorRef, final String jobId1, final JobState expectedState) {
boolean result = false;
int cnt = 0;
// try a few times for timing issue
while (cnt < 100 || !result) {
cnt++;
actorRef.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", JobId.fromId(jobId1).get()), probe.getRef());
JobClusterManagerProto.GetJobDetailsResponse detailsResp = probe.expectMsgClass(Duration.ofSeconds(2), JobClusterManagerProto.GetJobDetailsResponse.class);
if (detailsResp.getJobMetadata().isPresent() && expectedState.equals(detailsResp.getJobMetadata().get().getState())) {
result = true;
break;
}
}
return result;
}
    /** Submits the job and asserts a SUCCESS response with the expected job id. */
    public static void submitJobAndVerifySuccess(final TestKit probe, String clusterName, ActorRef jobClusterActor, final JobDefinition jobDefn,
                                                 String jobId) {
        submitJobAndVerifyStatus(probe, clusterName, jobClusterActor, jobDefn, jobId, SUCCESS);
    }
public static void submitJobAndVerifyStatus(final TestKit probe, String clusterName, ActorRef jobClusterActor, @Nullable final JobDefinition jobDefn,
String jobId, ResponseCode code) {
final SubmitJobRequest request;
if (jobDefn == null) {
request = new SubmitJobRequest(clusterName, "user");
} else {
request = new JobClusterManagerProto.SubmitJobRequest(clusterName, "user", jobDefn);
}
jobClusterActor.tell(request, probe.getRef());
JobClusterManagerProto.SubmitJobResponse submitResponse = probe.expectMsgClass(JobClusterManagerProto.SubmitJobResponse.class);
assertEquals(code, submitResponse.responseCode);
if (jobId == null) {
assertTrue(!submitResponse.getJobId().isPresent());
} else {
assertEquals(jobId, submitResponse.getJobId().get().getId());
}
}
    /** Scales the given stage to {@code numberOfWorkers} and asserts the actor acknowledges the new count. */
    public static void scaleStageAndVerify(final TestKit probe, ActorRef jobClusterActor,
                                           String jobId, int stageNum, int numberOfWorkers) {
        jobClusterActor.tell(new JobClusterManagerProto.ScaleStageRequest(jobId, stageNum, numberOfWorkers,"user", "testScale"), probe.getRef());
        JobClusterManagerProto.ScaleStageResponse scaleResponse = probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class);
        assertEquals(SUCCESS, scaleResponse.responseCode);
        assertEquals(numberOfWorkers, scaleResponse.getActualNumWorkers());
    }
    /**
     * Creates a JobActor for a single-stage scalable job (job number 1 of the
     * given cluster), initializes it, starts all of the stage's workers (plus
     * the job-master worker on stage 0), and asserts the job reaches the
     * Launched state.
     *
     * @return the initialized job actor, ready for further test interactions
     */
    public static ActorRef submitSingleStageScalableJob(ActorSystem system, TestKit probe, String clusterName, SchedulingInfo sInfo,
                                                        MantisScheduler schedulerMock, MantisJobStore jobStoreMock,
                                                        LifecycleEventPublisher lifecycleEventPublisher) throws io.mantisrx.runtime.command.InvalidJobException {
        IJobClusterDefinition jobClusterDefn = JobTestHelper.generateJobClusterDefinition(clusterName, sInfo);
        JobDefinition jobDefn = JobTestHelper.generateJobDefinition(clusterName, sInfo);

        MantisJobMetadataImpl mantisJobMetaData = new MantisJobMetadataImpl.Builder()
                .withJobId(new JobId(clusterName, 1))
                .withSubmittedAt(Instant.now())
                .withJobState(JobState.Accepted)
                .withNextWorkerNumToUse(1)
                .withJobDefinition(jobDefn)
                .build();
        final ActorRef jobActor = system.actorOf(
                JobActor.props(jobClusterDefn, mantisJobMetaData, jobStoreMock, schedulerMock,
                        lifecycleEventPublisher, CostsCalculator.noop()));

        jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
        JobProto.JobInitialized initMsg = probe.expectMsgClass(JobProto.JobInitialized.class);
        assertEquals(SUCCESS, initMsg.responseCode);

        // Freshly initialized job should be in the Accepted state.
        String jobId = clusterName + "-1";
        jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", JobId.fromId(jobId).get()), probe.getRef());
        //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
        JobClusterManagerProto.GetJobDetailsResponse resp = probe.expectMsgClass(JobClusterManagerProto.GetJobDetailsResponse.class);
        System.out.println("resp " + resp + " msg " + resp.message);
        assertEquals(SUCCESS, resp.responseCode);
        assertEquals(JobState.Accepted, resp.getJobMetadata().get().getState());

        int stageNo = 1;
        // send launched event
        int lastWorkerNum = 0;
        // Worker for stage 0 (job master) starts first.
        JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId, 0, new WorkerId(jobId, 0, ++lastWorkerNum));
        //JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe,jobActor,jobId,1,new WorkerId(jobId,0,2));
        // Launch + start-initiate + heartbeat every worker of the scalable stage.
        for (int i = 0; i < sInfo.forStage(stageNo).getNumberOfInstances(); i++) {
            WorkerId workerId = new WorkerId(jobId, i, ++lastWorkerNum);
            JobTestHelper.sendWorkerLaunchedEvent(probe, jobActor, workerId, stageNo);

            // start initiated event
            JobTestHelper.sendStartInitiatedEvent(probe, jobActor, stageNo, workerId);

            // send heartbeat
            JobTestHelper.sendHeartBeat(probe, jobActor, jobId, stageNo, workerId, System.currentTimeMillis() + 1000);
        }

        // check job status again
        jobActor.tell(new JobClusterManagerProto.GetJobDetailsRequest("nj", JobId.fromId(jobId).get()), probe.getRef());
        //jobActor.tell(new JobProto.InitJob(probe.getRef()), probe.getRef());
        JobClusterManagerProto.GetJobDetailsResponse resp2 = probe.expectMsgClass(JobClusterManagerProto.GetJobDetailsResponse.class);
        System.out.println("resp " + resp2 + " msg " + resp2.message);
        assertEquals(SUCCESS, resp2.responseCode);
        // 1 worker has started. so job has started
        assertEquals(JobState.Launched, resp2.getJobMetadata().get().getState());
        return jobActor;
    }
@Test
public void testCalculateRuntimeLimitForAlreadyStartedJob() {
    // A job that started 5s ago with a 10s limit has 5s of runtime remaining.
    Instant startedAt = Instant.now().minusSeconds(5);
    assertEquals(5, JobHelper.calculateRuntimeDuration(10, startedAt));
}
@Test
public void testCalculateRuntimeLimitForJustStartedJob() {
    // A job starting right now keeps the full 10s runtime limit.
    assertEquals(10, JobHelper.calculateRuntimeDuration(10, Instant.now()));
}
@Test
public void testCalculateRuntimeLimitForAlreadyExpiredJob() {
    // Started 15s ago with a 10s limit: the limit is already exceeded, and the
    // helper is expected to return 1 rather than zero or a negative duration.
    Instant startedAt = Instant.now().minusSeconds(15);
    assertEquals(1, JobHelper.calculateRuntimeDuration(10, startedAt));
}
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.jobcluster.job;
import static io.mantisrx.master.jobcluster.JobClusterTest.DEFAULT_JOB_OWNER;
import static io.mantisrx.master.jobcluster.JobClusterTest.NO_OP_SLA;
import static io.mantisrx.master.jobcluster.JobClusterTest.TWO_WORKER_SCHED_INFO;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR_CONFLICT;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR_NOT_FOUND;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS;
import static io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS_CREATED;
import static java.util.Optional.empty;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.testkit.javadsl.TestKit;
import com.netflix.mantis.master.scheduler.TestHelpers;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import io.mantisrx.common.Label;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.master.JobClustersManagerActor;
import io.mantisrx.master.events.AuditEventSubscriberLoggingImpl;
import io.mantisrx.master.events.LifecycleEventPublisher;
import io.mantisrx.master.events.LifecycleEventPublisherImpl;
import io.mantisrx.master.events.StatusEventSubscriberLoggingImpl;
import io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl;
import io.mantisrx.master.jobcluster.MantisJobClusterMetadataView;
import io.mantisrx.master.jobcluster.job.worker.IMantisWorkerMetadata;
import io.mantisrx.master.jobcluster.job.worker.JobWorker;
import io.mantisrx.master.jobcluster.job.worker.WorkerHeartbeat;
import io.mantisrx.master.jobcluster.job.worker.WorkerState;
import io.mantisrx.master.jobcluster.job.worker.WorkerStatus;
import io.mantisrx.master.jobcluster.proto.BaseResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.DisableJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.EnableJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobClusterRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobClusterResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetJobDetailsResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLastSubmittedJobIdStreamRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.GetLastSubmittedJobIdStreamResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.JobClustersManagerInitializeResponse;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterArtifactRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterLabelsRequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterSLARequest;
import io.mantisrx.master.jobcluster.proto.JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyRequest;
import io.mantisrx.runtime.JobOwner;
import io.mantisrx.runtime.JobSla;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.WorkerMigrationConfig;
import io.mantisrx.runtime.WorkerMigrationConfig.MigrationStrategyEnum;
import io.mantisrx.runtime.command.InvalidJobException;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.server.core.JobCompletedReason;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.Status.TYPE;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.domain.IJobClusterDefinition;
import io.mantisrx.server.master.domain.JobClusterConfig;
import io.mantisrx.server.master.domain.JobClusterDefinitionImpl;
import io.mantisrx.server.master.domain.JobDefinition;
import io.mantisrx.server.master.domain.JobId;
import io.mantisrx.server.master.domain.SLA;
import io.mantisrx.server.master.persistence.KeyValueBasedPersistenceProvider;
import io.mantisrx.server.master.persistence.MantisJobStore;
import io.mantisrx.server.master.scheduler.MantisScheduler;
import io.mantisrx.server.master.scheduler.MantisSchedulerFactory;
import io.mantisrx.server.master.scheduler.WorkerEvent;
import io.mantisrx.server.master.scheduler.WorkerLaunched;
import io.mantisrx.server.master.store.FileBasedStore;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.io.IOException;
import java.net.MalformedURLException;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.junit.rules.Timeout;
import org.mockito.Mockito;
import rx.schedulers.Schedulers;
import rx.subjects.BehaviorSubject;
public class JobClusterManagerTest {
// Shared actor system for all tests in this class; created in setup(), shut down in tearDown().
static ActorSystem system;
// Mocked job store, re-created per test in setupState().
private MantisJobStore jobStoreMock;
// Actor under test; re-created and initialized per test in setupState().
private ActorRef jobClusterManagerActor;
// Factory mock that returns schedulerMock for every job (see setupState()).
private MantisSchedulerFactory schedulerMockFactory;
private MantisScheduler schedulerMock;
// Logging-only lifecycle event publisher shared by all tests.
private static LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(
    new AuditEventSubscriberLoggingImpl(),
    new StatusEventSubscriberLoggingImpl(),
    new WorkerEventSubscriberLoggingImpl());
// No-op cost calculator — tests don't exercise cost accounting.
private final CostsCalculator costsCalculator = CostsCalculator.noop();
private static final String user = "nj";
// Fresh temp directory per test, used by the file-based persistence provider.
@Rule
public TemporaryFolder rootDir = new TemporaryFolder();
// Per-test timeout of 2000 ms enforced by JUnit.
@Rule
public Timeout globalTimeout = new Timeout(2000);
/**
 * Creates the shared ActorSystem once for the class, wiring in akka's TestEventListener
 * and quieting log output, then initializes the master configuration the actors rely on.
 */
@BeforeClass
public static void setup() {
    Config config = ConfigFactory.parseString("akka {\n" +
        " loggers = [\"akka.testkit.TestEventListener\"]\n" +
        " loglevel = \"WARNING\"\n" +
        " stdout-loglevel = \"WARNING\"\n" +
        "}\n");
    system = ActorSystem.create(
        "JobClusterManagerTest",
        config.withFallback(ConfigFactory.load()));
    TestHelpers.setupMasterConfig();
}
/**
 * Re-creates the mocks and a fresh, initialized JobClustersManagerActor before each test.
 * Tests that need to inspect the initialize response create their own actor instead.
 */
@Before
public void setupState() {
    jobStoreMock = mock(MantisJobStore.class);
    schedulerMockFactory = mock(MantisSchedulerFactory.class);
    schedulerMock = mock(MantisScheduler.class);
    // Every job asked of the factory gets the same scheduler mock.
    when(schedulerMockFactory.forJob(any())).thenReturn(schedulerMock);
    jobClusterManagerActor = system.actorOf(JobClustersManagerActor.props(
        jobStoreMock,
        eventPublisher,
        costsCalculator));
    // Fire-and-forget initialization (noSender): the response is intentionally discarded here.
    jobClusterManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize(
        schedulerMockFactory,
        true), ActorRef.noSender());
}
/** Removes test files and shuts down the shared actor system after all tests have run. */
@AfterClass
public static void tearDown() {
    JobTestHelper.deleteAllFiles();
    TestKit.shutdownActorSystem(system);
    system = null;
}
/** Overload of the 3-arg variant that uses the default worker migration config. */
private JobClusterDefinitionImpl createFakeJobClusterDefn(
    final String name,
    List<Label> labels) {
    return createFakeJobClusterDefn(name, labels, WorkerMigrationConfig.DEFAULT);
}
/**
 * Builds a job cluster definition named {@code name} with a single single-worker stage
 * (a zero-sized MachineDefinition and empty constraints), the given labels and migration
 * config, a min-0/max-1 SLA, and fixed owner metadata.
 */
private JobClusterDefinitionImpl createFakeJobClusterDefn(
    final String name,
    List<Label> labels,
    WorkerMigrationConfig migrationConfig) {
    JobClusterConfig clusterConfig = new JobClusterConfig.Builder()
        .withArtifactName("myart")
        // Single stage, one worker, deliberately zero-sized machine — resources are mocked.
        .withSchedulingInfo(new SchedulingInfo.Builder().numberOfStages(1)
            .singleWorkerStageWithConstraints(
                new MachineDefinition(
                    0,
                    0,
                    0,
                    0,
                    0),
                Lists.newArrayList(),
                Lists.newArrayList())
            .build())
        .withVersion("0.0.1")
        .build();
    return new JobClusterDefinitionImpl.Builder()
        .withName(name)
        .withUser(user)
        .withJobClusterConfig(clusterConfig)
        .withParameters(Lists.newArrayList())
        .withLabels(labels)
        // SLA min=0, max=1; existing jobs are kept on cron conflicts.
        .withSla(new SLA(0, 1, null, IJobClusterDefinition.CronPolicy.KEEP_EXISTING))
        .withIsReadyForJobMaster(true)
        .withOwner(new JobOwner("Nick", "Mantis", "desc", "nma@netflix.com", "repo"))
        .withMigrationConfig(migrationConfig)
        .build();
}
/**
 * Builds a transient single-stage, single-worker job definition for cluster {@code name2}.
 *
 * @throws InvalidJobException if the builder rejects the definition
 */
private JobDefinition createJob(String name2) throws InvalidJobException {
    return new JobDefinition.Builder()
        .withName(name2)
        .withParameters(Lists.newArrayList())
        .withLabels(Lists.newArrayList())
        // One stage, one worker with a small but non-zero machine footprint.
        .withSchedulingInfo(new SchedulingInfo.Builder().numberOfStages(1)
            .singleWorkerStageWithConstraints(
                new MachineDefinition(
                    1,
                    10,
                    10,
                    10,
                    2),
                Lists.newArrayList(),
                Lists.newArrayList())
            .build())
        .withArtifactName("myart")
        .withSubscriptionTimeoutSecs(0)
        .withUser("njoshi")
        // Transient job with no runtime limit.
        .withJobSla(new JobSla(0, 0, null, MantisJobDurationType.Transient, null))
        .build();
}
/** Overload of the 3-arg variant that uses the default worker migration config. */
private void createJobClusterAndAssert(ActorRef jobClusterManagerActor, String clusterName) {
    createJobClusterAndAssert(
        jobClusterManagerActor,
        clusterName,
        WorkerMigrationConfig.DEFAULT);
}
/**
 * Creates a fake job cluster named {@code clusterName} via the manager actor and asserts
 * the create succeeded (SUCCESS_CREATED).
 */
private void createJobClusterAndAssert(
    ActorRef jobClusterManagerActor,
    String clusterName,
    WorkerMigrationConfig migrationConfig) {
    TestKit probe = new TestKit(system);
    JobClusterDefinitionImpl fakeJobCluster =
        createFakeJobClusterDefn(clusterName, Lists.newArrayList(), migrationConfig);
    jobClusterManagerActor.tell(
        new JobClusterManagerProto.CreateJobClusterRequest(fakeJobCluster, "user"),
        probe.getRef());
    JobClusterManagerProto.CreateJobClusterResponse createResponse =
        probe.expectMsgClass(JobClusterManagerProto.CreateJobClusterResponse.class);
    // Include the full response in the failure message for easier debugging.
    assertEquals(createResponse.toString(), SUCCESS_CREATED, createResponse.responseCode);
}
/**
 * Submits a newly built job to {@code cluster} via the manager actor and asserts the
 * submission succeeded. If the job definition cannot be built, the calling test fails
 * with the exception details in the failure message.
 */
private void submitJobAndAssert(ActorRef jobClusterManagerActor, String cluster) {
    TestKit probe = new TestKit(system);
    try {
        JobDefinition jobDefn = createJob(cluster);
        jobClusterManagerActor.tell(
            new JobClusterManagerProto.SubmitJobRequest(
                cluster,
                "me",
                jobDefn),
            probe.getRef());
        JobClusterManagerProto.SubmitJobResponse submitResp = probe.expectMsgClass(
            JobClusterManagerProto.SubmitJobResponse.class);
        assertEquals(SUCCESS, submitResp.responseCode);
    } catch (InvalidJobException e) {
        // Surface the cause in the test failure itself instead of printStackTrace() + bare fail(),
        // which hid the reason from the test report.
        fail("Job submission for cluster " + cluster + " failed: " + e);
    }
}
/**
 * Bootstrap test with a cluster that has no jobs: creates the cluster against a real
 * file-backed store, restarts the manager actor, and verifies the cluster is re-loaded
 * from persistence on initialization.
 */
@Test
public void testBootStrapJobClustersAndJobs1() {
    TestKit probe = new TestKit(system);
    JobTestHelper.deleteAllFiles();
    // Real (non-mock) store backed by the per-test temp dir, spied so load calls can be verified.
    MantisJobStore jobStore = new MantisJobStore(new KeyValueBasedPersistenceProvider(
        new FileBasedStore(rootDir.getRoot()),
        eventPublisher));
    MantisJobStore jobStoreSpied = Mockito.spy(jobStore);
    // MantisScheduler schedulerMock = mock(MantisScheduler.class);
    ActorRef jobClusterManagerActor = system.actorOf(JobClustersManagerActor.props(
        jobStoreSpied,
        eventPublisher,
        costsCalculator));
    jobClusterManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize(
        schedulerMockFactory,
        true), probe.getRef());
    JobClustersManagerInitializeResponse iResponse = probe.expectMsgClass(Duration.of(
        10,
        ChronoUnit.MINUTES),
        JobClustersManagerInitializeResponse.class);
    //List<String> clusterNames = Lists.newArrayList("testBootStrapJobClustersAndJobs1");
    String clusterWithNoJob = "testBootStrapJobClusterWithNoJob";
    createJobClusterAndAssert(jobClusterManagerActor, clusterWithNoJob);
    // kill 1 of the jobs to test archive path
    // Stop job cluster Manager Actor
    system.stop(jobClusterManagerActor);
    // create new instance
    jobClusterManagerActor = system.actorOf(JobClustersManagerActor.props(
        jobStore,
        eventPublisher,
        costsCalculator));
    // initialize it
    jobClusterManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize(
        schedulerMockFactory,
        true), probe.getRef());
    //JobClusterManagerProto.JobClustersManagerInitializeResponse initializeResponse = probe.expectMsgClass(JobClusterManagerProto.JobClustersManagerInitializeResponse.class);
    JobClustersManagerInitializeResponse initializeResponse = probe.expectMsgClass(Duration.of(
        10,
        ChronoUnit.MINUTES), JobClustersManagerInitializeResponse.class);
    assertEquals(SUCCESS, initializeResponse.responseCode);
    // The cluster created before the restart must have been re-hydrated from the store.
    jobClusterManagerActor.tell(new GetJobClusterRequest(clusterWithNoJob), probe.getRef());
    GetJobClusterResponse jobClusterResponse = probe.expectMsgClass(Duration.of(
        10,
        ChronoUnit.MINUTES),
        GetJobClusterResponse.class);
    assertEquals(SUCCESS, jobClusterResponse.responseCode);
    assertTrue(jobClusterResponse.getJobCluster().isPresent());
    assertEquals(clusterWithNoJob, jobClusterResponse.getJobCluster().get().getName());
    // // 1 running worker
    // verify(schedulerMock,timeout(100_1000).times(1)).initializeRunningWorker(any(),any());
    //
    // // 2 worker schedule requests
    // verify(schedulerMock,timeout(100_000).times(4)).scheduleWorker(any());
    try {
        // Bootstrap must hit the store for both clusters and active jobs.
        Mockito.verify(jobStoreSpied).loadAllJobClusters();
        Mockito.verify(jobStoreSpied).loadAllActiveJobs();
    } catch (IOException e) {
        e.printStackTrace();
        fail();
    }
}
/**
 * Negative bootstrap test: when loading job clusters from the store throws an IOException,
 * initialization must respond with SERVER_ERROR rather than succeeding or hanging.
 */
@Test
public void testBootStrapJobClustersAndJobsNegativeTest() throws IOException {
    TestKit probe = new TestKit(system);
    JobTestHelper.deleteAllFiles();
    // Storage provider rigged to fail on the very first bootstrap load call.
    KeyValueBasedPersistenceProvider storageProviderAdapter = mock(
        KeyValueBasedPersistenceProvider.class);
    when(storageProviderAdapter.loadAllJobClusters()).thenThrow(new IOException(
        "StorageException"));
    MantisJobStore jobStore = new MantisJobStore(storageProviderAdapter);
    MantisJobStore jobStoreSpied = Mockito.spy(jobStore);
    // MantisScheduler schedulerMock = mock(MantisScheduler.class);
    ActorRef jobClusterManagerActor = system.actorOf(JobClustersManagerActor.props(
        jobStoreSpied,
        eventPublisher,
        costsCalculator));
    jobClusterManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize(
        schedulerMockFactory,
        true), probe.getRef());
    JobClustersManagerInitializeResponse iResponse = probe.expectMsgClass(Duration.of(
        10,
        ChronoUnit.MINUTES),
        JobClustersManagerInitializeResponse.class);
    assertEquals(BaseResponse.ResponseCode.SERVER_ERROR, iResponse.responseCode);
}
/**
 * Full bootstrap test: creates four clusters (one without jobs), submits a job to three,
 * drives one job to Launched, kills another, then restarts the manager actor and verifies
 * that cluster config, job states, workers, and the last-submitted-jobId stream are all
 * correctly re-hydrated from the store, and that the expected store/scheduler calls happened.
 */
@Test
public void testBootStrapJobClustersAndJobs() {
    TestKit probe = new TestKit(system);
    JobTestHelper.deleteAllFiles();
    // Real file-backed store, spied so bootstrap/archive calls can be verified.
    MantisJobStore jobStore = new MantisJobStore(new KeyValueBasedPersistenceProvider(
        new FileBasedStore(rootDir.getRoot()),
        eventPublisher));
    MantisJobStore jobStoreSpied = Mockito.spy(jobStore);
    ActorRef jobClusterManagerActor = system.actorOf(JobClustersManagerActor.props(
        jobStoreSpied,
        eventPublisher,
        costsCalculator));
    jobClusterManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize(
        schedulerMockFactory,
        false), probe.getRef());
    JobClustersManagerInitializeResponse iResponse = probe.expectMsgClass(Duration.of(
        10,
        ChronoUnit.MINUTES),
        JobClustersManagerInitializeResponse.class);
    List<String> clusterNames = Lists.newArrayList("testBootStrapJobClustersAndJobs1",
        "testBootStrapJobClustersAndJobs2",
        "testBootStrapJobClustersAndJobs3");
    String clusterWithNoJob = "testBootStrapJobClusterWithNoJob";
    createJobClusterAndAssert(jobClusterManagerActor, clusterWithNoJob);
    WorkerMigrationConfig migrationConfig = new WorkerMigrationConfig(
        MigrationStrategyEnum.PERCENTAGE,
        "{\"percentToMove\":60, \"intervalMs\":30000}");
    // Create 3 clusters and submit 1 job each
    for (String cluster : clusterNames) {
        createJobClusterAndAssert(jobClusterManagerActor, cluster, migrationConfig);
        submitJobAndAssert(jobClusterManagerActor, cluster);
        if (cluster.equals("testBootStrapJobClustersAndJobs1")) {
            // send worker events for job 1 so it goes to started state
            String jobId = "testBootStrapJobClustersAndJobs1-1";
            WorkerId workerId = new WorkerId(jobId, 0, 1);
            WorkerEvent launchedEvent = new WorkerLaunched(
                workerId,
                0,
                "host1",
                "vm1",
                empty(),
                Optional.empty(),
                new WorkerPorts(Lists.newArrayList(8000, 9000, 9010, 9020, 9030)));
            jobClusterManagerActor.tell(launchedEvent, probe.getRef());
            WorkerEvent startInitEvent = new WorkerStatus(new Status(
                workerId.getJobId(),
                1,
                workerId.getWorkerIndex(),
                workerId.getWorkerNum(),
                TYPE.INFO,
                "test START_INIT",
                MantisJobState.StartInitiated));
            jobClusterManagerActor.tell(startInitEvent, probe.getRef());
            WorkerEvent heartBeat = new WorkerHeartbeat(new Status(
                jobId,
                1,
                workerId.getWorkerIndex(),
                workerId.getWorkerNum(),
                TYPE.HEARTBEAT,
                "",
                MantisJobState.Started));
            jobClusterManagerActor.tell(heartBeat, probe.getRef());
            // get Job status
            jobClusterManagerActor.tell(
                new GetJobDetailsRequest(
                    "user",
                    JobId.fromId(jobId).get()),
                probe.getRef());
            GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
            // Ensure its launched
            assertEquals(SUCCESS, resp2.responseCode);
            assertEquals(JobState.Launched, resp2.getJobMetadata().get().getState());
        }
    }
    // kill 1 of the jobs to test archive path
    JobClusterManagerProto.KillJobRequest killRequest = new JobClusterManagerProto.KillJobRequest(
        "testBootStrapJobClustersAndJobs2-1",
        JobCompletedReason.Killed.toString(),
        "njoshi");
    jobClusterManagerActor.tell(killRequest, probe.getRef());
    JobClusterManagerProto.KillJobResponse killJobResponse = probe.expectMsgClass(
        JobClusterManagerProto.KillJobResponse.class);
    assertEquals(SUCCESS, killJobResponse.responseCode);
    JobTestHelper.sendWorkerTerminatedEvent(
        probe,
        jobClusterManagerActor,
        "testBootStrapJobClustersAndJobs2-1",
        new WorkerId("testBootStrapJobClustersAndJobs2-1",
            0,
            1));
    // Give the kill/archive path time to persist before the restart below.
    try {
        Thread.sleep(500);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
    // Stop job cluster Manager Actor
    system.stop(jobClusterManagerActor);
    // create new instance
    jobClusterManagerActor = system.actorOf(JobClustersManagerActor.props(
        jobStoreSpied,
        eventPublisher,
        costsCalculator));
    // initialize it
    jobClusterManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize(
        schedulerMockFactory,
        true), probe.getRef());
    JobClustersManagerInitializeResponse initializeResponse = probe.expectMsgClass(
        JobClustersManagerInitializeResponse.class);
    //probe.expectMsgClass(Duration.of(10, ChronoUnit.MINUTES),JobClusterManagerProto.JobClustersManagerInitializeResponse.class);
    //probe.expectMsgClass(JobClusterManagerProto.JobClustersManagerInitializeResponse.class);
    assertEquals(SUCCESS, initializeResponse.responseCode);
    // Get Cluster Config
    jobClusterManagerActor.tell(new GetJobClusterRequest("testBootStrapJobClustersAndJobs1"),
        probe.getRef());
    GetJobClusterResponse clusterResponse = probe.expectMsgClass(GetJobClusterResponse.class);
    assertEquals(SUCCESS, clusterResponse.responseCode);
    assertTrue(clusterResponse.getJobCluster().isPresent());
    WorkerMigrationConfig mConfig = clusterResponse.getJobCluster().get().getMigrationConfig();
    assertEquals(migrationConfig.getStrategy(), mConfig.getStrategy());
    // NOTE(review): this compares migrationConfig against itself — likely meant
    // mConfig.getConfigString() on one side; confirm and fix separately.
    assertEquals(migrationConfig.getConfigString(), migrationConfig.getConfigString());
    // get Job status
    jobClusterManagerActor.tell(new GetJobDetailsRequest(
        "user",
        JobId.fromId("testBootStrapJobClustersAndJobs1-1")
            .get()), probe.getRef());
    GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
    // Ensure its launched
    System.out.println("Resp2 -> " + resp2.message);
    assertEquals(SUCCESS, resp2.responseCode);
    assertEquals(JobState.Launched, resp2.getJobMetadata().get().getState());
    // 1 jobs should be in completed state
    jobClusterManagerActor.tell(new GetJobDetailsRequest(
        "user",
        JobId.fromId("testBootStrapJobClustersAndJobs2-1")
            .get()), probe.getRef());
    resp2 = probe.expectMsgClass(Duration.of(10, ChronoUnit.MINUTES),
        GetJobDetailsResponse.class);
    //TODO(hmitnflx): Need to fix this test after support completed jobs async loading
    // Ensure its completed
    assertEquals(CLIENT_ERROR_NOT_FOUND, resp2.responseCode);
    // assertEquals(JobState.Completed, resp2.getJobMetadata().get().getState());
    jobClusterManagerActor.tell(new GetJobDetailsRequest(
        "user",
        JobId.fromId("testBootStrapJobClustersAndJobs3-1")
            .get()), probe.getRef());
    resp2 = probe.expectMsgClass(Duration.of(10, ChronoUnit.MINUTES),
        GetJobDetailsResponse.class);
    // Ensure its Accepted
    assertEquals(SUCCESS, resp2.responseCode);
    assertEquals(JobState.Accepted, resp2.getJobMetadata().get().getState());
    // Worker/stage lookups on the re-hydrated job metadata.
    try {
        Optional<JobWorker> workerByIndex = resp2.getJobMetadata().get().getWorkerByIndex(1, 0);
        assertTrue(workerByIndex.isPresent());
        Optional<IMantisStageMetadata> stageMetadata = resp2.getJobMetadata()
            .get()
            .getStageMetadata(1);
        assertTrue(stageMetadata.isPresent());
        JobWorker workerByIndex1 = stageMetadata.get().getWorkerByIndex(0);
        System.out.println("Got worker by index : " + workerByIndex1);
        Optional<JobWorker> worker = resp2.getJobMetadata().get().getWorkerByNumber(1);
        assertTrue(worker.isPresent());
    } catch (io.mantisrx.server.master.persistence.exceptions.InvalidJobException e) {
        e.printStackTrace();
    }
    // The last-submitted-jobId stream must reflect the pre-restart submission.
    jobClusterManagerActor.tell(new GetLastSubmittedJobIdStreamRequest(
        "testBootStrapJobClustersAndJobs1"), probe.getRef());
    GetLastSubmittedJobIdStreamResponse lastSubmittedJobIdStreamResponse = probe.expectMsgClass(
        Duration.of(10, ChronoUnit.MINUTES),
        GetLastSubmittedJobIdStreamResponse.class);
    lastSubmittedJobIdStreamResponse.getjobIdBehaviorSubject()
        .get()
        .take(1)
        .toBlocking()
        .subscribe((jId) -> {
            assertEquals(new JobId(
                "testBootStrapJobClustersAndJobs1",
                1), jId);
        });
    jobClusterManagerActor.tell(new GetJobClusterRequest(clusterWithNoJob), probe.getRef());
    GetJobClusterResponse jobClusterResponse = probe.expectMsgClass(Duration.of(
        10,
        ChronoUnit.MINUTES),
        GetJobClusterResponse.class);
    assertEquals(SUCCESS, jobClusterResponse.responseCode);
    assertTrue(jobClusterResponse.getJobCluster().isPresent());
    assertEquals(clusterWithNoJob, jobClusterResponse.getJobCluster().get().getName());
    // 1 running worker
    // NOTE(review): 100_1000 (= 1,001,000 ms) looks like a typo for 100_000 — confirm intended timeout.
    verify(schedulerMock, timeout(100_1000).times(1)).initializeRunningWorker(any(), any(),
        any());
    // 2 worker schedule requests
    verify(schedulerMock, timeout(100_000).times(4)).scheduleWorker(any());
    try {
        Mockito.verify(jobStoreSpied).loadAllArchivedJobsAsync();
        Mockito.verify(jobStoreSpied).loadAllActiveJobs();
        Mockito.verify(jobStoreSpied).archiveWorker(any());
        Mockito.verify(jobStoreSpied).archiveJob(any());
    } catch (IOException e) {
        e.printStackTrace();
        fail();
    }
}
/**
 * Case for a master leader re-election when a new master re-hydrates corrupted job worker
 * metadata.
 *
 * <p>A started worker's metadata is overwritten in the store with null WorkerPorts; after the
 * manager actor restarts, the corrupted worker (number 1) must be terminated and resubmitted
 * as worker number 11 with valid ports and a resubmit count of 1.
 */
@Test
public void testBootstrapJobClusterAndJobsWithCorruptedWorkerPorts()
    throws IOException, io.mantisrx.server.master.persistence.exceptions.InvalidJobException {
    TestKit probe = new TestKit(system);
    JobTestHelper.deleteAllFiles();
    // Real file-backed store, spied so bootstrap/archive calls can be verified.
    MantisJobStore jobStore = new MantisJobStore(new KeyValueBasedPersistenceProvider(
        new FileBasedStore(rootDir.getRoot()),
        eventPublisher));
    MantisJobStore jobStoreSpied = Mockito.spy(jobStore);
    // MantisScheduler schedulerMock = mock(MantisScheduler.class);
    ActorRef jobClusterManagerActor = system.actorOf(JobClustersManagerActor.props(
        jobStoreSpied,
        eventPublisher,
        costsCalculator));
    jobClusterManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize(
        schedulerMockFactory,
        false), probe.getRef());
    probe.expectMsgClass(Duration.of(
        10,
        ChronoUnit.MINUTES),
        JobClustersManagerInitializeResponse.class);
    String jobClusterName = "testBootStrapJobClustersAndJobs1";
    WorkerMigrationConfig migrationConfig = new WorkerMigrationConfig(
        MigrationStrategyEnum.PERCENTAGE,
        "{\"percentToMove\":60, \"intervalMs\":30000}");
    createJobClusterAndAssert(jobClusterManagerActor, jobClusterName, migrationConfig);
    submitJobAndAssert(jobClusterManagerActor, jobClusterName);
    // Drive worker 1 through launched -> start-initiated -> heartbeat so the job launches.
    String jobId = "testBootStrapJobClustersAndJobs1-1";
    WorkerId workerId = new WorkerId(jobId, 0, 1);
    WorkerEvent launchedEvent = new WorkerLaunched(
        workerId,
        0,
        "host1",
        "vm1",
        empty(),
        Optional.empty(),
        new WorkerPorts(Lists.newArrayList(8000, 9000, 9010, 9020, 9030)));
    jobClusterManagerActor.tell(launchedEvent, probe.getRef());
    WorkerEvent startInitEvent = new WorkerStatus(new Status(
        workerId.getJobId(),
        1,
        workerId.getWorkerIndex(),
        workerId.getWorkerNum(),
        TYPE.INFO,
        "test START_INIT",
        MantisJobState.StartInitiated));
    jobClusterManagerActor.tell(startInitEvent, probe.getRef());
    WorkerEvent heartBeat = new WorkerHeartbeat(new Status(
        jobId,
        1,
        workerId.getWorkerIndex(),
        workerId.getWorkerNum(),
        TYPE.HEARTBEAT,
        "",
        MantisJobState.Started));
    jobClusterManagerActor.tell(heartBeat, probe.getRef());
    // get Job status
    jobClusterManagerActor.tell(
        new GetJobDetailsRequest(
            "user",
            JobId.fromId(jobId).get()),
        probe.getRef());
    GetJobDetailsResponse resp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
    // Ensure its launched
    assertEquals(SUCCESS, resp2.responseCode);
    // Corrupt the persisted worker: null WorkerPorts simulates bad metadata on disk.
    JobWorker worker = new JobWorker.Builder()
        .withWorkerIndex(0)
        .withWorkerNumber(1)
        .withJobId(jobId)
        .withStageNum(1)
        .withNumberOfPorts(5)
        .withWorkerPorts(null)
        .withState(WorkerState.Started)
        .withLifecycleEventsPublisher(eventPublisher)
        .build();
    jobStoreSpied.updateWorker(worker.getMetadata());
    // Stop job cluster Manager Actor
    system.stop(jobClusterManagerActor);
    // create new instance
    jobClusterManagerActor = system.actorOf(JobClustersManagerActor.props(
        jobStoreSpied,
        eventPublisher,
        costsCalculator));
    // initialize it
    jobClusterManagerActor.tell(new JobClusterManagerProto.JobClustersManagerInitialize(
        schedulerMockFactory,
        true), probe.getRef());
    JobClustersManagerInitializeResponse initializeResponse = probe.expectMsgClass(
        JobClustersManagerInitializeResponse.class);
    assertEquals(SUCCESS, initializeResponse.responseCode);
    // The resubmitted replacement worker (number 11) launches with valid ports.
    WorkerId newWorkerId = new WorkerId(jobId, 0, 11);
    launchedEvent = new WorkerLaunched(
        newWorkerId,
        0,
        "host1",
        "vm1",
        empty(),
        Optional.empty(),
        new WorkerPorts(Lists.newArrayList(8000, 9000, 9010, 9020, 9030)));
    jobClusterManagerActor.tell(launchedEvent, probe.getRef());
    // Get Cluster Config
    jobClusterManagerActor.tell(new GetJobClusterRequest("testBootStrapJobClustersAndJobs1"),
        probe.getRef());
    GetJobClusterResponse clusterResponse = probe.expectMsgClass(GetJobClusterResponse.class);
    assertEquals(SUCCESS, clusterResponse.responseCode);
    assertTrue(clusterResponse.getJobCluster().isPresent());
    WorkerMigrationConfig mConfig = clusterResponse.getJobCluster().get().getMigrationConfig();
    assertEquals(migrationConfig.getStrategy(), mConfig.getStrategy());
    // NOTE(review): this compares migrationConfig against itself — likely meant
    // mConfig.getConfigString() on one side; confirm and fix separately.
    assertEquals(migrationConfig.getConfigString(), migrationConfig.getConfigString());
    // get Job status
    jobClusterManagerActor.tell(new GetJobDetailsRequest(
        "user",
        JobId.fromId("testBootStrapJobClustersAndJobs1-1")
            .get()), probe.getRef());
    resp2 = probe.expectMsgClass(GetJobDetailsResponse.class);
    // Ensure its launched
    assertEquals(SUCCESS, resp2.responseCode);
    assertEquals(JobState.Launched, resp2.getJobMetadata().get().getState());
    // The re-hydrated worker must be the replacement (number 11) with ports and 1 resubmit.
    IMantisWorkerMetadata mantisWorkerMetadata = resp2.getJobMetadata().get()
        .getWorkerByIndex(1, 0).get()
        .getMetadata();
    assertNotNull(mantisWorkerMetadata.getWorkerPorts());
    assertEquals(11, mantisWorkerMetadata.getWorkerNumber());
    assertEquals(1, mantisWorkerMetadata.getTotalResubmitCount());
    jobClusterManagerActor.tell(new GetLastSubmittedJobIdStreamRequest(
        "testBootStrapJobClustersAndJobs1"), probe.getRef());
    GetLastSubmittedJobIdStreamResponse lastSubmittedJobIdStreamResponse = probe.expectMsgClass(
        Duration.of(10, ChronoUnit.MINUTES),
        GetLastSubmittedJobIdStreamResponse.class);
    lastSubmittedJobIdStreamResponse.getjobIdBehaviorSubject()
        .get()
        .take(1)
        .toBlocking()
        .subscribe((jId) -> {
            assertEquals(new JobId(
                "testBootStrapJobClustersAndJobs1",
                1), jId);
        });
    // Two schedules: one for the initial success, one for a resubmit from corrupted worker ports.
    verify(schedulerMock, times(2)).scheduleWorker(any());
    // One unschedule from corrupted worker ID 1 (before the resubmit).
    verify(schedulerMock, times(1)).unscheduleAndTerminateWorker(eq(workerId), any());
    try {
        Mockito.verify(jobStoreSpied).loadAllArchivedJobsAsync();
        Mockito.verify(jobStoreSpied).loadAllActiveJobs();
        Mockito.verify(jobStoreSpied).archiveWorker(any());
    } catch (IOException e) {
        e.printStackTrace();
        fail();
    }
}
/**
 * Creating a new job cluster should succeed with SUCCESS_CREATED, after which the
 * cluster is retrievable by name via GetJobClusterRequest.
 */
@Test
public void testJobClusterCreate() throws MalformedURLException {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobClusterCreateCluster";
    JobClusterDefinitionImpl fakeJobCluster =
        createFakeJobClusterDefn(clusterName, Lists.newArrayList());
    // Create the cluster.
    jobClusterManagerActor.tell(
        new JobClusterManagerProto.CreateJobClusterRequest(fakeJobCluster, "user"),
        probe.getRef());
    JobClusterManagerProto.CreateJobClusterResponse createResp =
        probe.expectMsgClass(JobClusterManagerProto.CreateJobClusterResponse.class);
    assertEquals(SUCCESS_CREATED, createResp.responseCode);
    // Look it up again and check the name matches.
    jobClusterManagerActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
    GetJobClusterResponse getResp = probe.expectMsgClass(GetJobClusterResponse.class);
    assertEquals(SUCCESS, getResp.responseCode);
    assertEquals(clusterName, getResp.getJobCluster().get().getName());
}
/**
 * Creating the same job cluster twice must fail the second time with CLIENT_ERROR_CONFLICT,
 * and the first cluster must remain intact and retrievable.
 */
@Test
public void testJobClusterCreateDupFails() throws MalformedURLException {
    TestKit probe = new TestKit(system);
    String clusterName = "testJobClusterCreateDupFails";
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(
        clusterName,
        Lists.newArrayList());
    // First create succeeds.
    jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(
        fakeJobCluster,
        "user"), probe.getRef());
    JobClusterManagerProto.CreateJobClusterResponse resp = probe.expectMsgClass(
        JobClusterManagerProto.CreateJobClusterResponse.class);
    assertEquals(SUCCESS_CREATED, resp.responseCode);
    jobClusterManagerActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
    GetJobClusterResponse resp2 = probe.expectMsgClass(GetJobClusterResponse.class);
    assertEquals(SUCCESS, resp2.responseCode);
    assertEquals(clusterName, resp2.getJobCluster().get().getName());
    // Duplicate create must be rejected with a conflict.
    jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(
        fakeJobCluster,
        "user"), probe.getRef());
    JobClusterManagerProto.CreateJobClusterResponse resp3 = probe.expectMsgClass(
        JobClusterManagerProto.CreateJobClusterResponse.class);
    System.out.println("Got resp -> " + resp3);
    assertEquals(CLIENT_ERROR_CONFLICT, resp3.responseCode);
    // make sure first cluster is still there
    jobClusterManagerActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
    GetJobClusterResponse resp4 = probe.expectMsgClass(GetJobClusterResponse.class);
    assertEquals(SUCCESS, resp4.responseCode);
    assertEquals(clusterName, resp4.getJobCluster().get().getName());
    //assertEquals(jobClusterManagerActor, probe.getLastSender().path());
}
@Test
public void testListJobClusters() {
TestKit probe = new TestKit(system);
String clusterName = "testListJobClusters";
JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(
clusterName,
Lists.newArrayList());
jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(
fakeJobCluster,
"user"), probe.getRef());
JobClusterManagerProto.CreateJobClusterResponse resp = probe.expectMsgClass(
JobClusterManagerProto.CreateJobClusterResponse.class);
assertEquals(SUCCESS_CREATED, resp.responseCode);
String clusterName2 = "testListJobClusters2";
fakeJobCluster = createFakeJobClusterDefn(clusterName2, Lists.newArrayList());
jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(
fakeJobCluster,
"user"), probe.getRef());
resp = probe.expectMsgClass(JobClusterManagerProto.CreateJobClusterResponse.class);
assertEquals(SUCCESS_CREATED, resp.responseCode);
jobClusterManagerActor.tell(
new JobClusterManagerProto.ListJobClustersRequest(),
probe.getRef());
JobClusterManagerProto.ListJobClustersResponse resp2 = probe.expectMsgClass(
JobClusterManagerProto.ListJobClustersResponse.class);
assertTrue(2 <= resp2.getJobClusters().size());
List<MantisJobClusterMetadataView> jClusters = resp2.getJobClusters();
int cnt = 0;
for (MantisJobClusterMetadataView jCluster : jClusters) {
if (jCluster.getName().equals(clusterName) || jCluster.getName().equals(clusterName2)) {
cnt++;
}
}
assertEquals(2, cnt);
}
    /**
     * Verifies that ListJobsRequest aggregates jobs across clusters: two
     * clusters are created, one job is submitted to each, and both resulting
     * job ids ("testListJobs-1" and "testListJobs2-1") must appear in the
     * listing.
     */
    @Test
    public void testListJobs() throws InvalidJobException {
        TestKit probe = new TestKit(system);
        //create cluster 1
        String clusterName = "testListJobs";
        JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(
            clusterName,
            Lists.newArrayList());
        jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(
            fakeJobCluster,
            "user"), probe.getRef());
        JobClusterManagerProto.CreateJobClusterResponse resp = probe.expectMsgClass(
            JobClusterManagerProto.CreateJobClusterResponse.class);
        assertEquals(SUCCESS_CREATED, resp.responseCode);
        // submit job to this cluster
        JobDefinition jobDefn = createJob(clusterName);
        jobClusterManagerActor.tell(
            new JobClusterManagerProto.SubmitJobRequest(
                clusterName,
                "me",
                jobDefn),
            probe.getRef());
        JobClusterManagerProto.SubmitJobResponse submitResp = probe.expectMsgClass(
            JobClusterManagerProto.SubmitJobResponse.class);
        assertEquals(SUCCESS, submitResp.responseCode);
        // create cluster 2
        String clusterName2 = "testListJobs2";
        fakeJobCluster = createFakeJobClusterDefn(clusterName2, Lists.newArrayList());
        jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(
            fakeJobCluster,
            "user"), probe.getRef());
        resp = probe.expectMsgClass(JobClusterManagerProto.CreateJobClusterResponse.class);
        assertEquals(SUCCESS_CREATED, resp.responseCode);
        // submit job to this cluster
        jobDefn = createJob(clusterName2);
        jobClusterManagerActor.tell(
            new JobClusterManagerProto.SubmitJobRequest(
                clusterName2,
                "me",
                jobDefn),
            probe.getRef());
        submitResp = probe.expectMsgClass(JobClusterManagerProto.SubmitJobResponse.class);
        assertEquals(SUCCESS, submitResp.responseCode);
        // List all jobs and scan for the two jobs submitted above. Job ids are
        // of the form <clusterName>-<jobNumber>, so the first job in each
        // cluster gets the suffix "-1".
        jobClusterManagerActor.tell(new JobClusterManagerProto.ListJobsRequest(), probe.getRef());
        JobClusterManagerProto.ListJobsResponse listResp = probe.expectMsgClass(
            JobClusterManagerProto.ListJobsResponse.class);
        System.out.println("Got " + listResp.getJobList().size());
        boolean foundJob1 = false;
        boolean foundJob2 = false;
        for (MantisJobMetadataView v : listResp.getJobList()) {
            System.out.println("Job -> " + v.getJobMetadata().getJobId());
            String jId = v.getJobMetadata().getJobId();
            if (jId.equals("testListJobs-1")) {
                foundJob1 = true;
            } else if (jId.equals("testListJobs2-1")) {
                foundJob2 = true;
            }
        }
        // Other tests may contribute additional jobs, so only a lower bound on
        // the total count is asserted.
        assertTrue(listResp.getJobList().size() >= 2);
        assertTrue(foundJob1 && foundJob2);
    }
    /**
     * Verifies the full update-then-delete lifecycle of a job cluster:
     * creates a cluster, updates it with a new artifact/scheduling-info/version
     * config, then deletes it — each step must report success.
     */
    @Test
    public void testJobClusterUpdateAndDelete() throws MalformedURLException {
        TestKit probe = new TestKit(system);
        String clusterName = "testJobClusterUpdateAndDeleteCluster";
        List<Label> labels = Lists.newLinkedList();
        Label l = new Label("labelname", "labelvalue");
        labels.add(l);
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(
            clusterName,
            labels);
        jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(
            fakeJobCluster,
            "user"), probe.getRef());
        JobClusterManagerProto.CreateJobClusterResponse createResp = probe.expectMsgClass(
            JobClusterManagerProto.CreateJobClusterResponse.class);
        assertEquals(SUCCESS_CREATED, createResp.responseCode);
        // Build an updated definition for the same cluster name with a new
        // artifact, two-worker scheduling info and a bumped version.
        JobClusterConfig clusterConfig = new JobClusterConfig.Builder()
            .withArtifactName("myart2")
            .withSchedulingInfo(TWO_WORKER_SCHED_INFO)
            .withVersion("0.0.2")
            .build();
        final JobClusterDefinitionImpl updatedFakeJobCluster = new JobClusterDefinitionImpl.Builder()
            .withJobClusterConfig(clusterConfig)
            .withName(clusterName)
            .withParameters(Lists.newArrayList())
            .withUser(user)
            .withIsReadyForJobMaster(true)
            .withOwner(DEFAULT_JOB_OWNER)
            .withMigrationConfig(WorkerMigrationConfig.DEFAULT)
            .withSla(NO_OP_SLA)
            .build();
        jobClusterManagerActor.tell(new JobClusterManagerProto.UpdateJobClusterRequest(
            updatedFakeJobCluster,
            "user"), probe.getRef());
        JobClusterManagerProto.UpdateJobClusterResponse updateResp = probe.expectMsgClass(
            JobClusterManagerProto.UpdateJobClusterResponse.class);
        // Print the full response on failure to aid debugging before asserting.
        if (SUCCESS != updateResp.responseCode) {
            System.out.println("Update cluster response: " + updateResp);
        }
        assertEquals(SUCCESS, updateResp.responseCode);
        // assertEquals(jobClusterManagerActor, probe.getLastSender());
        // Finally, delete the cluster and expect success.
        jobClusterManagerActor.tell(
            new JobClusterManagerProto.DeleteJobClusterRequest(user,
                clusterName),
            probe.getRef());
        JobClusterManagerProto.DeleteJobClusterResponse deleteResp = probe.expectMsgClass(
            JobClusterManagerProto.DeleteJobClusterResponse.class);
        assertEquals(SUCCESS, deleteResp.responseCode);
        // assertEquals(jobClusterManagerActor, probe.getLastSender());
    }
@Test
public void testJobClusterSLAUpdate() throws MalformedURLException {
TestKit probe = new TestKit(system);
String clusterName = "testJobClusterSLAUpdate";
List<Label> labels = Lists.newLinkedList();
Label l = new Label("labelname", "labelvalue");
labels.add(l);
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(
clusterName,
labels);
jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(
fakeJobCluster,
"user"), probe.getRef());
JobClusterManagerProto.CreateJobClusterResponse createResp = probe.expectMsgClass(
JobClusterManagerProto.CreateJobClusterResponse.class);
assertEquals(SUCCESS_CREATED, createResp.responseCode);
UpdateJobClusterSLARequest req = new JobClusterManagerProto.UpdateJobClusterSLARequest(
clusterName,
1,
2,
"user");
jobClusterManagerActor.tell(req, probe.getRef());
JobClusterManagerProto.UpdateJobClusterSLAResponse updateResp = probe.expectMsgClass(
JobClusterManagerProto.UpdateJobClusterSLAResponse.class);
assertEquals(SUCCESS, updateResp.responseCode);
// assertEquals(jobClusterManagerActor, probe.getLastSender());
jobClusterManagerActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
GetJobClusterResponse getResp = probe.expectMsgClass(GetJobClusterResponse.class);
assertEquals(SUCCESS, getResp.responseCode);
assertEquals(1, getResp.getJobCluster().get().getSla().getMin());
assertEquals(2, getResp.getJobCluster().get().getSla().getMax());
// assertEquals(jobClusterManagerActor, probe.getLastSender());
}
@Test
public void testJobClusterLabelUpdate() throws MalformedURLException {
TestKit probe = new TestKit(system);
String clusterName = "testJobClusterLabelUpdate";
List<Label> labels = Lists.newLinkedList();
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(
clusterName,
labels);
jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(
fakeJobCluster,
"user"), probe.getRef());
JobClusterManagerProto.CreateJobClusterResponse createResp = probe.expectMsgClass(
JobClusterManagerProto.CreateJobClusterResponse.class);
assertEquals(SUCCESS_CREATED, createResp.responseCode);
List<Label> labels2 = Lists.newLinkedList();
Label l = new Label("labelname", "labelvalue");
labels2.add(l);
UpdateJobClusterLabelsRequest req = new JobClusterManagerProto.UpdateJobClusterLabelsRequest(
clusterName,
labels2,
"user");
jobClusterManagerActor.tell(req, probe.getRef());
JobClusterManagerProto.UpdateJobClusterLabelsResponse updateResp = probe.expectMsgClass(
JobClusterManagerProto.UpdateJobClusterLabelsResponse.class);
assertEquals(SUCCESS, updateResp.responseCode);
jobClusterManagerActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
GetJobClusterResponse getResp = probe.expectMsgClass(GetJobClusterResponse.class);
assertEquals(SUCCESS, getResp.responseCode);
assertEquals(1, getResp.getJobCluster().get().getLabels().size());
assertEquals(l, getResp.getJobCluster().get().getLabels().get(0));
}
@Test
public void testJobClusterArtifactUpdate() throws MalformedURLException {
TestKit probe = new TestKit(system);
String clusterName = "testJobClusterArtifactUpdate";
List<Label> labels = Lists.newLinkedList();
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(
clusterName,
labels);
jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(
fakeJobCluster,
"user"), probe.getRef());
JobClusterManagerProto.CreateJobClusterResponse createResp = probe.expectMsgClass(
JobClusterManagerProto.CreateJobClusterResponse.class);
assertEquals(SUCCESS_CREATED, createResp.responseCode);
UpdateJobClusterArtifactRequest req = new JobClusterManagerProto.UpdateJobClusterArtifactRequest(
clusterName,
"myjar",
"1.0.1",
true,
"user");
jobClusterManagerActor.tell(req, probe.getRef());
JobClusterManagerProto.UpdateJobClusterArtifactResponse updateResp = probe.expectMsgClass(
JobClusterManagerProto.UpdateJobClusterArtifactResponse.class);
assertEquals(SUCCESS, updateResp.responseCode);
jobClusterManagerActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
GetJobClusterResponse getResp = probe.expectMsgClass(GetJobClusterResponse.class);
assertEquals(SUCCESS, getResp.responseCode);
//assertEquals("myjar", getResp.getJobCluster().get().g.getArtifactName());
assertEquals("1.0.1", getResp.getJobCluster().get().getLatestVersion());
}
@Test
public void testJobClusterWorkerMigrationUpdate() throws MalformedURLException {
TestKit probe = new TestKit(system);
String clusterName = "testJobClusterWorkerMigrationUpdate";
List<Label> labels = Lists.newLinkedList();
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(
clusterName,
labels);
jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(
fakeJobCluster,
"user"), probe.getRef());
JobClusterManagerProto.CreateJobClusterResponse createResp = probe.expectMsgClass(
JobClusterManagerProto.CreateJobClusterResponse.class);
assertEquals(SUCCESS_CREATED, createResp.responseCode);
UpdateJobClusterWorkerMigrationStrategyRequest req = new JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyRequest(
clusterName,
new WorkerMigrationConfig(MigrationStrategyEnum.ONE_WORKER, "{}"),
clusterName);
jobClusterManagerActor.tell(req, probe.getRef());
JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyResponse updateResp = probe.expectMsgClass(
JobClusterManagerProto.UpdateJobClusterWorkerMigrationStrategyResponse.class);
assertEquals(SUCCESS, updateResp.responseCode);
jobClusterManagerActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
GetJobClusterResponse getResp = probe.expectMsgClass(GetJobClusterResponse.class);
assertEquals(SUCCESS, getResp.responseCode);
assertEquals(
MigrationStrategyEnum.ONE_WORKER,
getResp.getJobCluster().get().getMigrationConfig().getStrategy());
}
@Test
public void testJobClusterDisable() throws MalformedURLException {
TestKit probe = new TestKit(system);
String clusterName = "testJobClusterDisable";
List<Label> labels = Lists.newLinkedList();
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(
clusterName,
labels);
jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(
fakeJobCluster,
"user"), probe.getRef());
JobClusterManagerProto.CreateJobClusterResponse createResp = probe.expectMsgClass(
JobClusterManagerProto.CreateJobClusterResponse.class);
assertEquals(SUCCESS_CREATED, createResp.responseCode);
DisableJobClusterRequest req = new JobClusterManagerProto.DisableJobClusterRequest(
clusterName,
"user");
jobClusterManagerActor.tell(req, probe.getRef());
JobClusterManagerProto.DisableJobClusterResponse updateResp = probe.expectMsgClass(
JobClusterManagerProto.DisableJobClusterResponse.class);
assertEquals(SUCCESS, updateResp.responseCode);
jobClusterManagerActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
GetJobClusterResponse getResp = probe.expectMsgClass(GetJobClusterResponse.class);
assertEquals(SUCCESS, getResp.responseCode);
assertTrue(getResp.getJobCluster().get().isDisabled());
}
    /**
     * Verifies the disable-then-enable round trip: after disabling, the
     * cluster reports disabled; after re-enabling, it reports enabled again.
     */
    @Test
    public void testJobClusterEnable() throws MalformedURLException {
        TestKit probe = new TestKit(system);
        String clusterName = "testJobClusterEnable";
        List<Label> labels = Lists.newLinkedList();
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(
            clusterName,
            labels);
        jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(
            fakeJobCluster,
            "user"), probe.getRef());
        JobClusterManagerProto.CreateJobClusterResponse createResp = probe.expectMsgClass(
            JobClusterManagerProto.CreateJobClusterResponse.class);
        assertEquals(SUCCESS_CREATED, createResp.responseCode);
        // Disable the freshly created cluster.
        DisableJobClusterRequest req = new JobClusterManagerProto.DisableJobClusterRequest(
            clusterName,
            "user");
        jobClusterManagerActor.tell(req, probe.getRef());
        JobClusterManagerProto.DisableJobClusterResponse updateResp = probe.expectMsgClass(
            JobClusterManagerProto.DisableJobClusterResponse.class);
        assertEquals(SUCCESS, updateResp.responseCode);
        // Confirm the disabled state took effect before re-enabling.
        jobClusterManagerActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
        GetJobClusterResponse getResp = probe.expectMsgClass(GetJobClusterResponse.class);
        assertEquals(SUCCESS, getResp.responseCode);
        assertTrue(getResp.getJobCluster().get().isDisabled());
        // Now re-enable the cluster.
        EnableJobClusterRequest req2 = new JobClusterManagerProto.EnableJobClusterRequest(
            clusterName,
            "user");
        jobClusterManagerActor.tell(req2, probe.getRef());
        JobClusterManagerProto.EnableJobClusterResponse updateResp2 = probe.expectMsgClass(
            JobClusterManagerProto.EnableJobClusterResponse.class);
        assertEquals(SUCCESS, updateResp2.responseCode);
        // The cluster must no longer report disabled.
        jobClusterManagerActor.tell(new GetJobClusterRequest(clusterName), probe.getRef());
        getResp = probe.expectMsgClass(GetJobClusterResponse.class);
        assertEquals(SUCCESS, getResp.responseCode);
        assertFalse(getResp.getJobCluster().get().isDisabled());
    }
@Test
public void testJobSubmit() {
TestKit probe = new TestKit(system);
String clusterName = "testJobSubmit";
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(
clusterName,
Lists.newArrayList());
jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(
fakeJobCluster,
"user"), probe.getRef());
JobClusterManagerProto.CreateJobClusterResponse resp = probe.expectMsgClass(
JobClusterManagerProto.CreateJobClusterResponse.class);
System.out.println("response----->" + resp);
assertEquals(SUCCESS_CREATED, resp.responseCode);
JobDefinition jobDefn;
try {
jobDefn = createJob(clusterName);
jobClusterManagerActor.tell(
new JobClusterManagerProto.SubmitJobRequest(
clusterName,
"me",
jobDefn),
probe.getRef());
JobClusterManagerProto.SubmitJobResponse submitResp = probe.expectMsgClass(
JobClusterManagerProto.SubmitJobResponse.class);
assertEquals(SUCCESS, submitResp.responseCode);
jobClusterManagerActor.tell(
new JobClusterManagerProto.KillJobRequest(
clusterName + "-1",
"",
clusterName),
probe.getRef());
JobClusterManagerProto.KillJobResponse kill = probe.expectMsgClass(
JobClusterManagerProto.KillJobResponse.class);
} catch (InvalidJobException e) {
// TODO Auto-generated catch block
e.printStackTrace();
fail();
}
//assertEquals(jobClusterManagerActor, probe.getLastSender().path());
}
@Test
public void testWorkerList() {
TestKit probe = new TestKit(system);
String clusterName = "testWorkerList";
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(
clusterName,
Lists.newArrayList());
jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(
fakeJobCluster,
"user"), probe.getRef());
JobClusterManagerProto.CreateJobClusterResponse resp = probe.expectMsgClass(
JobClusterManagerProto.CreateJobClusterResponse.class);
System.out.println("response----->" + resp);
assertEquals(SUCCESS_CREATED, resp.responseCode);
JobDefinition jobDefn;
try {
jobDefn = createJob(clusterName);
jobClusterManagerActor.tell(
new JobClusterManagerProto.SubmitJobRequest(
clusterName,
"me",
jobDefn),
probe.getRef());
JobClusterManagerProto.SubmitJobResponse submitResp = probe.expectMsgClass(
JobClusterManagerProto.SubmitJobResponse.class);
assertEquals(SUCCESS, submitResp.responseCode);
jobClusterManagerActor.tell(new JobClusterManagerProto.ListWorkersRequest(new JobId(
clusterName,
1)), probe.getRef());
JobClusterManagerProto.ListWorkersResponse listWorkersResponse = probe.expectMsgClass(
JobClusterManagerProto.ListWorkersResponse.class);
assertEquals(SUCCESS, listWorkersResponse.responseCode);
assertEquals(1, listWorkersResponse.getWorkerMetadata().size());
// send list workers request to non existent cluster
jobClusterManagerActor.tell(new JobClusterManagerProto.ListWorkersRequest(new JobId(
"randomCluster",
1)), probe.getRef());
JobClusterManagerProto.ListWorkersResponse listWorkersResponse2 = probe.expectMsgClass(
JobClusterManagerProto.ListWorkersResponse.class);
assertEquals(CLIENT_ERROR, listWorkersResponse2.responseCode);
assertEquals(0, listWorkersResponse2.getWorkerMetadata().size());
jobClusterManagerActor.tell(
new JobClusterManagerProto.KillJobRequest(
clusterName + "-1",
"",
clusterName),
probe.getRef());
JobClusterManagerProto.KillJobResponse kill = probe.expectMsgClass(
JobClusterManagerProto.KillJobResponse.class);
} catch (InvalidJobException e) {
// TODO Auto-generated catch block
e.printStackTrace();
fail();
}
//assertEquals(jobClusterManagerActor, probe.getLastSender().path());
}
@Test
public void testGetJobIdSubject() {
TestKit probe = new TestKit(system);
String clusterName = "testGetJobIdSubject";
final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(
clusterName,
Lists.newArrayList());
jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(
fakeJobCluster,
"user"), probe.getRef());
JobClusterManagerProto.CreateJobClusterResponse resp = probe.expectMsgClass(
JobClusterManagerProto.CreateJobClusterResponse.class);
System.out.println("response----->" + resp);
assertEquals(SUCCESS_CREATED, resp.responseCode);
JobDefinition jobDefn;
try {
jobClusterManagerActor.tell(
new GetLastSubmittedJobIdStreamRequest(clusterName),
probe.getRef());
JobClusterManagerProto.GetLastSubmittedJobIdStreamResponse getLastSubmittedJobIdStreamResponse = probe
.expectMsgClass(JobClusterManagerProto.GetLastSubmittedJobIdStreamResponse.class);
assertEquals(SUCCESS, getLastSubmittedJobIdStreamResponse.responseCode);
CountDownLatch jobIdLatch = new CountDownLatch(1);
assertTrue(getLastSubmittedJobIdStreamResponse.getjobIdBehaviorSubject().isPresent());
BehaviorSubject<JobId> jobIdBehaviorSubject =
getLastSubmittedJobIdStreamResponse.getjobIdBehaviorSubject().get();
jobIdBehaviorSubject.subscribeOn(Schedulers.io()).subscribe((jId) -> {
System.out.println("Got Jid -> " + jId);
assertEquals(clusterName + "-1", jId.getId());
jobIdLatch.countDown();
});
jobDefn = createJob(clusterName);
jobClusterManagerActor.tell(
new JobClusterManagerProto.SubmitJobRequest(
clusterName,
"me",
jobDefn),
probe.getRef());
JobClusterManagerProto.SubmitJobResponse submitResp = probe.expectMsgClass(
JobClusterManagerProto.SubmitJobResponse.class);
assertEquals(SUCCESS, submitResp.responseCode);
jobIdLatch.await(1, TimeUnit.SECONDS);
// try a non existent cluster
jobClusterManagerActor.tell(
new GetLastSubmittedJobIdStreamRequest("randomC"),
probe.getRef());
getLastSubmittedJobIdStreamResponse = probe.expectMsgClass(
JobClusterManagerProto.GetLastSubmittedJobIdStreamResponse.class);
assertEquals(CLIENT_ERROR_NOT_FOUND, getLastSubmittedJobIdStreamResponse.responseCode);
assertTrue(!getLastSubmittedJobIdStreamResponse.getjobIdBehaviorSubject().isPresent());
jobClusterManagerActor.tell(
new JobClusterManagerProto.KillJobRequest(
clusterName + "-1",
"",
clusterName),
probe.getRef());
JobClusterManagerProto.KillJobResponse kill = probe.expectMsgClass(
JobClusterManagerProto.KillJobResponse.class);
} catch (InvalidJobException e) {
// TODO Auto-generated catch block
e.printStackTrace();
fail();
} catch (InterruptedException e) {
e.printStackTrace();
}
//assertEquals(jobClusterManagerActor, probe.getLastSender().path());
}
@Test
public void testJobSubmitToNonExistentCluster() {
TestKit probe = new TestKit(system);
String clusterName = "testJobSubmitToNonExistentClusterCluster";
JobDefinition jobDefn;
try {
jobDefn = createJob(clusterName);
jobClusterManagerActor.tell(
new JobClusterManagerProto.SubmitJobRequest(
clusterName,
"me",
jobDefn),
probe.getRef());
JobClusterManagerProto.SubmitJobResponse submitResp = probe.expectMsgClass(
JobClusterManagerProto.SubmitJobResponse.class);
assertEquals(CLIENT_ERROR_NOT_FOUND, submitResp.responseCode);
} catch (InvalidJobException e) {
// TODO Auto-generated catch block
e.printStackTrace();
fail();
}
//assertEquals(jobClusterManagerActor, probe.getLastSender().path());
}
    /**
     * A terminated-worker event for a worker whose job is unknown (a "zombie"
     * worker) must be ignored: no unschedule/terminate call should reach the
     * scheduler.
     */
    @Test
    public void testTerminalEventFromZombieWorkerIgnored() {
        TestKit probe = new TestKit(system);
        String clusterName = "testZombieWorkerHandling";
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(
            clusterName,
            Lists.newArrayList());
        jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(
            fakeJobCluster,
            "user"), probe.getRef());
        JobClusterManagerProto.CreateJobClusterResponse resp = probe.expectMsgClass(
            JobClusterManagerProto.CreateJobClusterResponse.class);
        System.out.println("response----->" + resp);
        assertEquals(SUCCESS_CREATED, resp.responseCode);
        // Worker belongs to "randomCluster2", which was never created here.
        WorkerId zWorker1 = new WorkerId("randomCluster2", "randomCluster2-1", 0, 1);
        JobTestHelper.sendWorkerTerminatedEvent(probe,
            jobClusterManagerActor,
            "randomCluster2-1",
            zWorker1);
        // The event is already terminal, so the scheduler must NOT be asked to
        // terminate the worker (times(0) within the 1s verification window).
        verify(schedulerMock, timeout(1_000).times(0)).unscheduleAndTerminateWorker(zWorker1,
            empty());
    }
    /**
     * A non-terminal event (start-initiated) from a zombie worker — one whose
     * job only exists in the archive — must trigger termination of that
     * worker via the scheduler.
     */
    @Test
    public void testNonTerminalEventFromZombieWorkerLeadsToTermination() {
        TestKit probe = new TestKit(system);
        String clusterName = "testNonTerminalEventFromZombieWorkerLeadsToTermination";
        final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(
            clusterName,
            Lists.newArrayList());
        jobClusterManagerActor.tell(new JobClusterManagerProto.CreateJobClusterRequest(
            fakeJobCluster,
            "user"), probe.getRef());
        JobClusterManagerProto.CreateJobClusterResponse resp = probe.expectMsgClass(
            JobClusterManagerProto.CreateJobClusterResponse.class);
        System.out.println("response----->" + resp);
        assertEquals(SUCCESS_CREATED, resp.responseCode);
        WorkerId zWorker1 = new WorkerId("randomCluster", "randomCluster-1", 0, 1);
        // Stub the store so the zombie's job resolves only as an archived job.
        when(jobStoreMock.getArchivedJob(zWorker1.getJobId()))
            .thenReturn(Optional.of(
                new MantisJobMetadataImpl.Builder().withJobDefinition(mock(JobDefinition.class))
                    .build()));
        JobTestHelper.sendStartInitiatedEvent(probe, jobClusterManagerActor, 1, zWorker1);
        // The still-running zombie worker must be unscheduled and terminated.
        verify(schedulerMock, timeout(1_000).times(1)).unscheduleAndTerminateWorker(zWorker1,
            empty());
    }
}
| 7,897 |
0 | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master | Create_ds/mantis/mantis-control-plane/mantis-control-plane-server/src/test/java/io/mantisrx/master/resourcecluster/ExecutorStateManagerTests.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import io.mantisrx.common.WorkerConstants;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.common.util.DelegateClock;
import io.mantisrx.master.resourcecluster.ExecutorStateManagerImpl.TaskExecutorHolder;
import io.mantisrx.master.resourcecluster.ResourceClusterActor.TaskExecutorAssignmentRequest;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.server.core.TestingRpcService;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorAllocationRequest;
import io.mantisrx.server.master.resourcecluster.TaskExecutorID;
import io.mantisrx.server.master.resourcecluster.TaskExecutorRegistration;
import io.mantisrx.server.master.resourcecluster.TaskExecutorReport;
import io.mantisrx.server.master.resourcecluster.TaskExecutorStatusChange;
import io.mantisrx.server.master.scheduler.JobMessageRouter;
import io.mantisrx.server.worker.TaskExecutorGateway;
import io.mantisrx.shaded.com.google.common.collect.ImmutableList;
import io.mantisrx.shaded.com.google.common.collect.ImmutableMap;
import java.time.Clock;
import java.time.Instant;
import java.time.ZoneId;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.Before;
import org.junit.Test;
/**
 * Tests for {@code ExecutorStateManagerImpl}: best-fit matching of task
 * executors to allocation requests by machine definition and availability.
 */
public class ExecutorStateManagerTests {
    // Mutable clock source; tests can swap the Clock to advance time deterministically.
    private final AtomicReference<Clock> actual =
        new AtomicReference<>(Clock.fixed(Instant.ofEpochSecond(1), ZoneId.systemDefault()));
    private final Clock clock = new DelegateClock(actual);
    private final TestingRpcService rpc = new TestingRpcService();
    private final TaskExecutorGateway gateway = mock(TaskExecutorGateway.class);
    private final JobMessageRouter router = mock(JobMessageRouter.class);
    // Three independent executor states sharing the same clock/rpc/router fixtures.
    private final TaskExecutorState state1 = TaskExecutorState.of(clock, rpc, router);
    private final TaskExecutorState state2 = TaskExecutorState.of(clock, rpc, router);
    private final TaskExecutorState state3 = TaskExecutorState.of(clock, rpc, router);
    private static final ClusterID CLUSTER_ID = ClusterID.of("clusterId");
    private static final TaskExecutorID TASK_EXECUTOR_ID_1 = TaskExecutorID.of("taskExecutorId1");
    private static final TaskExecutorID TASK_EXECUTOR_ID_2 = TaskExecutorID.of("taskExecutorId2");
    private static final TaskExecutorID TASK_EXECUTOR_ID_3 = TaskExecutorID.of("taskExecutorId3");
    private static final String TASK_EXECUTOR_ADDRESS = "127.0.0.1";
    private static final String HOST_NAME = "hostName";
    private static final WorkerPorts WORKER_PORTS = new WorkerPorts(ImmutableList.of(1, 2, 3, 4, 5));
    // Two machine sizes: definition 2 has 4x the first resource dimension of
    // definition 1, so requests sized for one do not fit the other the same way.
    private static final MachineDefinition MACHINE_DEFINITION_1 =
        new MachineDefinition(1.0, 2.0, 3.0, 4.0, 5);
    private static final MachineDefinition MACHINE_DEFINITION_2 =
        new MachineDefinition(4.0, 2.0, 3.0, 4.0, 5);
    private static final Map<String, String> ATTRIBUTES =
        ImmutableMap.of("attr1", "attr2");
    private static final String SCALE_GROUP_1 = "io-mantisrx-v001";
    private static final String SCALE_GROUP_2 = "io-mantisrx-v002";
    private static final Map<String, String> ATTRIBUTES_WITH_SCALE_GROUP_1 =
        ImmutableMap.of(WorkerConstants.AUTO_SCALE_GROUP_KEY, SCALE_GROUP_1);
    private static final Map<String, String> ATTRIBUTES_WITH_SCALE_GROUP_2 =
        ImmutableMap.of(WorkerConstants.AUTO_SCALE_GROUP_KEY, SCALE_GROUP_2);
    private static final WorkerId WORKER_ID = WorkerId.fromIdUnsafe("late-sine-function-tutorial-1-worker-0-1");
    // registration1 uses MACHINE_DEFINITION_1; registrations 2 and 3 use the
    // larger MACHINE_DEFINITION_2.
    private final TaskExecutorRegistration registration1 =
        getRegistrationBuilder(TASK_EXECUTOR_ID_1, MACHINE_DEFINITION_1, ATTRIBUTES).build();
    private final TaskExecutorRegistration registration2 =
        getRegistrationBuilder(TASK_EXECUTOR_ID_2, MACHINE_DEFINITION_2, ATTRIBUTES).build();
    private final TaskExecutorRegistration registration3 =
        getRegistrationBuilder(TASK_EXECUTOR_ID_3, MACHINE_DEFINITION_2, ATTRIBUTES).build();
    /**
     * Builds a registration pre-populated with the shared test fixtures
     * (cluster id, address, host name, worker ports), varying only the
     * executor id, machine definition and attributes.
     */
    private static TaskExecutorRegistration.TaskExecutorRegistrationBuilder getRegistrationBuilder(
        TaskExecutorID id,
        MachineDefinition mDef,
        Map<String,
            String> attributes) {
        return TaskExecutorRegistration.builder()
            .taskExecutorID(id)
            .clusterID(CLUSTER_ID)
            .taskExecutorAddress(TASK_EXECUTOR_ADDRESS)
            .hostname(HOST_NAME)
            .workerPorts(WORKER_PORTS)
            .machineDefinition(mDef)
            .taskExecutorAttributes(attributes);
    }
    // System under test.
    private final ExecutorStateManager stateManager = new ExecutorStateManagerImpl();
    @Before
    public void setup() {
        // Route RPC calls to the shared mock gateway for all executors.
        rpc.registerGateway(TASK_EXECUTOR_ADDRESS, gateway);
    }
    /**
     * Exercises {@code findBestFit} across executor lifecycle transitions:
     * no match before any registration, machine-definition-based matching once
     * executors are available, and no match again after executors become
     * occupied or are explicitly marked unavailable.
     */
    @Test
    public void testGetBestFit() {
        // No executors tracked yet -> no best fit.
        Optional<Pair<TaskExecutorID, TaskExecutorState>> bestFitO =
            stateManager.findBestFit(new TaskExecutorAssignmentRequest(
                TaskExecutorAllocationRequest.of(WORKER_ID, MACHINE_DEFINITION_2, null, 0),
                CLUSTER_ID));
        assertFalse(bestFitO.isPresent());
        // Register executor 1 (machine def 1) and mark it available.
        stateManager.trackIfAbsent(TASK_EXECUTOR_ID_1, state1);
        state1.onRegistration(registration1);
        state1.onTaskExecutorStatusChange(new TaskExecutorStatusChange(TASK_EXECUTOR_ID_1, CLUSTER_ID,
            TaskExecutorReport.available()));
        stateManager.tryMarkAvailable(TASK_EXECUTOR_ID_1)
        stateManager.trackIfAbsent(TASK_EXECUTOR_ID_2, state2);
        state2.onRegistration(registration2);
        state2.onTaskExecutorStatusChange(new TaskExecutorStatusChange(TASK_EXECUTOR_ID_2, CLUSTER_ID,
            TaskExecutorReport.available()));
        stateManager.tryMarkAvailable(TASK_EXECUTOR_ID_2);
        // Executor 3 is registered but NOT yet marked available.
        stateManager.trackIfAbsent(TASK_EXECUTOR_ID_3, state3);
        state3.onRegistration(registration3);
        // test machine def 1
        bestFitO =
            stateManager.findBestFit(new TaskExecutorAssignmentRequest(
                TaskExecutorAllocationRequest.of(WORKER_ID, MACHINE_DEFINITION_1, null, 0), CLUSTER_ID));
        assertTrue(bestFitO.isPresent());
        assertEquals(TASK_EXECUTOR_ID_1, bestFitO.get().getLeft());
        assertEquals(state1, bestFitO.get().getRight());
        // A machine-def-2 request should match executor 2, not executor 1.
        bestFitO =
            stateManager.findBestFit(new TaskExecutorAssignmentRequest(
                TaskExecutorAllocationRequest.of(WORKER_ID, MACHINE_DEFINITION_2, null, 0), CLUSTER_ID));
        assertTrue(bestFitO.isPresent());
        assertEquals(TASK_EXECUTOR_ID_2, bestFitO.get().getLeft());
        assertEquals(state2, bestFitO.get().getRight());
        // disable e1 and should get nothing
        state1.onTaskExecutorStatusChange(new TaskExecutorStatusChange(TASK_EXECUTOR_ID_1, CLUSTER_ID,
            TaskExecutorReport.occupied(WORKER_ID)));
        bestFitO =
            stateManager.findBestFit(new TaskExecutorAssignmentRequest(
                TaskExecutorAllocationRequest.of(WORKER_ID, MACHINE_DEFINITION_1, null, 0), CLUSTER_ID));
        assertFalse(bestFitO.isPresent());
        // enable e3 and disable e2
        state3.onTaskExecutorStatusChange(new TaskExecutorStatusChange(TASK_EXECUTOR_ID_3, CLUSTER_ID,
            TaskExecutorReport.available()));
        stateManager.tryMarkAvailable(TASK_EXECUTOR_ID_3);
        state2.onTaskExecutorStatusChange(new TaskExecutorStatusChange(TASK_EXECUTOR_ID_2, CLUSTER_ID,
            TaskExecutorReport.occupied(WORKER_ID)));
        // With e2 occupied, the machine-def-2 request now falls to e3.
        bestFitO =
            stateManager.findBestFit(new TaskExecutorAssignmentRequest(
                TaskExecutorAllocationRequest.of(WORKER_ID, MACHINE_DEFINITION_2, null, 0), CLUSTER_ID));
        assertTrue(bestFitO.isPresent());
        assertEquals(TASK_EXECUTOR_ID_3, bestFitO.get().getLeft());
        assertEquals(state3, bestFitO.get().getRight());
        // test mark as unavailable
        stateManager.tryMarkUnavailable(TASK_EXECUTOR_ID_3);
        bestFitO =
            stateManager.findBestFit(new TaskExecutorAssignmentRequest(
                TaskExecutorAllocationRequest.of(WORKER_ID, MACHINE_DEFINITION_2, null, 0), CLUSTER_ID));
        assertFalse(bestFitO.isPresent());
    }
@Test
public void testTaskExecutorHolderCreation() {
    // No scale-group/generation attribute present: the holder falls back to the
    // default "empty-generation" marker.
    TaskExecutorHolder holder = TaskExecutorHolder.of(
        TASK_EXECUTOR_ID_1,
        getRegistrationBuilder(TASK_EXECUTOR_ID_1, MACHINE_DEFINITION_1, ATTRIBUTES).build());
    assertEquals("empty-generation", holder.getGeneration());
    assertEquals(TASK_EXECUTOR_ID_1, holder.getId());

    // A scale-group attribute alone determines the generation.
    holder = TaskExecutorHolder.of(
        TASK_EXECUTOR_ID_2,
        getRegistrationBuilder(TASK_EXECUTOR_ID_2, MACHINE_DEFINITION_1, ATTRIBUTES_WITH_SCALE_GROUP_1).build());
    assertEquals(SCALE_GROUP_1, holder.getGeneration());
    assertEquals(TASK_EXECUTOR_ID_2, holder.getId());

    holder = TaskExecutorHolder.of(
        TASK_EXECUTOR_ID_2,
        getRegistrationBuilder(TASK_EXECUTOR_ID_2, MACHINE_DEFINITION_2, ATTRIBUTES_WITH_SCALE_GROUP_2).build());
    assertEquals(SCALE_GROUP_2, holder.getGeneration());
    assertEquals(TASK_EXECUTOR_ID_2, holder.getId());

    // When both the auto-scale-group key and the explicit container-generation key are
    // present, the explicit generation value (SCALE_GROUP_2 here) wins.
    ImmutableMap<String, String> attrsWithBothKeys = ImmutableMap.of(
        WorkerConstants.AUTO_SCALE_GROUP_KEY, SCALE_GROUP_1,
        WorkerConstants.MANTIS_WORKER_CONTAINER_GENERATION, SCALE_GROUP_2);
    holder = TaskExecutorHolder.of(
        TASK_EXECUTOR_ID_2,
        getRegistrationBuilder(TASK_EXECUTOR_ID_2, MACHINE_DEFINITION_2, attrsWithBothKeys).build());
    assertEquals(SCALE_GROUP_2, holder.getGeneration());
    assertEquals(TASK_EXECUTOR_ID_2, holder.getId());
}
@Test
public void testGetBestFit_WithGenerationFromScaleGroup() {
    // Sanity: empty manager yields no best fit.
    Optional<Pair<TaskExecutorID, TaskExecutorState>> bestFitO =
        stateManager.findBestFit(
            new TaskExecutorAssignmentRequest(
                TaskExecutorAllocationRequest.of(WORKER_ID, MACHINE_DEFINITION_2, null, 0),
                CLUSTER_ID));
    assertFalse(bestFitO.isPresent());
    /*
    Setup 3 TE where te1 is in group 2 while te2/3 in group 1. The best fit should be te1.
    (The assertions below imply group 2 outranks group 1 — presumably newer generation wins;
    confirm against ExecutorStateManagerImpl's generation ordering.)
    */
    // add te0 to another mDef, should not be chosen.
    TaskExecutorState teState0 = registerNewTaskExecutor(TaskExecutorID.of("te0"),
        MACHINE_DEFINITION_2,
        ATTRIBUTES_WITH_SCALE_GROUP_2,
        stateManager);
    TaskExecutorState teState1 = registerNewTaskExecutor(TASK_EXECUTOR_ID_1,
        MACHINE_DEFINITION_1,
        ATTRIBUTES_WITH_SCALE_GROUP_2,
        stateManager);
    TaskExecutorState teState2 = registerNewTaskExecutor(TASK_EXECUTOR_ID_2,
        MACHINE_DEFINITION_1,
        ATTRIBUTES_WITH_SCALE_GROUP_1,
        stateManager);
    TaskExecutorState teState3 = registerNewTaskExecutor(TASK_EXECUTOR_ID_3,
        MACHINE_DEFINITION_1,
        ATTRIBUTES_WITH_SCALE_GROUP_1,
        stateManager);
    // should get te1 with group2
    bestFitO =
        stateManager.findBestFit(
            new TaskExecutorAssignmentRequest(
                TaskExecutorAllocationRequest.of(WORKER_ID, MACHINE_DEFINITION_1, null, 0),
                CLUSTER_ID));
    assertTrue(bestFitO.isPresent());
    assertEquals(TASK_EXECUTOR_ID_1, bestFitO.get().getLeft());
    assertEquals(teState1, bestFitO.get().getRight());
    // add new TE in group1 doesn't affect result.
    TaskExecutorState teState4 = registerNewTaskExecutor(TaskExecutorID.of("te4"),
        MACHINE_DEFINITION_1,
        ATTRIBUTES_WITH_SCALE_GROUP_1,
        stateManager);
    bestFitO =
        stateManager.findBestFit(
            new TaskExecutorAssignmentRequest(
                TaskExecutorAllocationRequest.of(WORKER_ID, MACHINE_DEFINITION_1, null, 0),
                CLUSTER_ID));
    assertTrue(bestFitO.isPresent());
    assertEquals(TASK_EXECUTOR_ID_1, bestFitO.get().getLeft());
    assertEquals(teState1, bestFitO.get().getRight());
    // remove te1 (by marking it occupied) and add new te in both groups; the new
    // group-2 executor te5 should now be preferred.
    teState1.onTaskExecutorStatusChange(
        new TaskExecutorStatusChange(TASK_EXECUTOR_ID_1, CLUSTER_ID, TaskExecutorReport.occupied(WORKER_ID)));
    TaskExecutorID te5Id = TaskExecutorID.of("te5");
    TaskExecutorState teState5 = registerNewTaskExecutor(te5Id,
        MACHINE_DEFINITION_1,
        ATTRIBUTES_WITH_SCALE_GROUP_2,
        stateManager);
    TaskExecutorState teState6 = registerNewTaskExecutor(TaskExecutorID.of("te6"),
        MACHINE_DEFINITION_1,
        ATTRIBUTES_WITH_SCALE_GROUP_1,
        stateManager);
    bestFitO =
        stateManager.findBestFit(
            new TaskExecutorAssignmentRequest(
                TaskExecutorAllocationRequest.of(WORKER_ID, MACHINE_DEFINITION_1, null, 0),
                CLUSTER_ID));
    assertTrue(bestFitO.isPresent());
    assertEquals(te5Id, bestFitO.get().getLeft());
    assertEquals(teState5, bestFitO.get().getRight());
    // disable all group2 TEs and allow bestFit from group1; exact executor within
    // group 1 is unspecified, so only the group attribute is asserted.
    teState5.onTaskExecutorStatusChange(
        new TaskExecutorStatusChange(te5Id, CLUSTER_ID, TaskExecutorReport.occupied(WORKER_ID)));
    bestFitO =
        stateManager.findBestFit(
            new TaskExecutorAssignmentRequest(
                TaskExecutorAllocationRequest.of(WORKER_ID, MACHINE_DEFINITION_1, null, 0),
                CLUSTER_ID));
    assertTrue(bestFitO.isPresent());
    assertNotEquals(te5Id, bestFitO.get().getLeft());
    assertNotEquals(TASK_EXECUTOR_ID_1, bestFitO.get().getLeft());
    assertEquals(SCALE_GROUP_1,
        Objects.requireNonNull(bestFitO.get().getRight().getRegistration())
            .getAttributeByKey(WorkerConstants.AUTO_SCALE_GROUP_KEY).orElse("invalid"));
    // Occupied executors stay tracked (get() still finds te1); unknown ids return null.
    assertNotNull(stateManager.get(TASK_EXECUTOR_ID_1));
    assertNull(stateManager.get(TaskExecutorID.of("invalid")));
}
/**
 * Registers a fresh task executor with the given state manager and drives it to the
 * available state: track, register, report available, then mark available.
 *
 * @param id           id of the new executor
 * @param mdef         machine definition to advertise
 * @param attributes   registration attributes (scale-group / generation keys)
 * @param stateManager manager to register the executor with
 * @return the newly created (and now available) executor state
 */
private TaskExecutorState registerNewTaskExecutor(TaskExecutorID id, MachineDefinition mdef,
                                                  Map<String, String> attributes,
                                                  ExecutorStateManager stateManager) {
    final TaskExecutorState executorState = TaskExecutorState.of(clock, rpc, router);
    final TaskExecutorRegistration registration = getRegistrationBuilder(id, mdef, attributes).build();
    stateManager.trackIfAbsent(id, executorState);
    executorState.onRegistration(registration);
    executorState.onTaskExecutorStatusChange(
        new TaskExecutorStatusChange(id, CLUSTER_ID, TaskExecutorReport.available()));
    stateManager.tryMarkAvailable(id);
    return executorState;
}
}
| 7,898 |
/*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.master.resourcecluster;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import io.mantisrx.server.master.resourcecluster.ClusterID;
import io.mantisrx.shaded.com.google.common.collect.ImmutableMap;
import java.time.Duration;
import java.time.Instant;
import java.util.Optional;
import org.junit.Test;
public class DisableTaskExecutorsRequestTest {
    // R1 is the baseline request. Each sibling varies exactly one dimension:
    //   R2 — different attributes, R3 — different cluster id, R4 — same attributes and
    //   cluster but an expiry instant one day later.
    private static final DisableTaskExecutorsRequest R1 =
        new DisableTaskExecutorsRequest(ImmutableMap.of("attr1", "attr1"), ClusterID.of("cluster1"), Instant.now(), Optional.empty());
    private static final DisableTaskExecutorsRequest R2 =
        new DisableTaskExecutorsRequest(ImmutableMap.of("attr2", "attr2"), ClusterID.of("cluster1"), Instant.now(), Optional.empty());
    private static final DisableTaskExecutorsRequest R3 =
        new DisableTaskExecutorsRequest(ImmutableMap.of("attr1", "attr1"), ClusterID.of("cluster2"), Instant.now(), Optional.empty());
    private static final DisableTaskExecutorsRequest R4 =
        new DisableTaskExecutorsRequest(ImmutableMap.of("attr1", "attr1"), ClusterID.of("cluster1"), Instant.now().plus(Duration.ofDays(1)), Optional.empty());
    @Test
    public void checkIfDifferentRequestsHaveDifferentHashes() {
        // Attribute maps differ, so the hash must differ.
        assertNotEquals(R1.getHash(), R2.getHash());
    }
    @Test
    public void checkIfDifferentClustersHaveDifferentHashes() {
        // Cluster ids differ, so the hash must differ.
        assertNotEquals(R1.getHash(), R3.getHash());
    }
    @Test
    public void checkIfSimilarRequestsHaveSameHashes() {
        // R4 differs from R1 only in the expiry instant, yet the hashes match —
        // the hash presumably covers attributes + cluster id only and ignores expiry;
        // confirm against DisableTaskExecutorsRequest.getHash().
        assertEquals(R1.getHash(), R4.getHash());
    }
    // TODO(fdichiara): add tests with new field.
}
| 7,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.