index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/mantis/mantis-server/mantis-server-worker-client/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker-client/src/main/java/io/mantisrx/server/worker/client/MantisHttpClientBuilder.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.client;
import io.netty.bootstrap.Bootstrap;
import mantis.io.reactivex.netty.protocol.http.client.HttpClient;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientBuilder;
/**
 * An {@link HttpClientBuilder} that produces {@link MantisHttpClientImpl} instances
 * instead of the default RxNetty client implementation.
 *
 * @param <I> the request content type
 * @param <O> the response content type
 */
public class MantisHttpClientBuilder<I, O> extends HttpClientBuilder<I, O> {

    /**
     * Creates a builder targeting the given host and port, backed by a fresh Netty
     * {@link Bootstrap}.
     *
     * @param host the server host to connect to
     * @param port the server port to connect to
     */
    public MantisHttpClientBuilder(String host, int port) {
        super(host, port, new Bootstrap());
    }

    /**
     * Builds the client. When a connection-pool builder has been configured, the pooled
     * constructor variant is used; otherwise the channel/connection-factory variant is used.
     */
    @Override
    protected HttpClient<I, O> createClient() {
        if (super.poolBuilder != null) {
            return new MantisHttpClientImpl<I, O>(this.getOrCreateName(), this.serverInfo, this.bootstrap,
                    this.pipelineConfigurator, this.clientConfig, this.poolBuilder, this.eventsSubject);
        }
        return new MantisHttpClientImpl<I, O>(this.getOrCreateName(), this.serverInfo, this.bootstrap,
                this.pipelineConfigurator, this.clientConfig, this.channelFactory, this.connectionFactory,
                this.eventsSubject);
    }
}
| 8,400 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker-client/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker-client/src/main/java/io/mantisrx/server/worker/client/SseWorkerConnectionFunction.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.client;
import static com.mantisrx.common.utils.MantisMetricStringConstants.DROP_OPERATOR_INCOMING_METRIC_GROUP;
import com.mantisrx.common.utils.NettyUtils;
import io.mantisrx.common.MantisServerSentEvent;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.metrics.spectator.MetricGroupId;
import io.mantisrx.runtime.parameter.SinkParameters;
import io.mantisrx.server.core.ServiceRegistry;
import io.reactivx.mantis.operators.DropOperator;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.functions.Action1;
/**
 * A {@link WorkerConnectionFunc} that creates SSE (server-sent events) connections to worker
 * sinks, wrapping each connection in an {@link SseWorkerConnection} that emits
 * {@link MantisServerSentEvent}s.
 *
 * <p>Class initialization has two global side effects: it pins Netty to a single thread via
 * {@link NettyUtils#setNettyThreads()}, and it starts a periodic metrics-printer task.
 */
public class SseWorkerConnectionFunction implements WorkerConnectionFunc<MantisServerSentEvent> {

    // Default for the "workerClient.buffer.size" property when none is configured.
    private static final String DEFAULT_BUFFER_SIZE_STR = "0";
    private static final Logger logger = LoggerFactory.getLogger(SseWorkerConnectionFunction.class);
    // Metric groups shared with SseWorkerConnection instances; iterated by the printer task below.
    private static final CopyOnWriteArraySet<MetricGroupId> metricsSet = new CopyOnWriteArraySet<>();
    private static final MetricGroupId metricGroupId;
    // Fallback connection-reset handler: log and back off 500ms before the caller's retry logic runs.
    private static final Action1<Throwable> defaultConxResetHandler = new Action1<Throwable>() {
        @Override
        public void call(Throwable throwable) {
            logger.warn("Retrying reset connection");
            try {Thread.sleep(500);} catch (InterruptedException ie) {
                // NOTE(review): interrupt status is swallowed here, not restored.
                logger.debug("Interrupted waiting for retrying connection");
            }
        }
    };

    static {
        // Use single netty thread
        NettyUtils.setNettyThreads();
        metricGroupId = new MetricGroupId(DROP_OPERATOR_INCOMING_METRIC_GROUP + "_SseWorkerMetricsConnectionFunction_withBuffer");
        metricsSet.add(metricGroupId);
        logger.info("SETTING UP METRICS PRINTER THREAD");
        // NOTE(review): this executor is never shut down; its thread lives for the JVM lifetime.
        // Every 60s it logs DropOperator counters for each registered metric group.
        new ScheduledThreadPoolExecutor(1).scheduleWithFixedDelay(new Runnable() {
            @Override
            public void run() {
                try {
                    // Snapshot the set to iterate a stable view.
                    Set<MetricGroupId> metricGroups = new HashSet<>(metricsSet);
                    if (!metricGroups.isEmpty()) {
                        for (MetricGroupId metricGroupId : metricGroups) {
                            final Metrics metric = MetricsRegistry.getInstance().getMetric(metricGroupId);
                            if (metric != null) {
                                final Counter onNext = metric.getCounter("" + DropOperator.Counters.onNext);
                                final Counter onError = metric.getCounter("" + DropOperator.Counters.onError);
                                final Counter onComplete = metric.getCounter("" + DropOperator.Counters.onComplete);
                                final Counter dropped = metric.getCounter("" + DropOperator.Counters.dropped);
                                logger.info(metricGroupId.id() + ": onNext=" + onNext.value() + ", onError=" + onError.value() +
                                        ", onComplete=" + onComplete.value() + ", dropped=" + dropped.value()
                                        // + ", buffered=" + buffered.value()
                                );
                            }
                        }
                    }
                } catch (Exception e) {
                    // Keep the printer alive across unexpected failures.
                    logger.warn("Unexpected error in metrics printer thread: " + e.getMessage(), e);
                }
            }
        }, 60, 60, TimeUnit.SECONDS);
    }

    // NOTE(review): "Rest" looks like a typo for "Reset"; kept as-is since the name is part
    // of the public constructor's documented parameter naming.
    private final boolean reconnectUponConnectionRest;
    private final Action1<Throwable> connectionResetHandler;
    private final SinkParameters sinkParameters;
    private final int bufferSize;

    /**
     * Creates a connection function with no extra sink parameters.
     *
     * @param reconnectUponConnectionRest whether to reconnect after the connection is reset
     * @param connectionResetHandler callback invoked on connection errors; may be null, in
     *                               which case a default log-and-sleep handler is used
     */
    public SseWorkerConnectionFunction(boolean reconnectUponConnectionRest, Action1<Throwable> connectionResetHandler) {
        this(reconnectUponConnectionRest, connectionResetHandler, null);
    }

    /**
     * Creates a connection function.
     *
     * @param reconnectUponConnectionRest whether to reconnect after the connection is reset
     * @param connectionResetHandler callback invoked on connection errors; null selects the default
     * @param sinkParameters optional query parameters appended to the sink SSE request; may be null
     */
    public SseWorkerConnectionFunction(boolean reconnectUponConnectionRest, Action1<Throwable> connectionResetHandler, SinkParameters sinkParameters) {
        this.reconnectUponConnectionRest = reconnectUponConnectionRest;
        this.connectionResetHandler = connectionResetHandler == null ? defaultConxResetHandler : connectionResetHandler;
        this.sinkParameters = sinkParameters;
        // NOTE(review): Integer.parseInt will throw NumberFormatException if the property
        // value is non-numeric; no fallback is applied here.
        String bufferSizeStr = ServiceRegistry.INSTANCE.getPropertiesService().getStringValue("workerClient.buffer.size", DEFAULT_BUFFER_SIZE_STR);
        bufferSize = Integer.parseInt(bufferSizeStr);
    }

    /**
     * Connects with no status callbacks and a 5 second data-receive timeout.
     */
    @Override
    public WorkerConnection<MantisServerSentEvent> call(String hostname, Integer port) {
        return call(hostname, port, null, null, 5);
    }

    /**
     * Creates a {@link WorkerConnection} that lazily opens an SSE connection to the worker
     * at {@code hostname:port} when its {@code call()} is subscribed.
     *
     * @param hostname worker host
     * @param port worker port
     * @param updateConxStatus optional callback notified on connect/disconnect; may be null
     * @param updateDataRecvngStatus optional callback notified when data starts/stops flowing; may be null
     * @param dataRecvTimeoutSecs seconds without data before the connection is considered idle
     */
    @Override
    public WorkerConnection<MantisServerSentEvent> call(final String hostname, final Integer port,
                                                        final Action1<Boolean> updateConxStatus,
                                                        final Action1<Boolean> updateDataRecvngStatus, final long dataRecvTimeoutSecs) {
        return new WorkerConnection<MantisServerSentEvent>() {
            private final SseWorkerConnection workerConn =
                    new SseWorkerConnection("WorkerMetrics", hostname, port, updateConxStatus, updateDataRecvngStatus,
                            connectionResetHandler, dataRecvTimeoutSecs, reconnectUponConnectionRest, metricsSet,
                            bufferSize, sinkParameters,metricGroupId);

            @Override
            public String getName() {
                return workerConn.getName();
            }

            @Override
            public void close() throws Exception {
                workerConn.close();
            }

            @Override
            public Observable<MantisServerSentEvent> call() {
                return workerConn.call();
            }
        };
    }
}
| 8,401 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker-client/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker-client/src/main/java/io/mantisrx/server/worker/client/JobWorkerMetricsLocator.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.client;
import io.reactivex.mantis.remote.observable.EndpointChange;
import rx.Observable;
/**
 * Locates the metrics-serving endpoints of the workers belonging to a Mantis job.
 */
public interface JobWorkerMetricsLocator {

    /**
     * Streams endpoint change events for the workers of the given job.
     *
     * @param jobId the id of the job whose worker metrics endpoints are to be located
     * @return an observable of {@link EndpointChange} events for that job's workers
     */
    // Redundant "public" modifier removed: interface members are implicitly public.
    Observable<EndpointChange> locateWorkerMetricsForJob(String jobId);
}
| 8,402 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker-client/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker-client/src/main/java/io/mantisrx/server/worker/client/WorkerConnectionsStatus.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.client;
/**
 * Immutable snapshot of connection health across a job's workers: how many worker
 * connections exist in total, how many are currently connected, and how many are
 * actively delivering data.
 *
 * <p>Note: the public accessor {@code getRecevingDataFrom()} and the corresponding
 * {@code toString()} token retain their historical (misspelled) names for backward
 * compatibility with existing callers and log parsers; only private field names
 * have been corrected.
 */
public class WorkerConnectionsStatus {

    private final long numConnected;
    private final long total;
    private final long receivingDataFrom;

    /**
     * @param recevingDataFrom number of connections currently receiving data
     * @param numConnected number of connections currently established
     * @param total total number of worker connections expected
     */
    public WorkerConnectionsStatus(long recevingDataFrom, long numConnected, long total) {
        this.receivingDataFrom = recevingDataFrom;
        this.numConnected = numConnected;
        this.total = total;
    }

    /** @return number of connections currently receiving data (name kept for compatibility) */
    public long getRecevingDataFrom() {
        return receivingDataFrom;
    }

    /** @return number of connections currently established */
    public long getNumConnected() {
        return numConnected;
    }

    /** @return total number of worker connections expected */
    public long getTotal() {
        return total;
    }

    @Override
    public String toString() {
        // Field order and the "recevingDataFrom" spelling are preserved exactly;
        // downstream log consumers may depend on this format.
        return "WorkerConnectionsStatus{" +
                "numConnected=" + numConnected +
                ", total=" + total +
                ", recevingDataFrom=" + receivingDataFrom +
                '}';
    }
}
| 8,403 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker-client/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker-client/src/main/java/io/mantisrx/server/worker/client/WorkerMetricsClient.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.client;
import com.mantisrx.common.utils.Services;
import io.mantisrx.server.core.Configurations;
import io.mantisrx.server.core.CoreConfiguration;
import io.mantisrx.server.master.client.HighAvailabilityServices;
import io.mantisrx.server.master.client.HighAvailabilityServicesUtil;
import io.mantisrx.server.master.client.MantisMasterGateway;
import io.mantisrx.server.master.client.MasterClientWrapper;
import io.reactivex.mantis.remote.observable.EndpointChange;
import java.util.Properties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Observer;
import rx.functions.Func1;
import rx.subjects.PublishSubject;
public class WorkerMetricsClient {
private static final Logger logger = LoggerFactory.getLogger(WorkerMetricsClient.class);
private final MasterClientWrapper clientWrapper;
private final JobWorkerMetricsLocator jobWrokerMetricsLocator = new JobWorkerMetricsLocator() {
@Override
public Observable<EndpointChange> locateWorkerMetricsForJob(final String jobId) {
return clientWrapper.getMasterClientApi()
.flatMap(new Func1<MantisMasterGateway, Observable<EndpointChange>>() {
@Override
public Observable<EndpointChange> call(MantisMasterGateway mantisMasterClientApi) {
logger.info("Getting worker metrics locations for " + jobId);
return clientWrapper.getAllWorkerMetricLocations(jobId);
}
});
}
};
/**
* The following properties are required:
* <UL>
* <LI>
* #default 1000<br>
* mantis.zookeeper.connectionTimeMs=1000
* </LI>
* <LI>
* # default 500<br>
* mantis.zookeeper.connection.retrySleepMs=500
* </LI>
* <LI>
* # default 5<br>
* mantis.zookeeper.connection.retryCount=5
* </LI>
* <LI>
* # default NONE<br>
* mantis.zookeeper.connectString=
* </LI>
* <LI>
* #default NONE<br>
* mantis.zookeeper.root=
* </LI>
* <LI>
* #default /leader <br>
* mantis.zookeeper.leader.announcement.path=
* </LI>
* </UL>
*
* @param properties
*/
public WorkerMetricsClient(Properties properties) {
this(Configurations.frmProperties(properties, CoreConfiguration.class));
}
public WorkerMetricsClient(CoreConfiguration configuration) {
HighAvailabilityServices haServices =
HighAvailabilityServicesUtil.createHAServices(configuration);
Services.startAndWait(haServices);
clientWrapper = new MasterClientWrapper(haServices.getMasterClientApi());
}
public WorkerMetricsClient(MantisMasterGateway gateway) {
clientWrapper = new MasterClientWrapper(gateway);
}
public JobWorkerMetricsLocator getWorkerMetricsLocator() {
return jobWrokerMetricsLocator;
}
/* package */ MasterClientWrapper getClientWrapper() {
return clientWrapper;
}
public <T> MetricsClient<T> getMetricsClientByJobId(final String jobId, final WorkerConnectionFunc<T> workerConnectionFunc,
Observer<WorkerConnectionsStatus> workerConnectionsStatusObserver) {
return getMetricsClientByJobId(jobId, workerConnectionFunc, workerConnectionsStatusObserver, 5);
}
public <T> MetricsClient<T> getMetricsClientByJobId(final String jobId, final WorkerConnectionFunc<T> workerConnectionFunc,
Observer<WorkerConnectionsStatus> workerConnectionsStatusObserver, long dataRecvTimeoutSecs) {
PublishSubject<MasterClientWrapper.JobNumWorkers> numWrkrsSubject = PublishSubject.create();
clientWrapper.addNumWorkersObserver(numWrkrsSubject);
return new MetricsClientImpl<T>(jobId, workerConnectionFunc, getWorkerMetricsLocator(),
numWrkrsSubject
.filter(new Func1<MasterClientWrapper.JobNumWorkers, Boolean>() {
@Override
public Boolean call(MasterClientWrapper.JobNumWorkers jobNumWorkers) {
return jobId.equals(jobNumWorkers.getJobId());
}
})
.map(new Func1<MasterClientWrapper.JobNumWorkers, Integer>() {
@Override
public Integer call(MasterClientWrapper.JobNumWorkers jobNumWorkers) {
return jobNumWorkers.getNumWorkers();
}
}),
workerConnectionsStatusObserver, dataRecvTimeoutSecs);
}
}
| 8,404 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker-client/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker-client/src/main/java/io/mantisrx/server/worker/client/SseWorkerConnection.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.client;
import static com.mantisrx.common.utils.MantisMetricStringConstants.DROP_OPERATOR_INCOMING_METRIC_GROUP;
import com.mantisrx.common.utils.MantisSSEConstants;
import io.mantisrx.common.MantisServerSentEvent;
import io.mantisrx.common.compression.CompressionUtils;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.metrics.spectator.MetricGroupId;
import io.mantisrx.runtime.parameter.SinkParameter;
import io.mantisrx.runtime.parameter.SinkParameters;
import io.netty.buffer.ByteBuf;
import io.netty.handler.logging.LogLevel;
import io.reactivx.mantis.operators.DropOperator;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import mantis.io.reactivex.netty.pipeline.PipelineConfigurators;
import mantis.io.reactivex.netty.protocol.http.client.HttpClient;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientBuilder;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientRequest;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientResponse;
import mantis.io.reactivex.netty.protocol.http.sse.ServerSentEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.functions.Action1;
import rx.functions.Func1;
import rx.functions.Func2;
import rx.subjects.PublishSubject;
/**
 * A single SSE (server-sent events) connection to a worker sink at {@code hostname:port}.
 *
 * <p>{@link #call()} opens the HTTP connection and returns an observable of
 * {@link MantisServerSentEvent}s; {@link #close()} tears the connection down. Optional
 * callbacks report connection status and whether data is actively flowing. Connection
 * errors are retried with a capped backoff when {@code reconnectUponConnectionReset} is set.
 *
 * <p>Fix in this revision: {@code resetConnected()} previously dereferenced {@code client}
 * unconditionally, throwing a {@link NullPointerException} when {@link #close()} was invoked
 * before {@link #call()} had ever created the client; it now guards against a null client.
 */
public class SseWorkerConnection {

    private static final Logger logger = LoggerFactory.getLogger(SseWorkerConnection.class);
    private static final String metricNamePrefix = DROP_OPERATOR_INCOMING_METRIC_GROUP;
    // Emits once on close(); used with takeUntil to terminate all derived streams.
    protected final PublishSubject<Boolean> shutdownSubject = PublishSubject.create();
    final AtomicLong lastDataReceived = new AtomicLong(System.currentTimeMillis());
    private final String connectionType;
    private final String hostname;
    private final int port;
    private final MetricGroupId metricGroupId;
    private final Counter pingCounter;
    private final boolean reconnectUponConnectionReset;
    private final Action1<Boolean> updateConxStatus;
    private final Action1<Boolean> updateDataRecvngStatus;
    private final Action1<Throwable> connectionResetHandler;
    private final long dataRecvTimeoutSecs;
    private final CopyOnWriteArraySet<MetricGroupId> metricsSet;
    private final int bufferSize;
    private final SinkParameters sinkParameters;
    private final boolean disablePingFiltering;
    private final AtomicBoolean isConnected = new AtomicBoolean(false);
    private final AtomicBoolean isReceivingData = new AtomicBoolean(false);
    // Created lazily in call(); may be null until then (see resetConnected()).
    HttpClient<ByteBuf, ServerSentEvent> client;
    private boolean compressedBinaryInputEnabled = false;
    private volatile boolean isShutdown = false;

    // Retry with capped backoff: sleep 2 * min(attempt, 10) seconds between attempts;
    // stop retrying once shut down or when reconnects are disabled.
    private final Func1<Observable<? extends Throwable>, Observable<?>> retryLogic =
            new Func1<Observable<? extends Throwable>, Observable<?>>() {
                @Override
                public Observable<?> call(Observable<? extends Throwable> attempts) {
                    if (!reconnectUponConnectionReset)
                        return Observable.empty();
                    return attempts
                            .zipWith(Observable.range(1, Integer.MAX_VALUE), new Func2<Throwable, Integer, Integer>() {
                                @Override
                                public Integer call(Throwable t1, Integer integer) {
                                    return integer;
                                }
                            })
                            .flatMap(new Func1<Integer, Observable<?>>() {
                                @Override
                                public Observable<?> call(Integer integer) {
                                    if (isShutdown) {
                                        logger.info(getName() + ": Is shutdown, stopping retries");
                                        return Observable.empty();
                                    }
                                    long delay = 2 * (integer > 10 ? 10 : integer);
                                    logger.info(getName() + ": retrying conx after sleeping for " + delay + " secs");
                                    return Observable.timer(delay, TimeUnit.SECONDS);
                                }
                            });
                }
            };
    // Last observed total of DropOperator "dropped" counters; used to detect new drops.
    private long lastDataDropValue = 0L;

    public SseWorkerConnection(final String connectionType,
                               final String hostname,
                               final Integer port,
                               final Action1<Boolean> updateConxStatus,
                               final Action1<Boolean> updateDataRecvngStatus,
                               final Action1<Throwable> connectionResetHandler,
                               final long dataRecvTimeoutSecs,
                               final boolean reconnectUponConnectionReset,
                               final CopyOnWriteArraySet<MetricGroupId> metricsSet,
                               final int bufferSize,
                               final SinkParameters sinkParameters,
                               final MetricGroupId metricGroupId) {
        this(connectionType, hostname, port, updateConxStatus, updateDataRecvngStatus, connectionResetHandler,
                dataRecvTimeoutSecs, reconnectUponConnectionReset, metricsSet, bufferSize, sinkParameters, false,
                metricGroupId);
    }

    /**
     * @param connectionType label used in {@link #getName()} and logs
     * @param hostname worker host
     * @param port worker port
     * @param updateConxStatus optional callback notified on connect/disconnect; may be null
     * @param updateDataRecvngStatus optional callback notified when data starts/stops flowing; may be null
     * @param connectionResetHandler invoked when the SSE stream errors
     * @param dataRecvTimeoutSecs seconds without data before the connection is considered idle
     * @param reconnectUponConnectionReset whether to retry after stream errors
     * @param metricsSet shared registry of metric groups for drop accounting
     * @param bufferSize client-side buffer size hint
     * @param sinkParameters optional query parameters for the sink request; may be null
     * @param disablePingFiltering when true, "ping" heartbeat events are passed through
     * @param metricGroupId metric group used by the DropOperator on this connection
     */
    public SseWorkerConnection(final String connectionType,
                               final String hostname,
                               final Integer port,
                               final Action1<Boolean> updateConxStatus,
                               final Action1<Boolean> updateDataRecvngStatus,
                               final Action1<Throwable> connectionResetHandler,
                               final long dataRecvTimeoutSecs,
                               final boolean reconnectUponConnectionReset,
                               final CopyOnWriteArraySet<MetricGroupId> metricsSet,
                               final int bufferSize,
                               final SinkParameters sinkParameters,
                               final boolean disablePingFiltering,
                               final MetricGroupId metricGroupId) {
        this.connectionType = connectionType;
        this.hostname = hostname;
        this.port = port;
        this.metricGroupId = metricGroupId;
        final MetricGroupId connHealthMetricGroup = new MetricGroupId("ConnectionHealth");
        Metrics m = new Metrics.Builder()
                .id(connHealthMetricGroup)
                .addCounter("pingCount")
                .build();
        this.pingCounter = m.getCounter("pingCount");
        this.updateConxStatus = updateConxStatus;
        this.updateDataRecvngStatus = updateDataRecvngStatus;
        this.connectionResetHandler = connectionResetHandler;
        this.dataRecvTimeoutSecs = dataRecvTimeoutSecs;
        this.reconnectUponConnectionReset = reconnectUponConnectionReset;
        this.metricsSet = metricsSet;
        this.bufferSize = bufferSize;
        this.sinkParameters = sinkParameters;
        if (this.sinkParameters != null) {
            this.compressedBinaryInputEnabled = isCompressedBinaryInputEnabled(this.sinkParameters.getSinkParams());
        }
        this.disablePingFiltering = disablePingFiltering;
    }

    /** Returns true if the sink parameters request compressed binary input. */
    private boolean isCompressedBinaryInputEnabled(List<SinkParameter> sinkParams) {
        for (SinkParameter sinkParam : sinkParams) {
            if (MantisSSEConstants.MANTIS_ENABLE_COMPRESSION.equals(sinkParam.getName()) && "true".equalsIgnoreCase(sinkParam.getValue())) {
                return true;
            }
        }
        return false;
    }

    public String getName() {
        return "Sse" + connectionType + "Connection: " + hostname + ":" + port;
    }

    /**
     * Signals shutdown to all derived streams and closes the underlying connection.
     * Idempotent: subsequent invocations are no-ops.
     */
    public synchronized void close() throws Exception {
        logger.info("Closing sse connection to " + hostname + ":" + port);
        if (isShutdown)
            return;
        shutdownSubject.onNext(true);
        shutdownSubject.onCompleted();
        isShutdown = true;
        resetConnected();
    }

    private <I, O> HttpClientBuilder<I, O> newHttpClientBuilder(String host, int port) {
        return new MantisHttpClientBuilder<I, O>(host, port).withMaxConnections(1000).enableWireLogging(LogLevel.DEBUG);
    }

    /**
     * Opens the SSE connection and returns the stream of events. Returns an empty observable
     * if this connection has already been shut down. Subsequent stream errors are retried per
     * {@code retryLogic}.
     */
    public synchronized Observable<MantisServerSentEvent> call() {
        if (isShutdown)
            return Observable.empty();
        client = this.<ByteBuf, ServerSentEvent>newHttpClientBuilder(hostname, port)
                .pipelineConfigurator(PipelineConfigurators.<ByteBuf>clientSseConfigurator())
                //.enableWireLogging(LogLevel.ERROR)
                .withNoConnectionPooling()
                .build();
        StringBuilder sp = new StringBuilder();
        // Optional delimiter used when decompressing batched event payloads.
        String delimiter = sinkParameters == null
                ? null
                : sinkParameters.getSinkParams().stream()
                .filter(s -> s.getName()
                        .equalsIgnoreCase(MantisSSEConstants.MANTIS_COMPRESSION_DELIMITER))
                .findFirst()
                .map(SinkParameter::getValue)
                .orElse(null);
        if (sinkParameters != null) {
            sp.append(sinkParameters.toString());
        }
        // Start default params with "?" when no sink params were appended, "&" otherwise.
        sp.append(sp.length() == 0 ? getDefaultSinkParams("?") : getDefaultSinkParams("&"));
        String uri = "/" + sp.toString();
        logger.info(getName() + ": Using uri: " + uri);
        return
                client.submit(HttpClientRequest.createGet(uri))
                        .takeUntil(shutdownSubject)
                        .takeWhile((serverSentEventHttpClientResponse) -> !isShutdown)
                        .filter((HttpClientResponse<ServerSentEvent> response) -> {
                            if (!response.getStatus().reasonPhrase().equals("OK"))
                                logger.warn(getName() + ":Trying to continue after unexpected response from sink: "
                                        + response.getStatus().reasonPhrase());
                            return response.getStatus().reasonPhrase().equals("OK");
                        })
                        .flatMap((HttpClientResponse<ServerSentEvent> response) -> {
                            if (!isConnected.getAndSet(true)) {
                                if (updateConxStatus != null)
                                    updateConxStatus.call(true);
                            }
                            return streamContent(response, updateDataRecvngStatus, dataRecvTimeoutSecs, delimiter);
                        })
                        .doOnError((Throwable throwable) -> {
                            resetConnected();
                            logger.warn(getName() +
                                    "Error on getting response from SSE server: " + throwable.getMessage());
                            connectionResetHandler.call(throwable);
                        })
                        .retryWhen(retryLogic)
                        .doOnCompleted(this::resetConnected);
    }

    /**
     * Closes the underlying connection (if one exists) and flips the connected/receiving
     * flags off, invoking the status callbacks where registered.
     */
    private void resetConnected() {
        // Explicitly close the connection. Guard against a null client: close() can be
        // invoked before call() ever built one (previously this threw an NPE).
        final HttpClient<ByteBuf, ServerSentEvent> c = client;
        if (c instanceof MantisHttpClientImpl) {
            ((MantisHttpClientImpl<?, ?>) c).closeConn();
        }
        if (isConnected.getAndSet(false)) {
            if (updateConxStatus != null)
                updateConxStatus.call(false);
        }
        if (isReceivingData.compareAndSet(true, false))
            if (updateDataRecvngStatus != null)
                synchronized (updateDataRecvngStatus) {
                    updateDataRecvngStatus.call(false);
                }
    }

    /**
     * Converts a raw SSE response into a stream of decoded {@link MantisServerSentEvent}s.
     * Also runs a watchdog (when a status callback is registered) that flips the
     * "receiving data" flag based on drops and on time since the last event.
     */
    protected Observable<MantisServerSentEvent> streamContent(HttpClientResponse<ServerSentEvent> response,
                                                              final Action1<Boolean> updateDataRecvngStatus,
                                                              final long dataRecvTimeoutSecs, String delimiter) {
        long interval = Math.max(1, dataRecvTimeoutSecs / 2);
        if (updateDataRecvngStatus != null) {
            Observable.interval(interval, interval, TimeUnit.SECONDS)
                    .doOnNext((Long aLong) -> {
                        if (!isShutdown) {
                            if (hasDataDrop() || System.currentTimeMillis() > (lastDataReceived.get() + dataRecvTimeoutSecs * 1000)) {
                                if (isReceivingData.compareAndSet(true, false))
                                    synchronized (updateDataRecvngStatus) {
                                        updateDataRecvngStatus.call(false);
                                    }
                            } else {
                                if (isConnected.get() && isReceivingData.compareAndSet(false, true))
                                    synchronized (updateDataRecvngStatus) {
                                        updateDataRecvngStatus.call(true);
                                    }
                            }
                        }
                    })
                    .takeUntil(shutdownSubject)
                    .takeWhile((o) -> !isShutdown)
                    .doOnCompleted(() -> {
                        if (isReceivingData.compareAndSet(true, false))
                            synchronized (updateDataRecvngStatus) {
                                updateDataRecvngStatus.call(false);
                            }
                    })
                    .subscribe();
        }
        return response.getContent()
                .lift(new DropOperator<ServerSentEvent>(metricGroupId))
                .flatMap((ServerSentEvent t1) -> {
                    lastDataReceived.set(System.currentTimeMillis());
                    if (isConnected.get() && isReceivingData.compareAndSet(false, true))
                        if (updateDataRecvngStatus != null)
                            synchronized (updateDataRecvngStatus) {
                                updateDataRecvngStatus.call(true);
                            }
                    // Sinks signal errors via an "error:"-prefixed event type.
                    if (t1.hasEventType() && t1.getEventTypeAsString().startsWith("error:")) {
                        return Observable.error(new SseException(ErrorType.Retryable, "Got error SSE event: " + t1.contentAsString()));
                    }
                    return Observable.just(t1.contentAsString());
                }, 1)
                .filter(data -> {
                    // Count heartbeat pings; drop them unless ping filtering is disabled.
                    if (data.startsWith("ping")) {
                        pingCounter.increment();
                        return this.disablePingFiltering;
                    }
                    return true;
                })
                .flatMapIterable((data) -> {
                    boolean useSnappy = true;
                    return CompressionUtils.decompressAndBase64Decode(data, compressedBinaryInputEnabled, useSnappy, delimiter);
                }, 1)
                .takeUntil(shutdownSubject)
                .takeWhile((event) -> !isShutdown);
    }

    /**
     * Returns true if the aggregate DropOperator "dropped" count has grown since the last
     * check. (Unused "onNext" counter lookup removed from the previous revision.)
     */
    private boolean hasDataDrop() {
        final Collection<Metrics> metrics = MetricsRegistry.getInstance().getMetrics(metricNamePrefix);
        long totalDataDrop = 0L;
        if (metrics != null && !metrics.isEmpty()) {
            for (Metrics m : metrics) {
                final Counter dropped = m.getCounter("" + DropOperator.Counters.dropped);
                if (dropped != null)
                    totalDataDrop += dropped.value();
            }
        }
        if (totalDataDrop > lastDataDropValue) {
            lastDataDropValue = totalDataDrop;
            return true;
        }
        return false;
    }

    /**
     * Builds groupId/slotId/id query params from the JOB_ID / WORKER_INDEX / WORKER_NUMBER
     * environment variables, or an empty string if any are missing.
     *
     * @param prefix "?" or "&" depending on whether other params precede these
     */
    private String getDefaultSinkParams(String prefix) {
        String groupId = System.getenv("JOB_ID");
        String slotId = System.getenv("WORKER_INDEX");
        String id = System.getenv("WORKER_NUMBER");
        if (groupId != null && !groupId.isEmpty() && slotId != null && !slotId.isEmpty() && id != null && !id.isEmpty())
            return prefix + "groupId=" + groupId + "&slotId=" + slotId + "&id=" + id;
        return "";
    }

    private static enum ErrorType {
        Retryable,
        Unknown
    }

    /** RuntimeException carrying an {@link ErrorType} classification for SSE failures. */
    private static class SseException extends RuntimeException {

        private final ErrorType type;

        private SseException(ErrorType type, String message) {
            super(type + ": " + message);
            this.type = type;
        }

        private SseException(ErrorType type, String message, Throwable cause) {
            super(type + ": " + message, cause);
            this.type = type;
        }
    }
}
| 8,405 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker-client/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker-client/src/main/java/io/mantisrx/server/worker/client/WorkerConnectionFunc.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.client;
import rx.functions.Action1;
import rx.functions.Func2;
/**
 * Factory of {@link WorkerConnection}s for a worker at a given host and port, usable both as
 * an rx {@link Func2} (host, port) and via the extended overload that accepts status
 * callbacks and a data-receive timeout.
 *
 * @param <T> the type of events the created connections emit
 */
public interface WorkerConnectionFunc<T> extends Func2<String, Integer, WorkerConnection<T>> {

    /**
     * Creates a connection to the worker at the given host and port.
     *
     * @param t1 worker hostname
     * @param t2 worker port
     * @param updateConxStatus callback notified of connect/disconnect; may be null
     * @param updateDataRecvngStatus callback notified when data starts/stops flowing; may be null
     * @param dataRecvTimeoutSecs seconds without data before the connection is considered idle
     */
    WorkerConnection<T> call(String t1, Integer t2, Action1<Boolean> updateConxStatus,
                             Action1<Boolean> updateDataRecvngStatus, long dataRecvTimeoutSecs);
}
| 8,406 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker-client/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker-client/src/main/java/io/mantisrx/server/worker/client/MetricsClientImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.client;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.network.WorkerEndpoint;
import io.mantisrx.server.master.client.MasterClientWrapper;
import io.reactivex.mantis.remote.observable.EndpointChange;
import io.reactivx.mantis.operators.DropOperator;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Observer;
import rx.Subscriber;
import rx.functions.Action0;
import rx.functions.Action1;
import rx.functions.Func1;
import rx.schedulers.Schedulers;
import rx.subscriptions.Subscriptions;
/**
 * Streams metrics from every worker of a Mantis job. Worker metrics endpoints are
 * discovered via a {@link JobWorkerMetricsLocator}; a {@link WorkerConnectionFunc}
 * opens one {@link WorkerConnection} per discovered worker. The per-worker streams
 * are surfaced through {@link #getResults()} as an Observable of Observables.
 * Connection counts are tracked via gauges and, optionally, reported to a
 * {@link WorkerConnectionsStatus} observer.
 */
class MetricsClientImpl<T> implements MetricsClient<T> {

    private static final Logger logger = LoggerFactory.getLogger(MetricsClientImpl.class);
    final String jobId;
    final WorkerConnectionFunc<T> workerConnectionFunc;
    final JobWorkerMetricsLocator jobWorkerMetricsLocator;
    // Set when the outer subscription is torn down; prevents new connections from opening.
    final private AtomicBoolean nowClosed = new AtomicBoolean(false);
    final private WorkerConnections workerConnections = new WorkerConnections();
    private final String workersGuageName = "MetricsConnections";
    private final String expectedWorkersGaugeName = "ExpectedMetricsConnections";
    private final String workerConnReceivingDataGaugeName = "metricsRecvngData";
    private final Gauge workersGauge;
    private final Gauge expectedWorkersGauge;
    private final Gauge workerConnReceivingDataGauge;
    // Latest known worker count for the job, kept current from numWorkersObservable.
    private final AtomicInteger numWorkers = new AtomicInteger();
    private final Observer<WorkerConnectionsStatus> workerConnectionsStatusObserver;
    private final long dataRecvTimeoutSecs;

    /**
     * @param jobId job whose worker metrics are streamed
     * @param workerConnectionFunc factory for per-worker metrics connections
     * @param jobWorkerMetricsLocator locates metrics endpoints for the job's workers
     * @param numWorkersObservable emits the expected number of workers over time
     * @param workerConnectionsStatusObserver observer of connection status changes; may be null
     * @param dataRecvTimeoutSecs seconds without data before a connection is flagged
     *                            as not receiving data (passed through to connections)
     */
    MetricsClientImpl(String jobId, WorkerConnectionFunc<T> workerConnectionFunc, JobWorkerMetricsLocator jobWorkerMetricsLocator,
                      Observable<Integer> numWorkersObservable,
                      Observer<WorkerConnectionsStatus> workerConnectionsStatusObserver, long dataRecvTimeoutSecs) {
        this.jobId = jobId;
        this.workerConnectionFunc = workerConnectionFunc;
        this.jobWorkerMetricsLocator = jobWorkerMetricsLocator;
        Metrics metrics = new Metrics.Builder()
                .name(MetricsClientImpl.class.getCanonicalName() + "-" + jobId)
                .addGauge(workersGuageName)
                .addGauge(expectedWorkersGaugeName)
                .addGauge(workerConnReceivingDataGaugeName)
                .build();
        metrics = MetricsRegistry.getInstance().registerAndGet(metrics);
        workersGauge = metrics.getGauge(workersGuageName);
        expectedWorkersGauge = metrics.getGauge(expectedWorkersGaugeName);
        workerConnReceivingDataGauge = metrics.getGauge(workerConnReceivingDataGaugeName);
        // Track the expected worker count until this client is closed.
        numWorkersObservable
                .doOnNext(new Action1<Integer>() {
                    @Override
                    public void call(Integer integer) {
                        numWorkers.set(integer);
                    }
                })
                .takeWhile(new Func1<Integer, Boolean>() {
                    @Override
                    public Boolean call(Integer integer) {
                        // stop tracking once this client is closed
                        return !nowClosed.get();
                    }
                })
                .subscribe();
        this.workerConnectionsStatusObserver = workerConnectionsStatusObserver;
        this.dataRecvTimeoutSecs = dataRecvTimeoutSecs;
    }

    /** Key used to index a worker connection by its host and metrics port. */
    private String toWorkerConnName(String host, int port) {
        return host + "-" + port;
    }

    @Override
    public boolean hasError() {
        return false;
    }

    @Override
    public String getError() {
        return null;
    }

    /**
     * Returns an Observable that, on subscription, emits one inner Observable per
     * connected worker. Subscription work happens on the io scheduler;
     * unsubscribing closes all open worker connections.
     */
    @Override
    public Observable<Observable<T>> getResults() {
        return Observable
                .create(new Observable.OnSubscribe<Observable<T>>() {
                    @Override
                    public void call(final Subscriber<? super Observable<T>> subscriber) {
                        internalGetResults().subscribe(subscriber);
                    }
                })
                .subscribeOn(Schedulers.io());
    }

    private Observable<Observable<T>> internalGetResults() {
        return jobWorkerMetricsLocator
                .locateWorkerMetricsForJob(jobId)
                .map(new Func1<EndpointChange, Observable<T>>() {
                    @Override
                    public Observable<T> call(EndpointChange endpointChange) {
                        if (nowClosed.get())
                            return Observable.empty();
                        if (endpointChange.getType() == EndpointChange.Type.complete) {
                            return handleEndpointClose(endpointChange);
                        } else {
                            return handleEndpointConnect(endpointChange);
                        }
                    }
                })
                // close all worker connections when the downstream unsubscribes
                .lift(new Observable.Operator<Observable<T>, Observable<T>>() {
                    @Override
                    public Subscriber<? super Observable<T>> call(Subscriber<? super Observable<T>> subscriber) {
                        subscriber.add(Subscriptions.create(new Action0() {
                            @Override
                            public void call() {
                                try {
                                    logger.warn("Closing metrics connections to workers of job " + jobId);
                                    closeAllConnections();
                                } catch (Exception e) {
                                    throw new RuntimeException(e);
                                }
                            }
                        }));
                        return subscriber;
                    }
                })
                .share()
                .lift(new DropOperator<Observable<T>>("client_metrics_share"))
                ;
    }

    /**
     * Opens a metrics connection to the worker described by the endpoint change.
     * Returns an empty Observable if the endpoint has no metrics port or this
     * client has already been closed.
     */
    private Observable<T> handleEndpointConnect(EndpointChange ec) {
        logger.info("Opening connection to metrics sink at " + ec.toString());
        final String unwrappedHost = MasterClientWrapper.getUnwrappedHost(ec.getEndpoint().getHost());
        final int metricsPort;
        if (ec.getEndpoint() instanceof WorkerEndpoint) {
            metricsPort = ((WorkerEndpoint) ec.getEndpoint()).getMetricPort();
        } else {
            logger.error("endpoint received on Endpoint connect is not a WorkerEndpoint {}, no metrics port to connect to", ec.getEndpoint());
            return Observable.empty();
        }
        Worker­Connection<T> workerConnection = workerConnectionFunc.call(unwrappedHost, metricsPort,
                new Action1<Boolean>() {
                    @Override
                    public void call(Boolean flag) {
                        updateWorkerConx(flag);
                    }
                },
                new Action1<Boolean>() {
                    @Override
                    public void call(Boolean flag) {
                        updateWorkerDataReceivingStatus(flag);
                    }
                },
                dataRecvTimeoutSecs
        );
        if (nowClosed.get()) {// check if closed before adding
            try {
                workerConnection.close();
            } catch (Exception e) {
                logger.warn("Error closing worker metrics connection " + workerConnection.getName() + " - " + e.getMessage(), e);
            }
            return Observable.empty();
        }
        workerConnections.put(toWorkerConnName(unwrappedHost, metricsPort), workerConnection);
        // Re-check after publishing the connection: closeAllConnections() may have
        // run between the first check and the put, in which case this connection
        // would otherwise leak.
        if (nowClosed.get()) {
            try {
                workerConnection.close();
                workerConnections.remove(toWorkerConnName(unwrappedHost, metricsPort));
                return Observable.empty();
            } catch (Exception e) {
                logger.warn("Error closing worker metrics connection - " + e.getMessage());
            }
        }
        return workerConnection.call();
    }

    /** Updates the "receiving data" gauge and publishes a status snapshot. */
    private void updateWorkerDataReceivingStatus(Boolean flag) {
        if (flag)
            workerConnReceivingDataGauge.increment();
        else
            workerConnReceivingDataGauge.decrement();
        expectedWorkersGauge.set(numWorkers.get());
        if (workerConnectionsStatusObserver != null) {
            // serialize onNext calls to the (externally supplied) observer
            synchronized (workerConnectionsStatusObserver) {
                workerConnectionsStatusObserver.onNext(new WorkerConnectionsStatus(workerConnReceivingDataGauge.value(), workersGauge.value(), numWorkers.get()));
            }
        }
    }

    /** Updates the open-connections gauge and publishes a status snapshot. */
    private void updateWorkerConx(Boolean flag) {
        if (flag)
            workersGauge.increment();
        else
            workersGauge.decrement();
        expectedWorkersGauge.set(numWorkers.get());
        if (workerConnectionsStatusObserver != null) {
            // serialize onNext calls to the (externally supplied) observer
            synchronized (workerConnectionsStatusObserver) {
                workerConnectionsStatusObserver.onNext(new WorkerConnectionsStatus(workerConnReceivingDataGauge.value(), workersGauge.value(), numWorkers.get()));
            }
        }
    }

    /** Removes and closes the connection for a worker whose endpoint completed. */
    private Observable<T> handleEndpointClose(EndpointChange ec) {
        logger.info("Closed connection to metrics sink at " + ec.toString());
        final String unwrappedHost = MasterClientWrapper.getUnwrappedHost(ec.getEndpoint().getHost());
        final int metricsPort;
        if (ec.getEndpoint() instanceof WorkerEndpoint) {
            metricsPort = ((WorkerEndpoint) ec.getEndpoint()).getMetricPort();
        } else {
            logger.warn("endpoint received on Endpoint close is not a WorkerEndpoint {}, worker endpoint required for metrics port", ec.getEndpoint());
            return Observable.empty();
        }
        final WorkerConnection<T> removed = workerConnections.remove(toWorkerConnName(unwrappedHost, metricsPort));
        if (removed != null) {
            try {
                removed.close();
            } catch (Exception e) {
                // shouldn't happen
                logger.error("Unexpected exception on closing worker metrics connection: " + e.getMessage(), e);
            }
        }
        return Observable.empty();
    }

    /** Marks this client closed and closes every open worker connection. */
    private void closeAllConnections() throws Exception {
        nowClosed.set(true);
        workerConnections.closeOut(new Action1<WorkerConnection<T>>() {
            @Override
            public void call(WorkerConnection<T> tWorkerConnection) {
                try {
                    tWorkerConnection.close();
                } catch (Exception e) {
                    logger.warn("Error closing worker metrics connection " + tWorkerConnection.getName() +
                            " - " + e.getMessage(), e);
                }
            }
        });
    }

    /**
     * Thread-safe registry of open worker connections. Once {@link #closeOut} has
     * run, further {@link #put} calls are ignored so no connection can be added
     * after shutdown.
     */
    class WorkerConnections {

        final private Map<String, WorkerConnection<T>> workerConnections = new HashMap<>();
        private boolean isClosed = false;

        private void put(String key, WorkerConnection<T> val) {
            synchronized (workerConnections) {
                if (isClosed)
                    return;
                workerConnections.put(key, val);
            }
        }

        private WorkerConnection<T> remove(String key) {
            synchronized (workerConnections) {
                return workerConnections.remove(key);
            }
        }

        private void closeOut(Action1<WorkerConnection<T>> onClose) {
            final Map<String, WorkerConnection<T>> toClose;
            synchronized (workerConnections) {
                isClosed = true;
                // Snapshot under the lock: iterating the live map outside the
                // synchronized block raced with concurrent remove() calls and could
                // throw ConcurrentModificationException.
                toClose = new HashMap<>(workerConnections);
            }
            for (WorkerConnection<T> workerConnection : toClose.values()) {
                logger.info("Closing " + workerConnection.getName());
                onClose.call(workerConnection);
            }
        }
    }
}
| 8,407 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker-client/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker-client/src/main/java/io/mantisrx/server/worker/client/WorkerConnection.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.client;
import rx.Observable;
import rx.functions.Func0;
/**
 * A closeable source of one worker's event stream: invoking {@code call()} yields
 * the Observable of items received over this connection, and {@code close()}
 * releases the underlying resources.
 *
 * @param <T> type of the items emitted by the connection
 */
public interface WorkerConnection<T> extends Func0<Observable<T>>, AutoCloseable {

    /** Returns a human-readable identifier for this connection (used in logs). */
    public String getName();
}
| 8,408 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker-client/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker-client/src/main/java/io/mantisrx/server/worker/client/MetricsClient.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.client;
import rx.Observable;
/**
 * Client-side view of the metrics streams published by a job's workers.
 *
 * @param <T> type of the metric events delivered per worker
 */
public interface MetricsClient<T> {

    /** Returns whether this client has encountered an error. */
    boolean hasError();

    /** Returns a description of the error, or {@code null} if there is none. */
    String getError();

    /** Returns an Observable emitting one inner Observable per connected worker. */
    Observable<Observable<T>> getResults();
}
| 8,409 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/WorkerExecutionOperationsNetworkStageTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker;
import static junit.framework.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.WorkerInfo;
import io.mantisrx.runtime.WorkerMap;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.core.WorkerAssignments;
import io.mantisrx.server.core.WorkerHost;
import io.mantisrx.shaded.com.google.common.collect.ImmutableList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.junit.Test;
import rx.Observable;
import rx.Subscription;
import rx.schedulers.Schedulers;
import rx.subjects.BehaviorSubject;
public class WorkerExecutionOperationsNetworkStageTest {

    /**
     * Verifies that a JobSchedulingInfo with assignments for two stages is converted
     * into a WorkerMap containing every worker, with index, number, duration type
     * and ports preserved per worker.
     */
    @Test
    public void convertJobSchedulingInfoToWorkerMapTest() {
        String jobName = "convertJobSchedulingInfoToWorkerMapTest";
        String jobId = jobName + "-1";
        MantisJobDurationType durationType = MantisJobDurationType.Perpetual;
        WorkerAssignments workerAssignmentsStage1 = createWorkerAssignments(1, 2);
        WorkerAssignments workerAssignmentsStage2 = createWorkerAssignments(2, 4);
        Map<Integer, WorkerAssignments> workerAssignmentsMap = new HashMap<>();
        workerAssignmentsMap.put(1, workerAssignmentsStage1);
        workerAssignmentsMap.put(2, workerAssignmentsStage2);
        JobSchedulingInfo jobSchedulingInfo = new JobSchedulingInfo(jobId, workerAssignmentsMap);
        WorkerMap workerMap = WorkerExecutionOperationsNetworkStage.convertJobSchedulingInfoToWorkerMap(jobName, jobId, durationType, jobSchedulingInfo);
        List<WorkerInfo> workersForStage1 = workerMap.getWorkersForStage(1);
        assertTrue(workersForStage1 != null);
        assertEquals(2, workersForStage1.size());
        // createWorkerAssignments numbers workers from 1 and derives ports from the index.
        for (int i = 0; i < workersForStage1.size(); i++) {
            WorkerInfo workerInfo = workersForStage1.get(i);
            assertEquals(i, workerInfo.getWorkerIndex());
            assertEquals(i + 1, workerInfo.getWorkerNumber());
            assertEquals(durationType, workerInfo.getDurationType());
            assertEquals(i + 2, workerInfo.getWorkerPorts().getMetricsPort());
            assertEquals(i + 3, workerInfo.getWorkerPorts().getCustomPort());
        }
        List<WorkerInfo> workersForStage2 = workerMap.getWorkersForStage(2);
        assertTrue(workersForStage2 != null);
        assertEquals(4, workersForStage2.size());
        for (int i = 0; i < workersForStage2.size(); i++) {
            WorkerInfo workerInfo = workersForStage2.get(i);
            assertEquals(i, workerInfo.getWorkerIndex());
            assertEquals(i + 1, workerInfo.getWorkerNumber());
            assertEquals(durationType, workerInfo.getDurationType());
            assertEquals(i + 2, workerInfo.getWorkerPorts().getMetricsPort());
            assertEquals(i + 3, workerInfo.getWorkerPorts().getCustomPort());
        }
    }

    /** Null or incomplete inputs must yield an empty WorkerMap rather than throw. */
    @Test
    public void convertJobSchedulingInfoToWorkerMapInvalidInputTest() {
        String jobName = "convertJobSchedulingInfoToWorkerMapInvalidInputTest";
        String jobId = jobName + "-1";
        MantisJobDurationType durationType = MantisJobDurationType.Perpetual;
        WorkerAssignments workerAssignmentsStage1 = createWorkerAssignments(1, 2);
        WorkerAssignments workerAssignmentsStage2 = createWorkerAssignments(2, 4);
        Map<Integer, WorkerAssignments> workerAssignmentsMap = new HashMap<>();
        workerAssignmentsMap.put(1, workerAssignmentsStage1);
        workerAssignmentsMap.put(2, workerAssignmentsStage2);
        JobSchedulingInfo jobSchedulingInfo = new JobSchedulingInfo(jobId, workerAssignmentsMap);
        // null jobName
        WorkerMap workerMap = WorkerExecutionOperationsNetworkStage.convertJobSchedulingInfoToWorkerMap(null, jobId, durationType, jobSchedulingInfo);
        assertTrue(workerMap.isEmpty());
        // null jobId
        workerMap = WorkerExecutionOperationsNetworkStage.convertJobSchedulingInfoToWorkerMap(jobName, null, durationType, jobSchedulingInfo);
        assertTrue(workerMap.isEmpty());
        // null scheduling info
        workerMap = WorkerExecutionOperationsNetworkStage.convertJobSchedulingInfoToWorkerMap(jobName, jobId, durationType, null);
        assertTrue(workerMap.isEmpty());
        // scheduling info without assignments
        jobSchedulingInfo = new JobSchedulingInfo(jobId, null);
        workerMap = WorkerExecutionOperationsNetworkStage.convertJobSchedulingInfoToWorkerMap(jobName, jobId, durationType, jobSchedulingInfo);
        assertTrue(workerMap.isEmpty());
        // assignments map with a null entry
        workerAssignmentsMap = new HashMap<>();
        workerAssignmentsMap.put(1, null);
        workerAssignmentsMap.put(2, workerAssignmentsStage2);
        jobSchedulingInfo = new JobSchedulingInfo(jobId, workerAssignmentsMap);
        workerMap = WorkerExecutionOperationsNetworkStage.convertJobSchedulingInfoToWorkerMap(jobName, jobId, durationType, jobSchedulingInfo);
        assertTrue(workerMap.isEmpty());
    }

    /** Builds WorkerAssignments for {@code stageNo} with {@code noWorkers} Launched workers. */
    WorkerAssignments createWorkerAssignments(int stageNo, int noWorkers) {
        Map<Integer, WorkerHost> workerHostMap = new HashMap<>();
        for (int i = 0; i < noWorkers; i++) {
            List<Integer> ports =
                    ImmutableList.of(i + 1);
            workerHostMap.put(i, new WorkerHost("host" + i, i, ports, MantisJobState.Launched, i + 1, i + 2, i + 3));
        }
        return new WorkerAssignments(stageNo, noWorkers, workerHostMap);
    }

    /**
     * Exploratory test of subscription sharing via a BehaviorSubject-backed source;
     * it has no assertions and sleeps for tens of seconds, so it is only useful for
     * manually observing the subscribe/unsubscribe logging of {@link #getObs4()}.
     */
    @Test
    public void deferTest() throws InterruptedException {
        Subscription subscribe1 = getObs4().subscribeOn(Schedulers.io()).subscribe((t) -> {
            System.out.println("In 1 -> " + t);
        });
        Thread.sleep(5000);
        Subscription subscribe2 = getObs4().subscribeOn(Schedulers.io()).subscribe((t) -> {
            System.out.println("In 2 -> " + t);
        });
        Thread.sleep(5000);
        subscribe1.unsubscribe();
        Thread.sleep(5000);
        subscribe2.unsubscribe();
        Thread.sleep(5000);
        Subscription subscribe3 = getObs4().subscribeOn(Schedulers.io()).subscribe((t) -> {
            System.out.println("In 3 -> " + t);
        });
        Thread.sleep(5000);
        subscribe3.unsubscribe();
        Thread.sleep(10000);
    }

    /** Deferred, shared interval source; logs each emission and (un)subscription. */
    Observable<Long> getObs() {
        Observable<Long> oLong = Observable.defer(() -> {
            return Observable.interval(1, TimeUnit.SECONDS).doOnNext((e) -> {
                System.out.println("Minted " + e);
            }).share();
        }).doOnSubscribe(() -> {
            System.out.println("Subscribed111" + System.currentTimeMillis());
        }).doOnUnsubscribe(() -> {
            System.out.println("UnSubscribed111" + System.currentTimeMillis());
        });
        return oLong;
    }

    /** Shared interval source without defer, for comparison with {@link #getObs()}. */
    Observable<Long> getObs2() {
        return Observable.interval(1, TimeUnit.SECONDS)
                .doOnNext((e) -> {
                    System.out.println("Minted " + e);
                })
                .share()
                .doOnSubscribe(() -> {
                    System.out.println("Subscribed111" + System.currentTimeMillis());
                }).doOnUnsubscribe(() -> {
                    System.out.println("UnSubscribed111" + System.currentTimeMillis());
                })
                ;
    }

    /** Finite shared range source; logs each emission and (un)subscription. */
    Observable<Long> getObs3() {
        return Observable.range(1, 100).doOnNext((e) -> {
            System.out.println("Minted " + e);
        }).map((i) -> {
            // Long.valueOf avoids the deprecated new Long(int) boxing constructor
            return Long.valueOf(i);
        }).share()
                .doOnSubscribe(() -> {
                    System.out.println("Subscribed111" + System.currentTimeMillis());
                }).doOnUnsubscribe(() -> {
                    System.out.println("UnSubscribed111" + System.currentTimeMillis());
                });
    }

    /**
     * Interval source multicast through a BehaviorSubject: the underlying interval
     * keeps running regardless of downstream (un)subscriptions.
     */
    Observable<Long> getObs4() {
        BehaviorSubject<Long> o = BehaviorSubject.create();
        Observable.interval(1, TimeUnit.SECONDS).doOnNext((e) -> {
            System.out.println("Minted " + e);
        }).doOnSubscribe(() -> {
            System.out.println("Subscribed111" + System.currentTimeMillis());
        }).doOnUnsubscribe(() -> {
            System.out.println("UnSubscribed111" + System.currentTimeMillis());
        })
                .subscribe(o);
        return o;
    }
}
| 8,410 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/DataDroppedPayloadSetterTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker;
import static com.mantisrx.common.utils.MantisMetricStringConstants.INCOMING;
import static io.mantisrx.server.core.stats.MetricStringConstants.DATA_DROP_METRIC_GROUP;
import static io.mantisrx.server.core.stats.MetricStringConstants.DROP_COUNT;
import static io.mantisrx.server.core.stats.MetricStringConstants.ON_NEXT_COUNT;
import static io.reactivx.mantis.operators.DropOperator.METRIC_GROUP;
import static org.junit.Assert.assertEquals;
import com.netflix.spectator.api.DefaultRegistry;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.metrics.spectator.MetricGroupId;
import io.mantisrx.common.metrics.spectator.SpectatorRegistryFactory;
import io.reactivx.mantis.operators.DropOperator;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class DataDroppedPayloadSetterTest {

    private static final Logger logger = LoggerFactory.getLogger(DataDroppedPayloadSetterTest.class);

    /**
     * Registers an incoming DropOperator metric group under {@code metricName} and
     * bumps its dropped/onNext counters by the given amounts.
     */
    private static void registerDropMetrics(String metricName, long dropped, long onNext) {
        Metrics metrics = new Metrics.Builder()
                .id(METRIC_GROUP + "_" + INCOMING + "_" + metricName)
                .addCounter(DropOperator.Counters.dropped.toString())
                .addCounter(DropOperator.Counters.onNext.toString())
                .build();
        metrics = MetricsRegistry.getInstance().registerAndGet(metrics);
        metrics.getCounter(DropOperator.Counters.dropped.toString()).increment(dropped);
        metrics.getCounter(DropOperator.Counters.onNext.toString()).increment(onNext);
    }

    /**
     * DataDroppedPayloadSetter should sum the drop/onNext counters of every
     * incoming DropOperator metric group into the aggregate data-drop gauges.
     */
    @Test
    public void testAggregateDropOperatorMetrics() throws Exception {
        SpectatorRegistryFactory.setRegistry(new DefaultRegistry());
        Heartbeat heartbeat = new Heartbeat("job-1", 1, 1, 1);
        DataDroppedPayloadSetter payloadSetter = new DataDroppedPayloadSetter(heartbeat);

        // Two independent incoming metric groups whose counters should be aggregated.
        registerDropMetrics("metric1", 1, 10);
        registerDropMetrics("metric2", 100, 1000);

        payloadSetter.setPayload(30);

        // The aggregate gauges must reflect the totals across both metric groups.
        Metrics aggregate = MetricsRegistry.getInstance().getMetric(new MetricGroupId(DATA_DROP_METRIC_GROUP));
        assertEquals(101L, aggregate.getGauge(DROP_COUNT).value());
        assertEquals(1010, aggregate.getGauge(ON_NEXT_COUNT).value());
    }
}
| 8,411 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/HeartbeatTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.StatusPayloads;
import java.util.List;
import junit.framework.Assert;
import org.junit.Test;
public class HeartbeatTest {

    /**
     * A single-use payload added twice keeps only the latest value, and is dropped
     * from the status after it has been read once; persistent payloads remain.
     */
    @Test
    public void testSingleUsePayloads() throws Exception {
        final int firstDrop = 10;
        final int secondDrop = 12;

        Heartbeat heartbeat = new Heartbeat("Jobcluster-123", 1, 0, 0);
        heartbeat.setPayload("" + StatusPayloads.Type.SubscriptionState, "true");
        // The second single-use payload overwrites the first.
        heartbeat.addSingleUsePayload("" + StatusPayloads.Type.IncomingDataDrop, "" + firstDrop);
        heartbeat.addSingleUsePayload("" + StatusPayloads.Type.IncomingDataDrop, "" + secondDrop);

        Status status = heartbeat.getCurrentHeartbeatStatus();
        List<Status.Payload> payloads = status.getPayloads();
        Assert.assertEquals(2, payloads.size());

        int observedDrop = 0;
        for (Status.Payload payload : payloads) {
            if (StatusPayloads.Type.valueOf(payload.getType()) == StatusPayloads.Type.IncomingDataDrop)
                observedDrop = Integer.parseInt(payload.getData());
        }
        Assert.assertEquals(secondDrop, observedDrop);

        // The single-use payload was consumed by the first read; only the
        // persistent subscription-state payload remains.
        Assert.assertEquals(1, heartbeat.getCurrentHeartbeatStatus().getPayloads().size());
    }
}
| 8,412 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/jobmaster/SourceJobWorkerMetricsSubscriptionTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.*;
import io.mantisrx.common.MantisServerSentEvent;
import io.mantisrx.runtime.parameter.SourceJobParameters;
import io.mantisrx.server.core.NamedJobInfo;
import io.mantisrx.server.master.client.MantisMasterClientApi;
import io.mantisrx.shaded.com.google.common.collect.ImmutableList;
import io.mantisrx.shaded.com.google.common.collect.ImmutableMap;
import io.mantisrx.shaded.com.google.common.collect.ImmutableSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.junit.Test;
import rx.Observable;
public class SourceJobWorkerMetricsSubscriptionTest {

    /** Target infos for the same source job are grouped into one clientId set per job. */
    @Test
    public void testGetSourceJobToClientMap() {
        List<SourceJobParameters.TargetInfo> infos = ImmutableList.of(
                new SourceJobParameters.TargetInfoBuilder().withSourceJobName("jobA").withQuery("criterion").withClientId("client1").build(),
                new SourceJobParameters.TargetInfoBuilder().withSourceJobName("jobA").withQuery("criterion").withClientId("client2").build(),
                new SourceJobParameters.TargetInfoBuilder().withSourceJobName("jobB").withQuery("criterion").withClientId("client1").build(),
                new SourceJobParameters.TargetInfoBuilder().withSourceJobName("jobB").withQuery("criterion").withClientId("client3").build()
        );
        SourceJobWorkerMetricsSubscription sub = new SourceJobWorkerMetricsSubscription(infos, null, null, null);
        Map<String, Set<String>> results = sub.getSourceJobToClientMap();
        Map<String, Set<String>> expected = ImmutableMap.of("jobA", ImmutableSet.of("client1", "client2"),
                "jobB", ImmutableSet.of("client1", "client3"));
        assertEquals(expected, results);
    }

    /**
     * getResults() should resolve each source job name to its latest jobId and fetch
     * per-client metrics for that jobId; events from all jobs are merged downstream.
     */
    @Test
    public void testGetResultsForAllSourceJobs() throws Exception {
        List<SourceJobParameters.TargetInfo> infos = ImmutableList.of(
                new SourceJobParameters.TargetInfoBuilder().withSourceJobName("jobA").withQuery("criterion").withClientId("client1").build(),
                new SourceJobParameters.TargetInfoBuilder().withSourceJobName("jobA").withQuery("criterion").withClientId("client2").build(),
                new SourceJobParameters.TargetInfoBuilder().withSourceJobName("jobB").withQuery("criterion").withClientId("client1").build(),
                new SourceJobParameters.TargetInfoBuilder().withSourceJobName("jobB").withQuery("criterion").withClientId("client3").build()
        );
        MantisMasterClientApi masterClient = mock(MantisMasterClientApi.class);
        // Spy so getResultsForJobId can be stubbed while the rest of the class runs for real.
        SourceJobWorkerMetricsSubscription sub = spy(new SourceJobWorkerMetricsSubscription(infos, masterClient, null, new AutoScaleMetricsConfig()));
        when(masterClient.namedJobInfo("jobA")).thenReturn(Observable.just(new NamedJobInfo("jobA", "jobA-1")));
        // NOTE(review): the NamedJobInfo name below is "jobA" for the jobB stub — looks
        // like a copy/paste slip; only the jobId ("jobB-2") appears to matter here. Confirm.
        when(masterClient.namedJobInfo("jobB")).thenReturn(Observable.just(new NamedJobInfo("jobA", "jobB-2")));
        doReturn(Observable.just(Observable.just(new MantisServerSentEvent("jobA-event")))).when(sub).getResultsForJobId(eq("jobA-1"), any());
        doReturn(Observable.just(Observable.just(new MantisServerSentEvent("jobB-event")))).when(sub).getResultsForJobId(eq("jobB-2"), any());
        // Expect one event from each stubbed source job.
        CountDownLatch latch = new CountDownLatch(2);
        Observable.merge(sub.getResults()).doOnNext(event -> {
            if ("jobA-event".equals(event.getEventAsString()) || "jobB-event".equals(event.getEventAsString())) {
                latch.countDown();
            }
        }).subscribe();
        latch.await(10, TimeUnit.SECONDS);
        assertEquals(0, latch.getCount());
        // Each jobId fetch must request the per-client metric patterns for exactly
        // the clients subscribed to that source job.
        Set<String> jobAMetrics = ImmutableSet.of("PushServerSse:clientId=client1:*", "PushServerSse:clientId=client2:*",
                "ServerSentEventRequestHandler:clientId=client1:*", "ServerSentEventRequestHandler:clientId=client2:*");
        verify(sub, times(1)).getResultsForJobId("jobA-1", jobAMetrics);
        jobAMetrics = ImmutableSet.of("PushServerSse:clientId=client1:*", "PushServerSse:clientId=client3:*",
                "ServerSentEventRequestHandler:clientId=client1:*", "ServerSentEventRequestHandler:clientId=client3:*");
        verify(sub, times(1)).getResultsForJobId("jobB-2", jobAMetrics);
    }
}
| 8,413 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/jobmaster/WorkerMetricHandlerTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster;
import static io.mantisrx.server.core.stats.MetricStringConstants.DATA_DROP_METRIC_GROUP;
import static io.mantisrx.server.core.stats.MetricStringConstants.KAFKA_CONSUMER_FETCH_MGR_METRIC_GROUP;
import static io.reactivex.mantis.network.push.PushServerSse.DROPPED_COUNTER_METRIC_NAME;
import static io.reactivex.mantis.network.push.PushServerSse.PROCESSED_COUNTER_METRIC_NAME;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import io.mantisrx.common.metrics.measurement.GaugeMeasurement;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.core.WorkerAssignments;
import io.mantisrx.server.core.WorkerHost;
import io.mantisrx.server.core.stats.MetricStringConstants;
import io.mantisrx.server.master.client.MantisMasterClientApi;
import io.mantisrx.shaded.com.google.common.collect.ImmutableMap;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Observer;
import rx.functions.Func1;
public class WorkerMetricHandlerTest {
private static final Logger logger = LoggerFactory.getLogger(WorkerMetricHandlerTest.class);
    /**
     * A data-drop metric report from a worker should produce a DataDrop auto-scale
     * event whose value is the drop percentage (dropped / (dropped + onNext) * 100).
     */
    @Test
    public void testDropDataMetricTriggersAutoScale() throws InterruptedException {
        final String jobId = "test-job-1";
        final int stage = 1;
        final int workerIdx = 0;
        final int workerNum = 1;
        final int dropCount = 1;
        final int onNextCount = 9;
        // 1 dropped out of 10 total -> 10% drop expected in the event
        final double dropPercent = dropCount * 100.0 / (dropCount + onNextCount);
        final List<GaugeMeasurement> gauges = Arrays.asList(
                new GaugeMeasurement(MetricStringConstants.ON_NEXT_COUNT, onNextCount),
                new GaugeMeasurement(MetricStringConstants.DROP_COUNT, dropCount));
        final MantisMasterClientApi mockMasterClientApi = mock(MantisMasterClientApi.class);
        // Single-worker stage assignment returned by the mocked master.
        final Map<Integer, WorkerAssignments> assignmentsMap = new HashMap<>();
        assignmentsMap.put(stage, new WorkerAssignments(stage, 1,
                Collections.singletonMap(1, new WorkerHost("localhost", workerIdx, Arrays.asList(31300), MantisJobState.Started, workerNum, 31301, -1))));
        when(mockMasterClientApi.schedulingChanges(jobId)).thenReturn(Observable.just(new JobSchedulingInfo(jobId, assignmentsMap)));
        final CountDownLatch latch = new CountDownLatch(1);
        final AutoScaleMetricsConfig aggregationConfig = new AutoScaleMetricsConfig();
        final WorkerMetricHandler workerMetricHandler = new WorkerMetricHandler(jobId, new Observer<JobAutoScaler.Event>() {
            @Override
            public void onCompleted() {
                logger.warn("onCompleted");
            }

            @Override
            public void onError(Throwable e) {
                logger.warn("onError {}", e.getMessage(), e);
            }

            @Override
            public void onNext(JobAutoScaler.Event event) {
                logger.info("got auto scale event {}", event);
                JobAutoScaler.Event expected = new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.DataDrop, stage, dropPercent, 1, "");
                assertEquals(expected, event);
                latch.countDown();
            }
        }, mockMasterClientApi, aggregationConfig);
        final Observer<MetricData> metricDataObserver = workerMetricHandler.initAndGetMetricDataObserver();
        // Purposely create a new String for jobId
        metricDataObserver.onNext(new MetricData(new String(jobId), stage, workerIdx, workerNum, DATA_DROP_METRIC_GROUP, gauges));
        // The handler aggregates over a window (presumably ~30s — TODO confirm), hence the wait.
        assertTrue(latch.await(30 + 5/* leeway */, TimeUnit.SECONDS));
    }
    /**
     * With two workers reporting both a Kafka-lag metric and a user-defined metric,
     * the handler should emit two auto-scale events: a UserDefined event with the
     * averaged user metric, then a KafkaLag event with the aggregated lag.
     */
    @Test
    public void testKafkaLagAndUserDefinedTriggersAutoScale() throws InterruptedException {
        final String jobId = "test-job-1";
        final int stage = 1;
        final int workerIdx = 0;
        final int workerNum = 1;
        final int kafkaLag = 10_000;
        final int numWorkers = 2;
        final int metricValue = 1_000;
        final String testMetricGroup = "testMetricGroup";
        final String testMetricName = "testMetricName";
        // Worker 1 reports half the lag/metric of worker 2.
        final List<GaugeMeasurement> worker1KafkaGauges = Arrays.asList(
                new GaugeMeasurement(MetricStringConstants.KAFKA_LAG, kafkaLag / 2));
        final List<GaugeMeasurement> worker2KafkaGauges = Arrays.asList(
                new GaugeMeasurement(MetricStringConstants.KAFKA_LAG, kafkaLag));
        final List<GaugeMeasurement> gauges1 = Arrays.asList(
                new GaugeMeasurement(testMetricName, metricValue / 2));
        final List<GaugeMeasurement> gauges2 = Arrays.asList(
                new GaugeMeasurement(testMetricName, metricValue));
        final MantisMasterClientApi mockMasterClientApi = mock(MantisMasterClientApi.class);
        // Two-worker stage assignment returned by the mocked master.
        final Map<Integer, WorkerAssignments> assignmentsMap = new HashMap<>();
        final Map<Integer, WorkerHost> hosts = new HashMap<>();
        hosts.put(1, new WorkerHost("localhost", workerIdx, Arrays.asList(31300), MantisJobState.Started, workerNum, 31301, -1));
        hosts.put(2, new WorkerHost("localhost", workerIdx + 1, Arrays.asList(31305), MantisJobState.Started, workerNum + 1, 31316, -1));
        assignmentsMap.put(stage, new WorkerAssignments(stage, numWorkers, hosts));
        when(mockMasterClientApi.schedulingChanges(jobId)).thenReturn(Observable.just(new JobSchedulingInfo(jobId, assignmentsMap)));
        final CountDownLatch latch = new CountDownLatch(2);
        // The user-defined metric is configured with AVERAGE aggregation.
        final AutoScaleMetricsConfig aggregationConfig = new AutoScaleMetricsConfig(Collections.singletonMap(testMetricGroup, Collections.singletonMap(testMetricName, AutoScaleMetricsConfig.AggregationAlgo.AVERAGE)));
        final WorkerMetricHandler workerMetricHandler = new WorkerMetricHandler(jobId, new Observer<JobAutoScaler.Event>() {
            @Override
            public void onCompleted() {
                logger.warn("onCompleted");
            }

            @Override
            public void onError(Throwable e) {
                logger.warn("onError {}", e.getMessage(), e);
            }

            @Override
            public void onNext(JobAutoScaler.Event event) {
                logger.info("got auto scale event {}", event);
                final long count = latch.getCount();
                // First event: averaged user-defined metric (avg of 500 and 1000 = 750).
                if (count == 2) {
                    JobAutoScaler.Event expected1 = new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.UserDefined, stage, metricValue * 3 / 4, numWorkers, "");
                    assertEquals(expected1, event);
                    latch.countDown();
                }
                // Second event: aggregated Kafka lag.
                if (count == 1) {
                    JobAutoScaler.Event expected2 = new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.KafkaLag, stage, kafkaLag, numWorkers, "");
                    assertEquals(expected2, event);
                    latch.countDown();
                }
            }
        }, mockMasterClientApi, aggregationConfig);
        final Observer<MetricData> metricDataObserver = workerMetricHandler.initAndGetMetricDataObserver();
        metricDataObserver.onNext(new MetricData(jobId, stage, workerIdx, workerNum, KAFKA_CONSUMER_FETCH_MGR_METRIC_GROUP, worker1KafkaGauges));
        metricDataObserver.onNext(new MetricData(jobId, stage, workerIdx + 1, workerNum + 1, KAFKA_CONSUMER_FETCH_MGR_METRIC_GROUP, worker2KafkaGauges));
        metricDataObserver.onNext(new MetricData(jobId, stage, workerIdx, workerNum, testMetricGroup, gauges1));
        metricDataObserver.onNext(new MetricData(jobId, stage, workerIdx + 1, workerNum + 1, testMetricGroup, gauges2));
        // The handler aggregates over a window (presumably ~30s — TODO confirm), hence the wait.
        assertTrue(latch.await(30 + 5/* leeway */, TimeUnit.SECONDS));
    }
@Test
public void testOutlierResubmitWorks() throws InterruptedException {
    // Worker workerNum persistently drops data while its two peers report zero drops.
    // Expectations: (1) the outlier detector asks the master to resubmit the outlier worker,
    // and (2) the aggregate drop percentage across the 3 workers produces a DataDrop
    // auto-scale event.
    final String jobId = "test-job-1";
    final int stage = 1;
    final int workerIdx = 0;
    final int workerNum = 1;
    final int numWorkers = 3;
    final int dropCount = 1;
    final int onNextCount = 9;
    final double dropPercent = dropCount * 100.0 / (dropCount + onNextCount);
    final List<GaugeMeasurement> outlierDropGauges = Arrays.asList(
            new GaugeMeasurement(MetricStringConstants.ON_NEXT_COUNT, onNextCount),
            new GaugeMeasurement(MetricStringConstants.DROP_COUNT, dropCount));
    final List<GaugeMeasurement> zeroDropGauges = Arrays.asList(
            new GaugeMeasurement(MetricStringConstants.ON_NEXT_COUNT, onNextCount),
            new GaugeMeasurement(MetricStringConstants.DROP_COUNT, 0));
    final MantisMasterClientApi mockMasterClientApi = mock(MantisMasterClientApi.class);
    final Map<Integer, WorkerAssignments> assignmentsMap = new HashMap<>();
    Map<Integer, WorkerHost> hosts = new HashMap<>();
    // NOTE(review): all three hosts are constructed with the same worker-number argument
    // (workerNum), although they are keyed by workerNum..workerNum+2 and the metrics below
    // are reported for worker numbers 1..3 — confirm whether this argument should vary.
    hosts.put(workerNum, new WorkerHost("localhost", workerIdx, Arrays.asList(31300), MantisJobState.Started, workerNum, 31301, -1));
    hosts.put(workerNum + 1, new WorkerHost("localhost", workerIdx + 1, Arrays.asList(31302), MantisJobState.Started, workerNum, 31303, -1));
    hosts.put(workerNum + 2, new WorkerHost("localhost", workerIdx + 2, Arrays.asList(31304), MantisJobState.Started, workerNum, 31305, -1));
    assignmentsMap.put(stage, new WorkerAssignments(stage, numWorkers, hosts));
    final CountDownLatch resubmitLatch = new CountDownLatch(1);
    final CountDownLatch autoScaleLatch = new CountDownLatch(1);
    when(mockMasterClientApi.schedulingChanges(jobId)).thenReturn(Observable.just(new JobSchedulingInfo(jobId, assignmentsMap)));
    when(mockMasterClientApi.resubmitJobWorker(anyString(), anyString(), anyInt(), anyString())).thenAnswer(new Answer<Observable<Boolean>>() {
        @Override
        public Observable<Boolean> answer(InvocationOnMock invocation) throws Throwable {
            final Object[] arguments = invocation.getArguments();
            final String jobIdRecv = (String) arguments[0];
            final String user = (String) arguments[1];
            final int resubmittedWorkerNum = (Integer) arguments[2];
            // final String reason = (String)arguments[3];
            final Observable<Boolean> result = Observable.just(1).map(new Func1<Integer, Boolean>() {
                @Override
                public Boolean call(Integer integer) {
                    logger.info("resubmitting worker {} of jobId {}", resubmittedWorkerNum, jobId);
                    // JUnit convention: expected value first, actual second (the original had
                    // the "JobMaster" assertion reversed, which would mislabel a failure).
                    assertEquals(workerNum, resubmittedWorkerNum);
                    assertEquals("JobMaster", user);
                    assertEquals(jobId, jobIdRecv);
                    resubmitLatch.countDown();
                    return true;
                }
            });
            return result;
        }
    });
    final AutoScaleMetricsConfig aggregationConfig = new AutoScaleMetricsConfig();
    final WorkerMetricHandler workerMetricHandler = new WorkerMetricHandler(jobId, new Observer<JobAutoScaler.Event>() {
        @Override
        public void onCompleted() {
            logger.warn("onCompleted");
        }

        @Override
        public void onError(Throwable e) {
            logger.warn("onError {}", e.getMessage(), e);
        }

        @Override
        public void onNext(JobAutoScaler.Event event) {
            logger.info("got auto scale event {}", event);
            // Only the outlier worker drops, so the stage-wide value is dropPercent / numWorkers.
            JobAutoScaler.Event expected = new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.DataDrop, stage, dropPercent / numWorkers, numWorkers, "");
            assertEquals(expected, event);
            autoScaleLatch.countDown();
        }
    }, mockMasterClientApi, aggregationConfig);
    final Observer<MetricData> metricDataObserver = workerMetricHandler.initAndGetMetricDataObserver();
    // Push one more than the minimum number of data points needed to trip the outlier detector.
    final int minDataPointsForOutlierTrigger = 16;
    for (int i = 0; i <= minDataPointsForOutlierTrigger; i++) {
        metricDataObserver.onNext(new MetricData(jobId, stage, workerIdx, workerNum,
                DATA_DROP_METRIC_GROUP, outlierDropGauges));
        metricDataObserver.onNext(new MetricData(jobId, stage, workerIdx + 1, workerNum + 1,
                DATA_DROP_METRIC_GROUP, zeroDropGauges));
        metricDataObserver.onNext(new MetricData(jobId, stage, workerIdx + 2, workerNum + 2,
                DATA_DROP_METRIC_GROUP, zeroDropGauges));
    }
    assertTrue(resubmitLatch.await(30, TimeUnit.SECONDS));
    assertTrue(autoScaleLatch.await(30 + 5/* leeway */, TimeUnit.SECONDS));
}
@Test
public void testSourceJobDropMetricTriggersAutoScale() throws InterruptedException {
    // Drop/processed counters reported by a *source* job's SSE request handler (metric group
    // keyed by this job's client id and the receiving worker's socket address) should be
    // folded into a single SourceJobDrop auto-scale event for this job.
    final String jobId = "test-job-1";
    final String sourceJobId = "source-test-job-1";
    final int stage = 1;
    final MantisMasterClientApi masterClient = mock(MantisMasterClientApi.class);
    final Map<Integer, WorkerAssignments> assignments = new HashMap<>();
    assignments.put(stage, new WorkerAssignments(stage, 2,
            ImmutableMap.of(1, new WorkerHost("1.1.1.1", 0, Arrays.asList(31300), MantisJobState.Started, 1, 31301, -1),
                    2, new WorkerHost("2.2.2.2", 1, Arrays.asList(31300), MantisJobState.Started, 2, 31301, -1))));
    when(masterClient.schedulingChanges(jobId)).thenReturn(Observable.just(new JobSchedulingInfo(jobId, assignments)));
    final CountDownLatch eventSeen = new CountDownLatch(1);
    final AutoScaleMetricsConfig metricsConfig = new AutoScaleMetricsConfig();
    final WorkerMetricHandler handler = new WorkerMetricHandler(jobId, new Observer<JobAutoScaler.Event>() {
        @Override
        public void onCompleted() {
            logger.warn("onCompleted");
        }

        @Override
        public void onError(Throwable e) {
            logger.warn("onError {}", e.getMessage(), e);
        }

        @Override
        public void onNext(JobAutoScaler.Event event) {
            logger.info("got auto scale event {}", event);
            // Expected metric value should be (1 + 2 + 3 + 6) / 6.0 / 2
            JobAutoScaler.Event expected = new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.SourceJobDrop, stage, 1.0, 2, "");
            if (expected.equals(event)) {
                eventSeen.countDown();
            }
        }
    }, masterClient, metricsConfig);
    final Observer<MetricData> metricSink = handler.initAndGetMetricDataObserver();
    // Source job worker 0 -> job worker 0
    metricSink.onNext(sourceJobDropMetric(sourceJobId, stage, 0, 1, jobId, "1.1.1.1", 10.0, 1.0));
    // Source job worker 0 -> job worker 1
    metricSink.onNext(sourceJobDropMetric(sourceJobId, stage, 0, 1, jobId, "2.2.2.2", 20.0, 2.0));
    // Source job worker 1 -> job worker 0
    metricSink.onNext(sourceJobDropMetric(sourceJobId, stage, 1, 2, jobId, "1.1.1.1", 30.0, 3.0));
    // Source job worker 1 -> job worker 1
    metricSink.onNext(sourceJobDropMetric(sourceJobId, stage, 1, 2, jobId, "2.2.2.2", 60.0, 6.0));
    // A second datapoint for the same (source worker, target worker) pair exercises MAX aggregation.
    metricSink.onNext(sourceJobDropMetric(sourceJobId, stage, 1, 2, jobId, "2.2.2.2", 50.0, 5.0));
    assertTrue(eventSeen.await(30 + 5/* leeway */, TimeUnit.SECONDS));
}

// Builds a MetricData entry shaped like a source job's SSE request-handler metrics for one
// (source worker -> target worker socket address) pair.
private static MetricData sourceJobDropMetric(String sourceJobId, int stage, int workerIdx, int workerNum,
                                              String clientId, String sockAddr, double processed, double dropped) {
    List<GaugeMeasurement> gauges = Arrays.asList(
            new GaugeMeasurement(PROCESSED_COUNTER_METRIC_NAME, processed),
            new GaugeMeasurement(DROPPED_COUNTER_METRIC_NAME, dropped));
    return new MetricData(sourceJobId, stage, workerIdx, workerNum,
            "ServerSentEventRequestHandler:clientId=" + clientId + ":sockAddr=/" + sockAddr, gauges);
}
}
/*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster;
import static io.reactivex.mantis.network.push.PushServerSse.DROPPED_COUNTER_METRIC_NAME;
import static io.reactivex.mantis.network.push.PushServerSse.PROCESSED_COUNTER_METRIC_NAME;
import static org.junit.Assert.*;
import io.mantisrx.shaded.com.google.common.collect.ImmutableSet;
import java.util.Set;
import org.junit.Test;
public class AutoScaleMetricsConfigTest {

    // Representative source-job metric group names used across several tests.
    private static final String RAVEN_SSE_GROUP =
            "ServerSentEventRequestHandler:clientId=RavenConnectorJob-1657357:sockAddr=/100.87.51.222";
    private static final String RAVEN_PUSH_SERVER_GROUP =
            "PushServerSse:clientId=RavenConnectorJob-1657357:sockAddr=/100.87.51.222";

    @Test
    public void testGenerateSourceJobMetricGroups() {
        // Each source-job client id expands into a wildcard group for both SSE server types.
        AutoScaleMetricsConfig config = new AutoScaleMetricsConfig();
        Set<String> actual = config.generateSourceJobMetricGroups(ImmutableSet.of("clientId1", "client-id-2"));
        Set<String> expected = ImmutableSet.of(
                "PushServerSse:clientId=clientId1:*", "PushServerSse:clientId=client-id-2:*",
                "ServerSentEventRequestHandler:clientId=clientId1:*", "ServerSentEventRequestHandler:clientId=client-id-2:*");
        assertEquals(expected, actual);
    }

    @Test
    public void testGetAggregationAlgoForSourceJobMetrics() throws Exception {
        AutoScaleMetricsConfig config = new AutoScaleMetricsConfig();
        // Drop counters from the known source-job metric groups aggregate with MAX ...
        assertEquals(AutoScaleMetricsConfig.AggregationAlgo.MAX,
                config.getAggregationAlgo(RAVEN_SSE_GROUP, DROPPED_COUNTER_METRIC_NAME));
        assertTrue(config.isSourceJobDropMetric(RAVEN_SSE_GROUP, DROPPED_COUNTER_METRIC_NAME));
        assertEquals(AutoScaleMetricsConfig.AggregationAlgo.MAX,
                config.getAggregationAlgo(RAVEN_PUSH_SERVER_GROUP, DROPPED_COUNTER_METRIC_NAME));
        // ... while processed counters fall back to AVERAGE.
        assertEquals(AutoScaleMetricsConfig.AggregationAlgo.AVERAGE,
                config.getAggregationAlgo(RAVEN_PUSH_SERVER_GROUP, PROCESSED_COUNTER_METRIC_NAME));
        // A group that merely contains a known prefix (but does not start with it) is not special.
        assertEquals(AutoScaleMetricsConfig.AggregationAlgo.AVERAGE,
                config.getAggregationAlgo("ABC" + RAVEN_SSE_GROUP, DROPPED_COUNTER_METRIC_NAME));
        // Extra ':' separators inside the client-id portion are tolerated.
        assertEquals(AutoScaleMetricsConfig.AggregationAlgo.MAX,
                config.getAggregationAlgo("PushServerSse:clientId=ABC:DEF", DROPPED_COUNTER_METRIC_NAME));
    }

    @Test
    public void testAddSourceJobDropMetrics() {
        AutoScaleMetricsConfig config = new AutoScaleMetricsConfig();
        // Register a custom drop metric spec: group pattern :: metric name :: aggregation algo.
        config.addSourceJobDropMetrics("myDropGroup1:clientId=_CLIENT_ID_:*::myDropCounter::MAX");
        String customGroup = "myDropGroup1:clientId=RavenConnectorJob-1657357:sockAddr=/100.87.51.222";
        assertEquals(AutoScaleMetricsConfig.AggregationAlgo.MAX,
                config.getAggregationAlgo(customGroup, "myDropCounter"));
        assertTrue(config.isSourceJobDropMetric(customGroup, "myDropCounter"));
        assertFalse(config.isSourceJobDropMetric("ABC" + customGroup, "myDropCounter"));
        // Built-in defaults remain in place after adding custom entries.
        assertTrue(config.isSourceJobDropMetric(RAVEN_SSE_GROUP, DROPPED_COUNTER_METRIC_NAME));
    }

    @Test
    public void testAddSourceJobDropMetricsThrowsException() {
        AutoScaleMetricsConfig config = new AutoScaleMetricsConfig();
        try {
            config.addSourceJobDropMetrics("InvalidMetricFormat");
            fail();
        } catch (Exception expected) {
            // a malformed metric spec must be rejected
        }
    }

    @Test
    public void testAddSourceJobDropMetricsEmptyString() {
        // null / empty specs are accepted without throwing and do not clobber built-in defaults.
        AutoScaleMetricsConfig config = new AutoScaleMetricsConfig();
        config.addSourceJobDropMetrics(null);
        config.addSourceJobDropMetrics("");
        assertTrue(config.isSourceJobDropMetric(RAVEN_SSE_GROUP, DROPPED_COUNTER_METRIC_NAME));
    }
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster;
import static io.mantisrx.runtime.descriptor.StageScalingPolicy.ScalingReason.DataDrop;
import static io.mantisrx.runtime.descriptor.StageScalingPolicy.ScalingReason.KafkaLag;
import static io.mantisrx.runtime.descriptor.StageScalingPolicy.ScalingReason.UserDefined;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.*;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.server.master.client.MantisMasterClientApi;
import io.mantisrx.server.worker.jobmaster.clutch.ClutchConfiguration;
import io.mantisrx.server.worker.jobmaster.clutch.rps.ClutchRpsPIDConfig;
import io.vavr.Tuple;
import io.vavr.control.Option;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Observer;
import rx.functions.Func1;
public class JobAutoScalerTest {
private static final Logger logger = LoggerFactory.getLogger(JobAutoScalerTest.class);
@Test
public void testScaleUp() throws InterruptedException {
    // Drives the JobAutoScaler with Memory events above the scale-up threshold and verifies:
    // (a) one scale-up request is sent to the master, (b) no further scale-up happens inside
    // the cooldown window, and (c) another scale-up is issued once cooldown has elapsed.
    final String jobId = "test-job-1";
    final int coolDownSec = 2;
    final int scalingStageNum = 1;
    final MantisMasterClientApi mockMasterClientApi = mock(MantisMasterClientApi.class);
    final Map<Integer, StageSchedulingInfo> schedulingInfoMap = new HashMap<>();
    final int numStage1Workers = 1;
    final int increment = 1;
    final int decrement = 1;
    final int min = 1;
    final int max = 5;
    final double scaleUpAbovePct = 45.0;
    final double scaleDownBelowPct = 15.0;
    final double workerMemoryMB = 512.0;
    // Stage 1 scales on Memory: up above 45%, down below 15%, rolling count 1-of-2.
    final StageSchedulingInfo stage1SchedInfo = StageSchedulingInfo.builder()
            .numberOfInstances(numStage1Workers)
            .machineDefinition(new MachineDefinition(2, workerMemoryMB, 200, 1024, 2))
            .scalingPolicy(new StageScalingPolicy(scalingStageNum, min, max, increment, decrement, coolDownSec,
                    Collections.singletonMap(StageScalingPolicy.ScalingReason.Memory,
                            new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.Memory, scaleDownBelowPct, scaleUpAbovePct, new StageScalingPolicy.RollingCount(1, 2)))))
            .scalable(true)
            .build();
    schedulingInfoMap.put(scalingStageNum, stage1SchedInfo);
    when(mockMasterClientApi.scaleJobStage(eq(jobId), eq(scalingStageNum), eq(numStage1Workers + increment), anyString())).thenReturn(Observable.just(true));
    Context context = mock(Context.class);
    when(context.getWorkerMapObservable()).thenReturn(Observable.empty());
    final JobAutoScaler jobAutoScaler = new JobAutoScaler(jobId, new SchedulingInfo(schedulingInfoMap), mockMasterClientApi, context);
    jobAutoScaler.start();
    final Observer<JobAutoScaler.Event> jobAutoScalerObserver = jobAutoScaler.getObserver();
    // should trigger a scale up (above 45% scaleUp threshold)
    jobAutoScalerObserver.onNext(new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.Memory, scalingStageNum, workerMemoryMB * (scaleUpAbovePct / 100.0 + 0.01), numStage1Workers, ""));
    verify(mockMasterClientApi, timeout(1000).times(1)).scaleJobStage(jobId, scalingStageNum, numStage1Workers + increment, String.format("Memory with value %1$,.2f exceeded scaleUp threshold of 45.0", (scaleUpAbovePct / 100.0 + 0.01) * 100.0));
    // should *not* trigger a scale up before cooldown period (above 45% scaleUp threshold)
    jobAutoScalerObserver.onNext(new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.Memory, scalingStageNum, workerMemoryMB * (scaleUpAbovePct / 100.0 + 0.01), numStage1Workers + increment, ""));
    jobAutoScalerObserver.onNext(new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.Memory, scalingStageNum, workerMemoryMB * (scaleUpAbovePct / 100.0 + 0.01), numStage1Workers + increment, ""));
    Thread.sleep(coolDownSec * 1000);
    // retry sending auto scale event till scaleJobStage request sent to master, as there is possible a race between the sleep for coolDownSecs in the Test and the event being processed before coolDownSecs
    final CountDownLatch retryLatch = new CountDownLatch(1);
    when(mockMasterClientApi.scaleJobStage(eq(jobId), eq(scalingStageNum), eq(numStage1Workers + 2 * increment), anyString())).thenAnswer(new Answer<Observable<Void>>() {
        @Override
        public Observable<Void> answer(InvocationOnMock invocation) throws Throwable {
            retryLatch.countDown();
            return Observable.just(null);
        }
    });
    do {
        logger.info("sending Job auto scale Event");
        // should trigger a scale up after cooldown period (above 45% scaleUp threshold)
        jobAutoScalerObserver.onNext(new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.Memory, scalingStageNum, workerMemoryMB * (scaleUpAbovePct / 100.0 + 0.01), numStage1Workers + increment, ""));
    } while (!retryLatch.await(1, TimeUnit.SECONDS));
    verify(mockMasterClientApi, timeout(1000).times(1)).scaleJobStage(jobId, scalingStageNum, numStage1Workers + 2 * increment, String.format("Memory with value %1$,.2f exceeded scaleUp threshold of 45.0", (scaleUpAbovePct / 100.0 + 0.01) * 100.0));
}
@Test
public void testScalingResiliency() throws InterruptedException {
    // Simulates the master failing the first two scaleJobStage attempts; the auto scaler is
    // expected to keep retrying until the request succeeds (third attempt releases the latch).
    final String jobId = "test-job-1";
    final int coolDownSec = 2;
    final int scalingStageNum = 1;
    final MantisMasterClientApi mockMasterClientApi = mock(MantisMasterClientApi.class);
    final Map<Integer, StageSchedulingInfo> schedulingInfoMap = new HashMap<>();
    final int numStage1Workers = 1;
    final int increment = 1;
    final int decrement = 1;
    final int min = 1;
    final int max = 5;
    final double scaleUpAbovePct = 45.0;
    final double scaleDownBelowPct = 15.0;
    final double workerMemoryMB = 512.0;
    final StageSchedulingInfo stage1SchedInfo = StageSchedulingInfo.builder()
            .numberOfInstances(numStage1Workers)
            .machineDefinition(new MachineDefinition(2, workerMemoryMB, 200, 1024, 2))
            .scalingPolicy(new StageScalingPolicy(scalingStageNum, min, max, increment, decrement, coolDownSec,
                    Collections.singletonMap(StageScalingPolicy.ScalingReason.Memory,
                            new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.Memory, scaleDownBelowPct, scaleUpAbovePct, new StageScalingPolicy.RollingCount(1, 2)))))
            .scalable(true)
            .build();
    schedulingInfoMap.put(scalingStageNum, stage1SchedInfo);
    final CountDownLatch scaleJobStageSuccessLatch = new CountDownLatch(1);
    final AtomicInteger count = new AtomicInteger(0);
    // Response observable that throws for the first two evaluations and succeeds afterwards,
    // mimicking transient connection failures to the master.
    final Observable<Boolean> simulateScaleJobStageFailureResp = Observable.just(1).map(new Func1<Integer, Boolean>() {
        @Override
        public Boolean call(Integer integer) {
            if (count.incrementAndGet() < 3) {
                throw new IllegalStateException("fake connection exception");
            } else {
                scaleJobStageSuccessLatch.countDown();
                return true;
            }
        }
    });
    when(mockMasterClientApi.scaleJobStage(eq(jobId), eq(scalingStageNum), eq(numStage1Workers + increment), anyString())).thenReturn(simulateScaleJobStageFailureResp);
    Context context = mock(Context.class);
    when(context.getWorkerMapObservable()).thenReturn(Observable.empty());
    final JobAutoScaler jobAutoScaler = new JobAutoScaler(jobId, new SchedulingInfo(schedulingInfoMap), mockMasterClientApi, context);
    jobAutoScaler.start();
    final Observer<JobAutoScaler.Event> jobAutoScalerObserver = jobAutoScaler.getObserver();
    // should trigger a scale up (above 45% scaleUp threshold)
    jobAutoScalerObserver.onNext(new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.Memory, scalingStageNum, workerMemoryMB * (scaleUpAbovePct / 100.0 + 0.01), numStage1Workers, ""));
    verify(mockMasterClientApi, timeout(1000).times(1)).scaleJobStage(jobId, scalingStageNum, numStage1Workers + increment, String.format("Memory with value %1$,.2f exceeded scaleUp threshold of 45.0", (scaleUpAbovePct / 100.0 + 0.01) * 100.0));
    // Blocks until the mocked master finally accepts the scale request (i.e. retries happened).
    scaleJobStageSuccessLatch.await();
}
@Test
public void testScaleDown() throws InterruptedException {
    // Drives a Memory value below the scale-down threshold and verifies one scale-down is
    // requested, no further scale-down happens inside the cooldown window, and no request at
    // all is made once the stage has already reached its minimum size.
    final String jobId = "test-job-1";
    final int coolDownSec = 2;
    final int scalingStageNum = 1;
    final MantisMasterClientApi mockMasterClientApi = mock(MantisMasterClientApi.class);
    final Map<Integer, StageSchedulingInfo> schedulingInfoMap = new HashMap<>();
    final int numStage1Workers = 2;
    final int increment = 1;
    final int decrement = 1;
    final int min = 1;
    final int max = 5;
    final double scaleUpAbovePct = 45.0;
    final double scaleDownBelowPct = 15.0;
    final double workerMemoryMB = 512.0;
    final StageSchedulingInfo stage1SchedInfo = StageSchedulingInfo.builder()
            .numberOfInstances(numStage1Workers)
            .machineDefinition(new MachineDefinition(2, workerMemoryMB, 200, 1024, 2))
            .scalingPolicy(new StageScalingPolicy(scalingStageNum, min, max, increment, decrement, coolDownSec,
                    Collections.singletonMap(StageScalingPolicy.ScalingReason.Memory,
                            new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.Memory, scaleDownBelowPct, scaleUpAbovePct, new StageScalingPolicy.RollingCount(1, 2)))))
            .scalable(true)
            .build();
    schedulingInfoMap.put(scalingStageNum, stage1SchedInfo);
    when(mockMasterClientApi.scaleJobStage(eq(jobId), eq(scalingStageNum), eq(numStage1Workers - decrement), anyString())).thenReturn(Observable.just(true));
    Context context = mock(Context.class);
    when(context.getWorkerMapObservable()).thenReturn(Observable.empty());
    final JobAutoScaler jobAutoScaler = new JobAutoScaler(jobId, new SchedulingInfo(schedulingInfoMap), mockMasterClientApi, context);
    jobAutoScaler.start();
    final Observer<JobAutoScaler.Event> jobAutoScalerObserver = jobAutoScaler.getObserver();
    // should trigger a scale down (below 15% scaleDown threshold)
    jobAutoScalerObserver.onNext(new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.Memory, scalingStageNum, workerMemoryMB * (scaleDownBelowPct / 100.0 - 0.01), numStage1Workers, ""));
    verify(mockMasterClientApi, timeout(1000).times(1)).scaleJobStage(jobId, scalingStageNum, numStage1Workers - decrement, String.format("Memory with value %1$,.2f is below scaleDown threshold of %2$,.1f", (scaleDownBelowPct / 100.0 - 0.01) * 100.0, scaleDownBelowPct));
    // should *not* trigger a scale down before cooldown period (below 15% scaleDown threshold)
    jobAutoScalerObserver.onNext(new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.Memory, scalingStageNum, workerMemoryMB * (scaleDownBelowPct / 100.0 - 0.01), numStage1Workers - decrement, ""));
    jobAutoScalerObserver.onNext(new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.Memory, scalingStageNum, workerMemoryMB * (scaleDownBelowPct / 100.0 - 0.01), numStage1Workers - decrement, ""));
    Thread.sleep(coolDownSec * 1000);
    // With these constants (2 - 1 == 1 == min) this branch always runs: at min size no
    // further scale-down request may reach the master even after cooldown.
    if (numStage1Workers - decrement == min) {
        // should not trigger a scale down after cooldown period if numWorkers=min (below 15% scaleDown threshold)
        jobAutoScalerObserver.onNext(new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.Memory, scalingStageNum, workerMemoryMB * (scaleDownBelowPct / 100.0 - 0.01), numStage1Workers - decrement, ""));
        verifyNoMoreInteractions(mockMasterClientApi);
    }
}
@Test
public void testScaleDownNotLessThanMin() throws InterruptedException {
    // A decrement larger than (current - min) must clamp the scale-down at the stage minimum
    // instead of going below it (or below zero).
    final String jobId = "test-job-1";
    final int cooldownSecs = 2;
    final int stageNumber = 1;
    final int currentWorkers = 5;
    final int scaleUpIncrement = 10;
    // decrement by 10 on scale down, this will push num workers below min and below 0.
    final int scaleDownDecrement = 10;
    final int minWorkers = 3;
    final int maxWorkers = 50;
    final double scaleUpAbovePct = 45.0;
    final double scaleDownBelowPct = 15.0;
    final double workerMemoryMB = 512.0;
    final MantisMasterClientApi masterClient = mock(MantisMasterClientApi.class);
    final StageScalingPolicy.Strategy memoryStrategy = new StageScalingPolicy.Strategy(
            StageScalingPolicy.ScalingReason.Memory, scaleDownBelowPct, scaleUpAbovePct,
            new StageScalingPolicy.RollingCount(1, 2));
    final StageSchedulingInfo stageInfo = StageSchedulingInfo.builder()
            .numberOfInstances(currentWorkers)
            .machineDefinition(new MachineDefinition(2, workerMemoryMB, 200, 1024, 2))
            .scalingPolicy(new StageScalingPolicy(stageNumber, minWorkers, maxWorkers,
                    scaleUpIncrement, scaleDownDecrement, cooldownSecs,
                    Collections.singletonMap(StageScalingPolicy.ScalingReason.Memory, memoryStrategy)))
            .scalable(true)
            .build();
    final Map<Integer, StageSchedulingInfo> schedulingInfoMap = new HashMap<>();
    schedulingInfoMap.put(stageNumber, stageInfo);
    when(masterClient.scaleJobStage(eq(jobId), eq(stageNumber), anyInt(), anyString())).thenReturn(Observable.just(true));
    Context context = mock(Context.class);
    when(context.getWorkerMapObservable()).thenReturn(Observable.empty());
    final JobAutoScaler autoScaler = new JobAutoScaler(jobId, new SchedulingInfo(schedulingInfoMap), masterClient, context);
    autoScaler.start();
    // Report memory just below the scale-down threshold: one scale-down should fire, clamped to min.
    autoScaler.getObserver().onNext(new JobAutoScaler.Event(
            StageScalingPolicy.ScalingReason.Memory, stageNumber,
            workerMemoryMB * (scaleDownBelowPct / 100.0 - 0.01), currentWorkers, ""));
    verify(masterClient, timeout(1000).times(1)).scaleJobStage(jobId, stageNumber, minWorkers,
            String.format("Memory with value %1$,.2f is below scaleDown threshold of %2$,.1f",
                    (scaleDownBelowPct / 100.0 - 0.01) * 100.0, scaleDownBelowPct));
    verifyNoMoreInteractions(masterClient);
}
@Test
public void testScaleUpOnDifferentScalingReasons() throws InterruptedException {
    // Runs the same scale-up / cooldown / post-cooldown scale-up scenario as testScaleUp,
    // once for each of DataDrop, KafkaLag and UserDefined (absolute thresholds, not percents).
    final List<StageScalingPolicy.ScalingReason> scalingReasons = Arrays.asList(DataDrop, KafkaLag, UserDefined);
    for (StageScalingPolicy.ScalingReason scalingReason : scalingReasons) {
        logger.info("==== test scaling reason {} =====", scalingReason.name());
        final String jobId = "test-job-1";
        final int coolDownSec = 2;
        final int scalingStageNum = 1;
        final MantisMasterClientApi mockMasterClientApi = mock(MantisMasterClientApi.class);
        final Map<Integer, StageSchedulingInfo> schedulingInfoMap = new HashMap<>();
        final int numStage1Workers = 1;
        final int increment = 1;
        final int decrement = 0;
        final int min = 1;
        final int max = 5;
        final double scaleUpAbove = 2000.0;
        final double scaleDownBelow = 0.0;
        final double workerMemoryMB = 512.0;
        final StageSchedulingInfo stage1SchedInfo = StageSchedulingInfo.builder()
                .numberOfInstances(numStage1Workers)
                .machineDefinition(new MachineDefinition(2, workerMemoryMB, 200, 1024, 2))
                .scalingPolicy(new StageScalingPolicy(scalingStageNum, min, max, increment, decrement, coolDownSec,
                        Collections.singletonMap(scalingReason,
                                new StageScalingPolicy.Strategy(scalingReason, scaleDownBelow, scaleUpAbove, new StageScalingPolicy.RollingCount(1, 2)))))
                .scalable(true)
                .build();
        schedulingInfoMap.put(scalingStageNum, stage1SchedInfo);
        when(mockMasterClientApi.scaleJobStage(eq(jobId), eq(scalingStageNum), eq(numStage1Workers + increment), anyString())).thenReturn(Observable.just(true));
        Context context = mock(Context.class);
        when(context.getWorkerMapObservable()).thenReturn(Observable.empty());
        final JobAutoScaler jobAutoScaler = new JobAutoScaler(jobId, new SchedulingInfo(schedulingInfoMap), mockMasterClientApi, context);
        jobAutoScaler.start();
        final Observer<JobAutoScaler.Event> jobAutoScalerObserver = jobAutoScaler.getObserver();
        // should trigger a scale up (above scaleUp threshold)
        jobAutoScalerObserver.onNext(new JobAutoScaler.Event(scalingReason, scalingStageNum, scaleUpAbove + 0.01, numStage1Workers, ""));
        verify(mockMasterClientApi, timeout(1000).times(1)).scaleJobStage(jobId, scalingStageNum, numStage1Workers + increment, String.format("%s with value %2$.2f exceeded scaleUp threshold of %3$.1f", scalingReason.name(), (scaleUpAbove + 0.01), scaleUpAbove));
        // should *not* trigger a scale up before cooldown period (above scaleUp threshold)
        jobAutoScalerObserver.onNext(new JobAutoScaler.Event(scalingReason, scalingStageNum, scaleUpAbove + 0.01, numStage1Workers + increment, ""));
        jobAutoScalerObserver.onNext(new JobAutoScaler.Event(scalingReason, scalingStageNum, scaleUpAbove + 0.01, numStage1Workers + increment, ""));
        Thread.sleep(coolDownSec * 1000);
        // retry sending auto scale event till scaleJobStage request sent to master, as there is possible a race between the sleep for coolDownSecs in the Test and the event being processed before coolDownSecs
        final CountDownLatch retryLatch = new CountDownLatch(1);
        when(mockMasterClientApi.scaleJobStage(eq(jobId), eq(scalingStageNum), eq(numStage1Workers + 2 * increment), anyString())).thenAnswer(new Answer<Observable<Void>>() {
            @Override
            public Observable<Void> answer(InvocationOnMock invocation) throws Throwable {
                retryLatch.countDown();
                return Observable.just(null);
            }
        });
        do {
            logger.info("sending Job auto scale Event");
            // should trigger a scale up after cooldown period (above scaleUp threshold)
            jobAutoScalerObserver.onNext(new JobAutoScaler.Event(scalingReason, scalingStageNum, scaleUpAbove + 0.01, numStage1Workers + increment, ""));
        } while (!retryLatch.await(1, TimeUnit.SECONDS));
        verify(mockMasterClientApi, timeout(1000).times(1)).scaleJobStage(jobId, scalingStageNum, numStage1Workers + 2 * increment, String.format("%s with value %2$.2f exceeded scaleUp threshold of %3$.1f", scalingReason.name(), (scaleUpAbove + 0.01), scaleUpAbove));
    }
}
@Test
public void testGetClutchConfigurationFromJson() throws Exception {
    final JobAutoScaler autoScaler = new JobAutoScaler("jobId", null, null, null);

    // Case 1: explicit integral decay plus partial RPS overrides; unspecified RPS fields
    // keep their defaults.
    String json = "{" +
            " \"cooldownSeconds\": 100," +
            " \"integralDecay\": 0.7," +
            " \"rpsConfig\": {" +
            " \"scaleUpAbovePct\": 30.0," +
            " \"scaleUpMultiplier\": 1.5" +
            " }" +
            "}";
    ClutchConfiguration parsed = autoScaler.getClutchConfiguration(json).get(1);
    ClutchRpsPIDConfig expectedRps = new ClutchRpsPIDConfig(0.0, Tuple.of(30.0, 0.0), 0.0, 0.0,
            Option.of(75.0), Option.of(30.0), Option.of(0.0), Option.of(1.5), Option.of(1.0));
    assertEquals(Option.of(100L), parsed.getCooldownSeconds());
    assertEquals(0.7, parsed.getIntegralDecay().get(), 1e-10);
    assertEquals(expectedRps, parsed.getRpsConfig().get());

    // Case 2: scale-down overrides plus a custom rope and set-point percentile.
    json = "{" +
            " \"cooldownSeconds\": 100," +
            " \"rpsConfig\": {" +
            " \"rope\": [90.0, 80.0]," +
            " \"setPointPercentile\": 95.0," +
            " \"scaleDownBelowPct\": 150.0," +
            " \"scaleDownMultiplier\": 0.5" +
            " }" +
            "}";
    parsed = autoScaler.getClutchConfiguration(json).get(1);
    expectedRps = new ClutchRpsPIDConfig(0.0, Tuple.of(90.0, 80.0), 0.0, 0.0,
            Option.of(95.0), Option.of(0.0), Option.of(150.0), Option.of(1.0), Option.of(0.5));
    assertEquals(expectedRps, parsed.getRpsConfig().get());

    // Case 3: no rpsConfig at all -> absent RPS config and zero min size.
    json = "{" +
            " \"cooldownSeconds\": 100" +
            "}";
    parsed = autoScaler.getClutchConfiguration(json).get(1);
    assertFalse(parsed.getRpsConfig().isDefined());
    assertEquals(0, parsed.getMinSize());
}
// @Test
// public void testBackPressure() throws InterruptedException {
// // DropOperator does not propogate backpressure in this scenario
// final CountDownLatch latch = new CountDownLatch(1000);
// Observable.range(1, 1000)
// .subscribeOn(Schedulers.computation())
// .onBackpressureBuffer(100, () -> System.out.printf("overflow"), BackpressureOverflow.ON_OVERFLOW_DROP_OLDEST)
//// .lift(new DropOperator<Integer>("op1"))
//// .lift(new DropOperator<Integer>("op2"))
// .observeOn(Schedulers.io())
// .map(new Func1<Integer, Integer>() {
// @Override
// public Integer call(Integer integer) {
// try {
// Thread.sleep(10);
// System.out.println("got "+integer);
// latch.countDown();
// } catch (InterruptedException e) {
// e.printStackTrace();
// }
// return integer;
// }
// })
// .subscribe();
//
// latch.await();
// }
}
| 8,416 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/jobmaster | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/jobmaster/control/AdaptiveAutoScalerTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.control;
import static org.mockito.Mockito.mock;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.server.worker.jobmaster.JobAutoScaler;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
import org.junit.Test;
import rx.Observable;
import rx.observers.TestSubscriber;
/**
 * Tests for {@link AdaptiveAutoscaler}: an Observable transformer that maps a stream of
 * per-worker metric events to a stream of desired worker counts via a PID controller.
 * Each case feeds a synthetic total-RPS sequence (divided by the current worker count to
 * produce the per-worker value) and asserts on the emitted scale decisions.
 */
public class AdaptiveAutoScalerTest {

    /**
     * Controller config shared by most cases: set point of 10 (per-worker value), inverted
     * error, proportional-only gains (kp=0.2, ki=kd=0), scale clamped to [2, 5] workers.
     */
    public static final String cfg = "{\"setPoint\": 10,\n" +
            "  \"invert\": true,\n" +
            "  \"rope\": 0.0,\n" +
            "  \"kp\": 0.2,\n" +
            "  \"ki\": 0.0,\n" +
            "  \"kd\": 0.0,\n" +
            "  \"minScale\": 2,\n" +
            "  \"maxScale\": 5\n" +
            " }";

    private static final ObjectMapper objectMapper = new ObjectMapper();

    // NOTE(review): removed the unused private helper determineScale(rps, setPoint);
    // it was dead code with no callers in this class.

    /** Increasing load should step the worker count up, capped at maxScale (5). */
    @Test
    public void shouldScaleUpUnderIncreasingLoadAndRespectMaximum() throws IOException {
        // Arrange
        Observable<Double> totalRPS = Observable.just(10.0, 20.0, 30.0, 40.0, 50.0, 60.0);
        AdaptiveAutoscalerConfig config = objectMapper.readValue(cfg, new TypeReference<AdaptiveAutoscalerConfig>() {});
        JobAutoScaler.StageScaler scaler = mock(JobAutoScaler.StageScaler.class);
        AdaptiveAutoscaler autoScaler = new AdaptiveAutoscaler(config, scaler, 2);
        TestSubscriber<Long> testSubscriber = new TestSubscriber<>();
        // Act: numWorkers tracks the autoscaler's own output so each event reports the
        // per-worker RPS that would actually be observed at the current scale.
        AtomicLong numWorkers = new AtomicLong(2);
        totalRPS.map(rps -> new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.CPU,
                1,
                rps / (1.0 * numWorkers.get()),
                ((Long) numWorkers.get()).intValue(),
                "message"))
                .compose(autoScaler)
                .map(x -> (Long) x)
                .doOnNext(numWorkers::set)
                .subscribe(testSubscriber);
        // Assert
        testSubscriber.assertCompleted();
        testSubscriber.assertNoErrors();
        testSubscriber.assertValues(2L, 2L, 3L, 4L, 5L, 5L);
    }

    /** Decreasing load should step the worker count down, floored at minScale (2). */
    @Test
    public void shouldScaleDownUnderDecreasingLoadAndRespectMinimum() throws IOException {
        // Arrange
        Observable<Double> totalRPS = Observable.just(60.0, 50.0, 40.0, 30.0, 20.0, 10.0, 10.0);
        AdaptiveAutoscalerConfig config = objectMapper.readValue(cfg, new TypeReference<AdaptiveAutoscalerConfig>() {});
        JobAutoScaler.StageScaler scaler = mock(JobAutoScaler.StageScaler.class);
        AdaptiveAutoscaler autoScaler = new AdaptiveAutoscaler(config, scaler, 5);
        TestSubscriber<Long> testSubscriber = new TestSubscriber<>();
        // Act
        AtomicLong numWorkers = new AtomicLong(5);
        totalRPS.map(rps -> new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.CPU,
                1,
                rps / (1.0 * numWorkers.get()),
                ((Long) numWorkers.get()).intValue(),
                "message"))
                .compose(autoScaler)
                .map(x -> (Long) x)
                .doOnNext(numWorkers::set)
                .subscribe(testSubscriber);
        // Assert
        testSubscriber.assertCompleted();
        testSubscriber.assertNoErrors();
        testSubscriber.assertValues(5L, 5L, 5L, 4L, 3L, 2L, 2L);
    }

    /**
     * While requested workers have not yet materialized, the autoscaler should hold off
     * on further scaling actions and resume once the reported count catches up.
     */
    @Test
    public void shouldPauseScalingWhenWaitingOnWorkersAndResumeAfter() throws IOException {
        // Arrange
        Observable<Double> totalRPS = Observable.just(10.0, 20.0, 30.0, 40.0, 50.0, 60.0);
        AdaptiveAutoscalerConfig config = objectMapper.readValue(cfg, new TypeReference<AdaptiveAutoscalerConfig>() {});
        JobAutoScaler.StageScaler scaler = mock(JobAutoScaler.StageScaler.class);
        AdaptiveAutoscaler autoScaler = new AdaptiveAutoscaler(config, scaler, 2);
        TestSubscriber<Long> testSubscriber = new TestSubscriber<>();
        // Act
        // This elaborate scheme skips the scaling action at the 40.0 RPS point
        // as we have not received the instances requested at the 30.0 RPS point:
        // numWorkers is only updated from the autoscaler's output after the 5th event.
        AtomicLong numWorkers = new AtomicLong(2);
        AtomicLong count = new AtomicLong(0);
        totalRPS
                .doOnNext(x -> {
                    if (count.incrementAndGet() == 5) {
                        numWorkers.set(3);
                    }
                })
                .map(rps -> new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.CPU,
                        1,
                        rps / (1.0 * numWorkers.get()),
                        ((Long) numWorkers.get()).intValue(),
                        "message"))
                .compose(autoScaler)
                .map(x -> (Long) x)
                .doOnNext(n -> {
                    if (count.get() > 4) {
                        numWorkers.set(n);
                    }
                })
                .subscribe(testSubscriber);
        // Assert
        testSubscriber.assertCompleted();
        testSubscriber.assertNoErrors();
        testSubscriber.assertValues(2L, 2L, 3L, 5L, 5L);
    }

    /** Values inside the rope (dead band around the set point) must not trigger scaling. */
    @Test
    public void shouldRemainConstantWhenValuesAreWithinRope() throws IOException {
        // Arrange: same gains as cfg but with a rope of 3.0 around the set point.
        final String cfg2 = "{\"setPoint\": 10,\n" +
                "  \"invert\": true,\n" +
                "  \"rope\": 3.0,\n" +
                "  \"kp\": 0.2,\n" +
                "  \"ki\": 0.0,\n" +
                "  \"kd\": 0.0,\n" +
                "  \"minScale\": 2,\n" +
                "  \"maxScale\": 5\n" +
                " }";
        Observable<Double> totalRPS = Observable.just(30.0, 32.0, 28.0, 31.0, 30.0, 29.0, 31.0);
        AdaptiveAutoscalerConfig config = objectMapper.readValue(cfg2, new TypeReference<AdaptiveAutoscalerConfig>() {});
        JobAutoScaler.StageScaler scaler = mock(JobAutoScaler.StageScaler.class);
        AdaptiveAutoscaler autoScaler = new AdaptiveAutoscaler(config, scaler, 3);
        TestSubscriber<Long> testSubscriber = new TestSubscriber<>();
        // Act
        AtomicLong numWorkers = new AtomicLong(3);
        totalRPS.map(rps -> new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.CPU,
                1,
                rps / (1.0 * numWorkers.get()),
                ((Long) numWorkers.get()).intValue(),
                "message"))
                .compose(autoScaler)
                .map(x -> (Long) x)
                .doOnNext(numWorkers::set)
                .subscribe(testSubscriber);
        // Assert
        testSubscriber.assertCompleted();
        testSubscriber.assertNoErrors();
        testSubscriber.assertValues(3L, 3L, 3L, 3L, 3L, 3L, 3L);
    }
}
| 8,417 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/jobmaster/control | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/jobmaster/control/utils/IntegratorTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.control.utils;
import org.junit.Test;
import rx.Observable;
import rx.observers.TestSubscriber;
/**
 * Unit tests for {@link Integrator}: a running-sum operator with an optional starting
 * value and optional clamping bounds on the accumulated total.
 */
public class IntegratorTest {

    /** Fixed input sequence shared by every case. */
    private final Observable<Double> input = Observable.just(1.0, -1.0, 0.0, -10.0);

    /** Lifts {@code input} through {@code op} and checks the emitted running sums. */
    private void verifySums(Integrator op, Double... expected) {
        TestSubscriber<Double> subscriber = new TestSubscriber<>();
        input.lift(op).subscribe(subscriber);
        subscriber.assertCompleted();
        subscriber.assertValues(expected);
    }

    @Test
    public void shouldIntegrateOverInput() {
        verifySums(new Integrator(0), 1.0, 0.0, 0.0, -10.0);
    }

    @Test
    public void shouldRespectMinimumValue() {
        // Sum is clamped at the lower bound of 0.0.
        verifySums(new Integrator(0, 0.0, 10.0), 1.0, 0.0, 0.0, 0.0);
    }

    @Test
    public void shouldRespectMaximumValue() {
        // Sum is clamped at the upper bound of 0.0.
        verifySums(new Integrator(0, -100.0, 0.0), 0.0, -1.0, -1.0, -11.0);
    }

    @Test
    public void shouldBeginFromInitialSuppliedValue() {
        // Accumulation starts from 1.0 instead of zero.
        verifySums(new Integrator(1.0), 2.0, 1.0, 1.0, -9.0);
    }
}
| 8,418 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/jobmaster/control | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/jobmaster/control/utils/ErrorComputerTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.control.utils;
import org.junit.Test;
import rx.Observable;
import rx.observers.TestSubscriber;
/**
 * Unit tests for {@link ErrorComputer}: computes (setPoint - value), optionally inverted,
 * with a "rope" dead band around the set point inside which the error is reported as zero.
 */
public class ErrorComputerTest {

    /** Fixed input sequence shared by every case. */
    private final Observable<Double> input = Observable.just(1.0, -1.0, 0.0, 10.0);

    /** Lifts {@code input} through {@code op} and checks the emitted error values. */
    private void verifyErrors(ErrorComputer op, Double... expected) {
        TestSubscriber<Double> subscriber = new TestSubscriber<>();
        input.lift(op).subscribe(subscriber);
        subscriber.assertCompleted();
        subscriber.assertValues(expected);
    }

    @Test
    public void shouldComputeError() {
        // Plain error: setPoint(0.0) - value.
        verifyErrors(new ErrorComputer(0.0, false, 0.0), -1.0, 1.0, 0.0, -10.0);
    }

    @Test
    public void shouldComputeInvertedError() {
        // Inverted error flips the sign (note the -0.0 for the zero input).
        verifyErrors(new ErrorComputer(0.0, true, 0.0), 1.0, -1.0, -0.0, 10.0);
    }

    @Test
    public void shouldTreatValuesWithinRopeAsZero() {
        // Symmetric rope of 1.0: |value| <= 1 maps to zero; outside, the rope is subtracted.
        verifyErrors(new ErrorComputer(0.0, false, 1.0), 0.0, 0.0, 0.0, -9.0);
    }

    @Test
    public void shouldWorkWithAsymmetricalRope() {
        // Asymmetric rope (0.0 below, 5.0 above the set point).
        verifyErrors(new ErrorComputer(0.0, false, 0.0, 5.0), 0.0, 1.0, 0.0, -5.0);
    }
}
| 8,419 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/jobmaster/control | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/jobmaster/control/actuators/MantisStageActuatorTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.control.actuators;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import io.mantisrx.server.worker.jobmaster.JobAutoScaler;
import org.junit.Test;
import rx.Observable;
import rx.observers.TestSubscriber;
/**
 * Unit tests for {@link MantisStageActuator}: rounds a fractional desired worker count up
 * to the next integer, forwards it downstream, and invokes the stage scaler on changes.
 */
public class MantisStageActuatorTest {

    // Desired (fractional) worker counts fed into the actuator.
    Observable<Double> data = Observable.just(1.1, 3.0, 2.85, 0.1);

    /** Subscribes {@code data} through a fresh actuator (initial scale 1) using {@code scaler}. */
    private TestSubscriber<Double> run(JobAutoScaler.StageScaler scaler) {
        TestSubscriber<Double> subscriber = new TestSubscriber<>();
        data.lift(new MantisStageActuator(1, scaler)).subscribe(subscriber);
        return subscriber;
    }

    @Test
    public void shouldEchoCeilingOfInput() {
        TestSubscriber<Double> subscriber = run(mock(JobAutoScaler.StageScaler.class));
        subscriber.assertCompleted();
        subscriber.assertValues(2.0, 3.0, 3.0, 1.0);
    }

    @Test
    public void shouldCallScalerWhenInputChanged() {
        JobAutoScaler.StageScaler scaler = mock(JobAutoScaler.StageScaler.class);
        TestSubscriber<Double> subscriber = run(scaler);
        subscriber.assertCompleted();
        subscriber.assertValues(2.0, 3.0, 3.0, 1.0);
        // One scaler invocation per transition: 1->2 and 2->3 scale up, 3->1 scales down.
        verify(scaler).scaleUpStage(eq(1), eq(2), any());
        verify(scaler).scaleUpStage(eq(2), eq(3), any());
        verify(scaler).scaleDownStage(eq(3), eq(1), any());
    }
}
| 8,420 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/jobmaster/control | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/jobmaster/control/controllers/PIDControllerTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.control.controllers;
import static org.junit.Assert.assertEquals;
import io.mantisrx.server.worker.jobmaster.control.Controller;
import org.junit.Test;
import rx.Observable;
/**
 * Closed-loop sanity test for {@link PIDController}: a proportional-integral controller
 * (kp=25, ki=1, kd=0) should grow a simulated cache from size 0 until its hit rate
 * settles at the 0.75 set point.
 */
public class PIDControllerTest {

    @Test
    public void shouldControlCacheSizeToMaintainDesiredHitRate() {
        // Arrange
        Cache cache = new Cache();
        final double setPoint = 0.75;
        Controller controller = PIDController.of(25.0, 1.0, 0.0);
        double currentSize = 0.0;

        // Act: iterate the feedback loop; each step feeds the current error into the
        // controller and applies the resulting control action to the cache size.
        for (int step = 0; step <= 100; ++step) {
            double hitRate = cache.process(currentSize);
            double error = setPoint - hitRate;
            double controlAction = Observable.just(error).lift(controller).toBlocking().first();
            currentSize += controlAction;
        }

        // Assert
        assertEquals(0.75, cache.process(currentSize), 0.01); // Hitrate is within 1% of target.
        assertEquals(0.75 * 250, currentSize, 5.0); // Cache size is within 5 of target size.
    }

    /**
     * Toy plant under control: hit rate grows linearly with cache size and saturates once
     * the cache can hold all 250 items. Static nested class — it needs no reference to the
     * enclosing test instance.
     */
    private static class Cache {

        private final double totalNumberOfItems = 250.0;

        /** Returns the hit rate as a fraction in [0, 1] for the given cache size. */
        public double process(double size) {
            if (size <= 0.0) {
                return 0.0;
            }
            if (size > totalNumberOfItems) {
                // Fixed: previously returned 100.0 here, mixing percent units into an
                // otherwise fractional hit rate; a saturated cache hits on every lookup.
                return 1.0;
            }
            return size / totalNumberOfItems;
        }
    }
}
| 8,421 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/jobmaster/clutch | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/jobmaster/clutch/rps/RpsScaleComputerTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.clutch.rps;
import static org.junit.Assert.assertEquals;
import com.netflix.control.clutch.ClutchConfiguration;
import io.vavr.Tuple;
import io.vavr.control.Option;
import org.junit.Test;
/**
 * Tests for {@link RpsScaleComputer}, which turns a PID scale signal (fractional change)
 * into a new worker count, applying per-direction percentage gates and multipliers.
 */
public class RpsScaleComputerTest {

    @Test
    public void testApply() {
        // Gate scale-ups below 40% and scale-downs below 60%; amplify ups by 2x, damp downs by 0.5x.
        ClutchRpsPIDConfig rpsConfig = new ClutchRpsPIDConfig(0.0, Tuple.of(0.0, 0.0), 0.0, 0.0,
                Option.none(), Option.of(40.0), Option.of(60.0), Option.of(2.0), Option.of(0.5));
        RpsScaleComputer computer = new RpsScaleComputer(rpsConfig);
        ClutchConfiguration config = ClutchConfiguration.builder().minSize(1).maxSize(1000).build();

        // +10% is under the 40% scale-up gate: size is left alone.
        assertEquals(100, computer.apply(config, 100L, 0.1), 1e-10);
        // +50% clears the gate and is doubled (consistent with 100 * (1 + 0.5 * 2) = 200).
        assertEquals(200, computer.apply(config, 100L, 0.5), 1e-10);
        // -70% clears the 60% scale-down gate and is halved (consistent with 100 * (1 - 0.7 * 0.5) = 65).
        assertEquals(65, computer.apply(config, 100L, -0.7), 1e-10);
    }

    @Test
    public void testDefaultConfig() {
        // A null rps config falls back to defaults: no gating, unit multipliers.
        RpsScaleComputer computer = new RpsScaleComputer(null);
        ClutchConfiguration config = ClutchConfiguration.builder().minSize(1).maxSize(1000).build();

        assertEquals(110, computer.apply(config, 100L, 0.1), 1e-10);
        assertEquals(150, computer.apply(config, 100L, 0.5), 1e-10);
        assertEquals(30, computer.apply(config, 100L, -0.7), 1e-10);
    }
}
| 8,422 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/jobmaster/clutch | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/jobmaster/clutch/rps/RpsMetricComputerTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.clutch.rps;
import static org.junit.Assert.assertEquals;
import com.netflix.control.clutch.Clutch;
import io.mantisrx.shaded.com.google.common.collect.ImmutableMap;
import java.util.Map;
import org.junit.Test;
/**
 * Test for {@link RpsMetricComputer}, which folds drop and lag signals into a single
 * effective-RPS value for the controller.
 */
public class RpsMetricComputerTest {

    @Test
    public void testApply() {
        Map<Clutch.Metric, Double> metrics = ImmutableMap.of(
                Clutch.Metric.RPS, 1000.0,
                Clutch.Metric.DROPS, 20.0,
                Clutch.Metric.LAG, 300.0,
                Clutch.Metric.SOURCEJOB_DROP, 4.0
        );
        // 1504 exceeds the plain sum (1324), so the drop terms appear to be weighted
        // by the computer — confirm against RpsMetricComputer if this expectation changes.
        double effectiveRps = new RpsMetricComputer().apply(null, metrics);
        assertEquals(1504.0, effectiveRps, 1e-10);
    }
}
| 8,423 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/jobmaster/clutch | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/jobmaster/clutch/rps/RpsClutchConfigurationSelectorTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.clutch.rps;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import com.netflix.control.clutch.Clutch;
import com.netflix.control.clutch.ClutchConfiguration;
import com.yahoo.sketches.quantiles.UpdateDoublesSketch;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.shaded.com.google.common.collect.ImmutableMap;
import io.vavr.Tuple;
import io.vavr.control.Option;
import java.util.Map;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Tests for {@link RpsClutchConfigurationSelector}, which derives a Clutch
 * {@link ClutchConfiguration} for a stage from quantile sketches of observed RPS,
 * honoring a user-supplied override config and falling back to the stage's scaling policy.
 */
public class RpsClutchConfigurationSelectorTest {
    // NOTE(review): logger is currently unused by any test in this class.
    private static final Logger logger = LoggerFactory.getLogger(RpsClutchConfigurationSelectorTest.class);

    @Test
    public void testApply() {
        // Sketch with a single RPS sample of 100 -> every percentile is 100.
        UpdateDoublesSketch rpsSketch = UpdateDoublesSketch.builder().setK(1024).build();
        rpsSketch.update(100);
        Map<Clutch.Metric, UpdateDoublesSketch> sketches = ImmutableMap.of(Clutch.Metric.RPS, rpsSketch);
        // Custom override: min/max size 1..10, cooldown 300s, explicit rope (20, 10),
        // and a trailing 0.7 (the selector asserts integralDecay of 0.3 = 1 - 0.7 below;
        // the value appears to be inverted by the selector — confirm in its source).
        ClutchRpsPIDConfig rpsConfig = new ClutchRpsPIDConfig(0.0, Tuple.of(20.0, 10.0), 0, 0, Option.none(), Option.none(), Option.none(), Option.none(), Option.none());
        io.mantisrx.server.worker.jobmaster.clutch.ClutchConfiguration customConfig = new io.mantisrx.server.worker.jobmaster.clutch.ClutchConfiguration(
                1, 10, 0, Option.none(), Option.of(300L), Option.none(), Option.none(), Option.none(), Option.none(), Option.none(), Option.of(rpsConfig), Option.none(),
                Option.of(0.7));
        StageSchedulingInfo schedulingInfo = StageSchedulingInfo.builder()
                .numberOfInstances(3)
                .machineDefinition(null)
                .scalable(true)
                .build();
        RpsClutchConfigurationSelector selector = new RpsClutchConfigurationSelector(1, schedulingInfo, customConfig);
        ClutchConfiguration config = selector.apply(sketches);
        assertEquals(Clutch.Metric.RPS, config.getMetric());
        assertEquals(100.0, config.getSetPoint(), 1e-10);
        assertEquals(1, config.getMinSize());
        assertEquals(10, config.getMaxSize());
        assertEquals(Tuple.of(20.0, 10.0), config.getRope());
        assertEquals(300L, config.getCooldownInterval());
        assertEquals(0.3, config.getIntegralDecay(), 1e-10);
    }

    @Test
    public void testScalingPolicyFallback() {
        // No custom config: min/max and cooldown come from the stage's StageScalingPolicy,
        // rope defaults to 30% of the set point, integralDecay to 0.9.
        UpdateDoublesSketch rpsSketch = UpdateDoublesSketch.builder().setK(1024).build();
        rpsSketch.update(100);
        Map<Clutch.Metric, UpdateDoublesSketch> sketches = ImmutableMap.of(Clutch.Metric.RPS, rpsSketch);
        StageScalingPolicy scalingPolicy = new StageScalingPolicy(1, 2, 9, 0, 0, 400L, null);
        StageSchedulingInfo schedulingInfo = StageSchedulingInfo.builder()
                .numberOfInstances(3)
                .scalingPolicy(scalingPolicy)
                .scalable(true)
                .build();
        RpsClutchConfigurationSelector selector = new RpsClutchConfigurationSelector(1, schedulingInfo, null);
        ClutchConfiguration config = selector.apply(sketches);
        assertEquals(Clutch.Metric.RPS, config.getMetric());
        assertEquals(100.0, config.getSetPoint(), 1e-10);
        assertEquals(2, config.getMinSize());
        assertEquals(9, config.getMaxSize());
        assertEquals(Tuple.of(30.0, 0.0), config.getRope());
        assertEquals(400L, config.getCooldownInterval());
        assertEquals(0.9, config.getIntegralDecay(), 1e-10);
    }

    @Test
    public void testSetPointQuantile() {
        // Uniform samples 1..100: expected set point 76.0 is consistent with the 75th
        // percentile of the sketch; the rope (22.8) is 30% of that set point.
        UpdateDoublesSketch rpsSketch = UpdateDoublesSketch.builder().setK(1024).build();
        for (int i = 1; i <= 100; i++) {
            rpsSketch.update(i);
        }
        Map<Clutch.Metric, UpdateDoublesSketch> sketches = ImmutableMap.of(Clutch.Metric.RPS, rpsSketch);
        StageScalingPolicy scalingPolicy = new StageScalingPolicy(1, 2, 9, 0, 0, 400L, null);
        StageSchedulingInfo schedulingInfo = StageSchedulingInfo.builder()
                .numberOfInstances(3)
                .scalingPolicy(scalingPolicy)
                .scalable(true)
                .build();
        RpsClutchConfigurationSelector selector = new RpsClutchConfigurationSelector(1, schedulingInfo, null);
        ClutchConfiguration config = selector.apply(sketches);
        assertEquals(76.0, config.getSetPoint(), 1e-10);
        assertEquals(Tuple.of(22.8, 0.0), config.getRope());
    }

    @Test
    public void testReturnSameConfigIfSetPointWithin5Percent() {
        // The selector caches its last config: small drift in the set point (within 5%)
        // returns the identical instance; a larger drift produces a fresh config.
        UpdateDoublesSketch rpsSketch = UpdateDoublesSketch.builder().setK(1024).build();
        for (int i = 1; i <= 100; i++) {
            rpsSketch.update(i);
        }
        Map<Clutch.Metric, UpdateDoublesSketch> sketches = ImmutableMap.of(Clutch.Metric.RPS, rpsSketch);
        StageScalingPolicy scalingPolicy = new StageScalingPolicy(1, 2, 9, 0, 0, 400L, null);
        StageSchedulingInfo schedulingInfo = StageSchedulingInfo.builder()
                .numberOfInstances(3)
                .scalingPolicy(scalingPolicy)
                .scalable(true)
                .build();
        RpsClutchConfigurationSelector selector = new RpsClutchConfigurationSelector(1, schedulingInfo, null);
        ClutchConfiguration config = selector.apply(sketches);
        assertEquals(76.0, config.getSetPoint(), 1e-10);
        // Adding 101..105 moves the percentile only slightly (within 5%).
        for (int i = 101; i <= 105; i++) {
            rpsSketch.update(i);
        }
        ClutchConfiguration newConfig = selector.apply(sketches);
        // Instance equality
        assertTrue(config == newConfig);
        // Adding 106..109 pushes the drift past the 5% threshold.
        for (int i = 106; i < 110; i++) {
            rpsSketch.update(i);
        }
        newConfig = selector.apply(sketches);
        assertTrue(config != newConfig);
        assertEquals(82.0, newConfig.getSetPoint(), 1e-10);
    }

    @Test
    public void testSetPointDriftAdjust() {
        // Bimodal distribution: 1..76 plus a far-higher cluster (1077..1100). Expected
        // 83.6 = 1.1 * 76, i.e. the selector appears to pad the percentile-based set
        // point by 10% when samples jump well above it — confirm in the selector source.
        UpdateDoublesSketch rpsSketch = UpdateDoublesSketch.builder().setK(1024).build();
        for (int i = 1; i <= 100; i++) {
            if (i <= 76) {
                rpsSketch.update(i);
            } else {
                rpsSketch.update(1000 + i);
            }
        }
        Map<Clutch.Metric, UpdateDoublesSketch> sketches = ImmutableMap.of(Clutch.Metric.RPS, rpsSketch);
        StageScalingPolicy scalingPolicy = new StageScalingPolicy(1, 2, 9, 0, 0, 400L, null);
        StageSchedulingInfo schedulingInfo = StageSchedulingInfo.builder()
                .numberOfInstances(3)
                .scalingPolicy(scalingPolicy)
                .scalable(true)
                .build();
        RpsClutchConfigurationSelector selector = new RpsClutchConfigurationSelector(1, schedulingInfo, null);
        ClutchConfiguration config = selector.apply(sketches);
        assertEquals(83.6, config.getSetPoint(), 1e-10);
    }
}
| 8,424 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/config/WorkerConfigurationTest.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.config;
import static org.junit.Assert.assertEquals;
import io.mantisrx.runtime.loader.config.WorkerConfiguration;
import io.mantisrx.shaded.com.google.common.collect.ImmutableMap;
import java.util.Properties;
import org.junit.Test;
/**
 * Tests parsing of the {@code mantis.taskexecutor.attributes} property
 * ("k1:v1,k2:v2" format) into the task-executor attribute map.
 */
public class WorkerConfigurationTest {

    /** Builds a WorkerConfiguration whose attribute property is set to {@code attributes}. */
    private static WorkerConfiguration configWithAttributes(String attributes) {
        Properties props = new Properties();
        // mantis.zookeeper.root is required by the factory; an empty value suffices here.
        props.setProperty("mantis.zookeeper.root", "");
        props.setProperty("mantis.taskexecutor.attributes", attributes);
        return new StaticPropertiesConfigurationFactory(props).getConfig();
    }

    @Test
    public void testTaskExecutorAttributesWhenEmptyStringIsPassed() {
        assertEquals(ImmutableMap.of(), configWithAttributes("").getTaskExecutorAttributes());
    }

    @Test
    public void testTaskExecutorAttributesWhenASingleValueKVPairIsPassed() {
        assertEquals(ImmutableMap.of("key1", "val1"),
                configWithAttributes("key1:val1").getTaskExecutorAttributes());
    }

    @Test
    public void testMoreThanOneKeyValuePair() {
        assertEquals(ImmutableMap.of("key1", "val1", "key2", "val2", "key3", "val3"),
                configWithAttributes("key1:val1,key2:val2,key3:val3").getTaskExecutorAttributes());
    }
}
| 8,425 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/test/java/io/mantisrx/server/worker/mesos/TestMesosMetricsCollector.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.mesos;
import io.mantisrx.runtime.loader.config.Usage;
import lombok.extern.slf4j.Slf4j;
import org.junit.Test;
@Slf4j
public class TestMesosMetricsCollector {
    // Two consecutive Mesos /monitor/statistics snapshots for the same executor
    // ("SpeedBump-66-worker-0-0"); the test derives a CPU-time delta from them.
    private static final String stats3 = "[\n" +
            "{\n" +
            "executor_id: \"SpeedBump-66-worker-0-0\",\n" +
            "executor_name: \"Mantis Worker Executor\",\n" +
            "framework_id: \"MantisFramework\",\n" +
            "source: \"Outliers-mock-84\",\n" +
            "statistics: \n" +
            "{\n" +
            "cpus_limit: 1,\n" +
            "cpus_system_time_secs: 0.11,\n" +
            "cpus_user_time_secs: 2.16,\n" +
            "mem_limit_bytes: 2147483648,\n" +
            "mem_rss_bytes: 97460224,\n" +
            "timestamp: 1420842205.86559\n" +
            "}\n" +
            "}\n" +
            "]";
    // Later snapshot: user CPU time has advanced from 2.16 to 3.16 seconds.
    private static final String stats4 = "[\n" +
            "{\n" +
            "executor_id: \"SpeedBump-66-worker-0-0\",\n" +
            "executor_name: \"Mantis Worker Executor\",\n" +
            "framework_id: \"MantisFramework\",\n" +
            "source: \"Outliers-mock-84\",\n" +
            "statistics: \n" +
            "{\n" +
            "cpus_limit: 1,\n" +
            "cpus_system_time_secs: 0.13,\n" +
            "cpus_user_time_secs: 3.16,\n" +
            "mem_limit_bytes: 2147483648,\n" +
            "mem_rss_bytes: 97460224,\n" +
            "timestamp: 1420842205.86559\n" +
            "}\n" +
            "}\n" +
            "]";
    // Snapshot for a different executor that also carries network rx/tx counters.
    private static final String stats5 = "[\n" +
            "{\n" +
            "executor_id: \"APIHystrixMetricsSource-5-worker-0-10\",\n" +
            "executor_name: \"Mantis Worker Executor\",\n" +
            "framework_id: \"MantisFramework\",\n" +
            "source: \"APIHystrixMetricsSource-5\",\n" +
            "statistics: {\n" +
            "cpus_limit: 8,\n" +
            "cpus_system_time_secs: 5.4,\n" +
            "cpus_user_time_secs: 67.74,\n" +
            "mem_anon_bytes: 1265774592,\n" +
            "mem_file_bytes: 48386048,\n" +
            "mem_limit_bytes: 10510925824,\n" +
            "mem_mapped_file_bytes: 1232896,\n" +
            "mem_rss_bytes: 1314697216,\n" +
            "net_rx_bytes: 994208159,\n" +
            "net_rx_dropped: 0,\n" +
            "net_rx_errors: 0,\n" +
            "net_rx_packets: 723567,\n" +
            "net_tx_bytes: 195020860,\n" +
            "net_tx_dropped: 0,\n" +
            "net_tx_errors: 0,\n" +
            "net_tx_packets: 564689,\n" +
            "timestamp: 1421792142.02197\n" +
            "}\n" +
            "}\n" +
            "]";

    /**
     * Parses the fixture snapshots via MesosMetricsCollector.getCurentUsage and logs
     * derived values (CPU-user delta, RSS in MB, network read/write in MB).
     * NOTE(review): this test contains no assertions — it only verifies that parsing
     * does not throw. Consider asserting on the parsed Usage fields.
     */
    @Test
    public void test() {
        final Usage usage1 = MesosMetricsCollector.getCurentUsage("SpeedBump-66-worker-0-0", stats3);
        final Usage usage2 = MesosMetricsCollector.getCurentUsage("SpeedBump-66-worker-0-0", stats4);
        log.info("cpuUsr=" + (usage2.getCpusUserTimeSecs() - usage1.getCpusUserTimeSecs()) + ", rss=" + (usage1.getMemRssBytes() / (1024 * 1024)));
        final Usage usage3 = MesosMetricsCollector.getCurentUsage("APIHystrixMetricsSource-5-worker-0-10", stats5);
        log.info("network read MB: " + (usage3.getNetworkReadBytes() / (1024.0 * 1024.0)) + ", write MB=" +
                (usage3.getNetworkWriteBytes() / (1024.0 * 1024.0)));
    }
}
| 8,426 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/MantisWorker.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker;
import com.sampullara.cli.Args;
import com.sampullara.cli.Argument;
import io.mantisrx.common.metrics.netty.MantisNettyEventsListenerFactory;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.loader.ClassLoaderHandle;
import io.mantisrx.runtime.loader.SinkSubscriptionStateHandler;
import io.mantisrx.runtime.loader.config.WorkerConfiguration;
import io.mantisrx.server.core.BaseService;
import io.mantisrx.server.core.Service;
import io.mantisrx.server.core.WrappedExecuteStageRequest;
import io.mantisrx.server.master.client.HighAvailabilityServices;
import io.mantisrx.server.master.client.HighAvailabilityServicesUtil;
import io.mantisrx.server.master.client.MantisMasterGateway;
import io.mantisrx.server.worker.config.ConfigurationFactory;
import io.mantisrx.server.worker.config.StaticPropertiesConfigurationFactory;
import io.mantisrx.server.worker.mesos.VirtualMachineTaskStatus;
import io.mantisrx.server.worker.mesos.VirualMachineWorkerServiceMesosImpl;
import io.mantisrx.shaded.com.google.common.collect.ImmutableList;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.time.Clock;
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import mantis.io.reactivex.netty.RxNetty;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Subscription;
import rx.subjects.PublishSubject;
/**
 * Executable entry point for the Mantis worker. The constructor assembles the services the
 * worker runs (high-availability services, the Mesos VM worker service, and an inline
 * task-execution service); {@link #start()} starts them in order and then blocks until
 * {@link #shutdown()} is called.
 */
public class MantisWorker extends BaseService {

    private static final Logger logger = LoggerFactory.getLogger(MantisWorker.class);
    @Argument(alias = "p", description = "Specify a configuration file", required = false)
    private static String propFile = "worker.properties";
    // Counted down by shutdown(); awaitTerminated() blocks on it.
    private final CountDownLatch blockUntilShutdown = new CountDownLatch(1);
    // Started in insertion order by startUp() and shut down in the same order by shutdown().
    private final List<Service> mantisServices = new LinkedList<Service>();

    /**
     * @param configFactory     source of the worker configuration
     * @param coreConfigFactory NOTE(review): unused by this constructor — retained only for
     *                          call-site compatibility.
     */
    public MantisWorker(ConfigurationFactory configFactory, io.mantisrx.server.master.client.config.ConfigurationFactory coreConfigFactory) {
        this(configFactory, Optional.empty());
    }

    /**
     * @param configFactory source of the worker configuration
     * @param jobToRun      optional pre-resolved Job; when present it is handed to the
     *                      runtime task instead of being loaded from the job artifact
     */
    public MantisWorker(ConfigurationFactory configFactory, Optional<Job> jobToRun) {
        // for rxjava
        System.setProperty("rx.ring-buffer.size", "1024");

        WorkerConfiguration config = configFactory.getConfig();
        final HighAvailabilityServices highAvailabilityServices =
            HighAvailabilityServicesUtil.createHAServices(config);
        mantisServices.add(new Service() {
            @Override
            public void start() {
                highAvailabilityServices.startAsync().awaitRunning();
            }

            @Override
            public void shutdown() {
                highAvailabilityServices.stopAsync().awaitTerminated();
            }

            @Override
            public void enterActiveMode() {
            }

            @Override
            public String toString() {
                return "HighAvailabilityServices Service";
            }
        });
        final MantisMasterGateway gateway =
            highAvailabilityServices.getMasterClientApi();

        // shutdown hook
        Thread t = new Thread() {
            @Override
            public void run() {
                shutdown();
            }
        };
        t.setDaemon(true);
        Runtime.getRuntime().addShutdownHook(t);

        // services
        // metrics
        PublishSubject<WrappedExecuteStageRequest> executeStageSubject = PublishSubject.create();
        PublishSubject<VirtualMachineTaskStatus> vmTaskStatusSubject = PublishSubject.create();
        mantisServices.add(new VirualMachineWorkerServiceMesosImpl(executeStageSubject, vmTaskStatusSubject));
        // TODO(sundaram): inline services are hard to read. Would be good to refactor this.
        mantisServices.add(new Service() {
            private RuntimeTaskImpl runtimeTaskImpl;
            private Subscription vmStatusSubscription;

            @Override
            public void start() {
                final ClassLoader classLoader;
                if (Thread.currentThread().getContextClassLoader() == null) {
                    classLoader = ClassLoader.getSystemClassLoader();
                    logger.info("Choosing system classloader {}", classLoader);
                } else {
                    classLoader = Thread.currentThread().getContextClassLoader();
                    logger.info("Choosing current thread classloader {}", classLoader);
                }
                // Only the first execute-stage request is handled; it bootstraps the task.
                executeStageSubject
                    .asObservable()
                    .first()
                    .subscribe(wrappedRequest -> {
                        try {
                            runtimeTaskImpl = new RuntimeTaskImpl();
                            // invoke internal runtimeTaskImpl initialize to inject the wrapped request.
                            runtimeTaskImpl.initialize(
                                wrappedRequest,
                                config,
                                gateway,
                                ClassLoaderHandle.fixed(classLoader).getOrResolveClassLoader(
                                    ImmutableList.of(), ImmutableList.of()),
                                SinkSubscriptionStateHandler
                                    .Factory
                                    .forEphemeralJobsThatNeedToBeKilledInAbsenceOfSubscriber(
                                        gateway,
                                        Clock.systemDefaultZone()));
                            runtimeTaskImpl.setJob(jobToRun);
                            vmStatusSubscription =
                                runtimeTaskImpl.getVMStatus().subscribe(vmTaskStatusSubject);
                            runtimeTaskImpl.startAsync();
                        } catch (Exception ex) {
                            logger.error("Failed to start task, request: {}", wrappedRequest, ex);
                            throw new RuntimeException("Failed to start task", ex);
                        }
                    });
            }

            @Override
            public void shutdown() {
                if (runtimeTaskImpl != null) {
                    try {
                        runtimeTaskImpl.stopAsync().awaitTerminated();
                    } finally {
                        // vmStatusSubscription remains null if initialize() threw before the
                        // VM-status stream was subscribed; guard so an NPE here doesn't mask
                        // the original failure during shutdown.
                        if (vmStatusSubscription != null) {
                            vmStatusSubscription.unsubscribe();
                        }
                    }
                }
            }

            @Override
            public void enterActiveMode() {
            }

            @Override
            public String toString() {
                return "TaskService";
            }
        });

        /* To run MantisWorker locally in IDE, use VirualMachineWorkerServiceLocalImpl instead
        WorkerTopologyInfo.Data workerData = new WorkerTopologyInfo.Data(data.getJobName(), data.getJobId(),
            data.getWorkerIndex(), data.getWorkerNumber(), data.getStageNumber(), data.getNumStages(), -1, -1, data.getMetricsPort());
        mantisServices.add(new VirtualMachineWorkerServiceLocalImpl(workerData, executeStageSubject, vmTaskStatusSubject));
        */
    }

    /** Loads the given property file, failing fast with a descriptive error if it can't be read. */
    private static Properties loadProperties(String propFile) {
        // config
        Properties props = new Properties();
        try (InputStream in = findResourceAsStream(propFile)) {
            props.load(in);
        } catch (IOException e) {
            throw new RuntimeException(String.format("Can't load properties from the given property file %s: %s", propFile, e.getMessage()), e);
        }
        return props;
    }

    /**
     * Finds the given resource and returns its input stream. This method seeks the file first from the current working directory,
     * and then in the class path.
     *
     * @param resourceName the name of the resource. It can either be a file name, or a path.
     * @return An {@link java.io.InputStream} instance that represents the found resource.
     * @throws java.io.FileNotFoundException if the resource exists neither on disk nor on the classpath
     */
    private static InputStream findResourceAsStream(String resourceName) throws FileNotFoundException {
        File resource = new File(resourceName);
        if (resource.exists()) {
            return new FileInputStream(resource);
        }
        InputStream is = Thread.currentThread().getContextClassLoader().getResourceAsStream(resourceName);
        if (is == null) {
            throw new FileNotFoundException(String.format("Can't find property file %s. Make sure the property file is either in your path or in your classpath ", resourceName));
        }
        return is;
    }

    /** CLI entry point: parses arguments, loads configuration, and runs the worker. */
    public static void main(String[] args) {
        try {
            Args.parse(MantisWorker.class, args);
        } catch (IllegalArgumentException e) {
            Args.usage(MantisWorker.class);
            System.exit(1);
        }
        try {
            StaticPropertiesConfigurationFactory workerConfigFactory = new StaticPropertiesConfigurationFactory(loadProperties(propFile));
            io.mantisrx.server.master.client.config.StaticPropertiesConfigurationFactory coreConfigFactory =
                new io.mantisrx.server.master.client.config.StaticPropertiesConfigurationFactory(loadProperties(propFile));
            MantisWorker worker = new MantisWorker(workerConfigFactory, coreConfigFactory);
            worker.start();
        } catch (Exception e) {
            // unexpected to get runtime exception, will exit
            logger.error("Unexpected error: " + e.getMessage(), e);
            System.exit(2);
        }
    }

    @Override
    public void start() {
        startUp();
        awaitTerminated();
    }

    /**
     * The difference between this method and MantisWorker::start is that this doesn't wait for the service to be shut down
     * while start gets blocked on the service to be shutdown.
     *
     * The reason it is this way is because of existing usages which depend upon this behavior. In the future, once
     * mantis migrates off the old architecture, this can be removed.
     */
    public void startUp() {
        logger.info("Starting Mantis Worker");
        RxNetty.useMetricListenersFactory(new MantisNettyEventsListenerFactory());
        for (Service service : mantisServices) {
            logger.info("Starting service: " + service);
            try {
                service.start();
            } catch (Throwable e) {
                logger.error(String.format("Failed to start service %s: %s", service, e.getMessage()), e);
                throw e;
            }
            logger.info("Started service: " + service);
        }
        logger.info("Started Mantis Worker successfully");
    }

    /** Blocks the calling thread until shutdown() is invoked. */
    public void awaitTerminated() {
        try {
            blockUntilShutdown.await();
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public void shutdown() {
        logger.info("Shutting down Mantis Worker");
        for (Service service : mantisServices) {
            service.shutdown();
        }
        blockUntilShutdown.countDown();
    }

    @Override
    public void enterActiveMode() {
    }
}
| 8,427 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/TrackedExecuteStageRequest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.WrappedExecuteStageRequest;
import rx.Observer;
/**
 * Pairs a {@link WrappedExecuteStageRequest} with the {@link Observer} through which
 * {@link Status} updates for that request are reported. Instances are immutable.
 */
public class TrackedExecuteStageRequest {

    // The execute-stage request being tracked; fixed at construction.
    private final WrappedExecuteStageRequest executeRequest;
    // Sink for status updates pertaining to this request; fixed at construction.
    private final Observer<Status> status;

    /**
     * @param executeRequest the execute-stage request to track
     * @param status         observer that receives status updates for the request
     */
    public TrackedExecuteStageRequest(WrappedExecuteStageRequest executeRequest,
                                      Observer<Status> status) {
        this.executeRequest = executeRequest;
        this.status = status;
    }

    /** Returns the tracked execute-stage request. */
    public WrappedExecuteStageRequest getExecuteRequest() {
        return executeRequest;
    }

    /** Returns the observer receiving status updates for the request. */
    public Observer<Status> getStatus() {
        return status;
    }
}
| 8,428 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/VirtualMachineWorkerService.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker;
import io.mantisrx.server.core.Service;
/**
 * Marker interface for the worker-side service that bridges virtual-machine (e.g. Mesos)
 * executor callbacks into the worker. Adds no members beyond {@link Service}.
 */
public interface VirtualMachineWorkerService extends Service {
}
| 8,429 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/VirtualMachineWorkerServiceLocalImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.MachineDefinitions;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.server.core.BaseService;
import io.mantisrx.server.core.ExecuteStageRequest;
import io.mantisrx.server.core.WorkerTopologyInfo;
import io.mantisrx.server.core.WrappedExecuteStageRequest;
import io.mantisrx.server.worker.mesos.VirtualMachineTaskStatus;
import io.mantisrx.server.worker.mesos.VirtualMachineTaskStatus.TYPE;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import org.apache.mesos.MesosExecutorDriver;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Observer;
import rx.functions.Action0;
import rx.functions.Action1;
import rx.schedulers.Schedulers;
import rx.subjects.PublishSubject;
/**
 * Local implementation that fakes a task launch from Mesos so a MantisWorker can be run in
 * an IDE for development. Shortly after {@link #start()} it builds a canned
 * {@link WrappedExecuteStageRequest} and pushes it to the supplied observer, and it logs
 * the task-status updates that would otherwise be reported back to Mesos.
 */
public class VirtualMachineWorkerServiceLocalImpl extends BaseService implements VirtualMachineWorkerService {

    private static final Logger logger = LoggerFactory.getLogger(VirtualMachineWorkerServiceLocalImpl.class);
    private final WorkerTopologyInfo.Data workerInfo;
    // NOTE(review): never assigned anywhere in this class; shutdown() must tolerate null.
    private MesosExecutorDriver mesosDriver;
    private ExecutorService executor;
    // Receives the fake execute-stage request produced by start().
    private Observer<WrappedExecuteStageRequest> executeStageRequestObserver;
    // Stream of task-status updates that would normally be reported to Mesos.
    private Observable<VirtualMachineTaskStatus> vmTaskStatusObservable;

    public VirtualMachineWorkerServiceLocalImpl(final WorkerTopologyInfo.Data workerInfo,
                                                Observer<WrappedExecuteStageRequest> executeStageRequestObserver,
                                                Observable<VirtualMachineTaskStatus> vmTaskStatusObservable) {
        this.workerInfo = workerInfo;
        this.executeStageRequestObserver = executeStageRequestObserver;
        this.vmTaskStatusObservable = vmTaskStatusObservable;
        executor = Executors.newSingleThreadExecutor(new ThreadFactory() {
            @Override
            public Thread newThread(Runnable r) {
                Thread t = new Thread(r, "vm_worker_mesos_executor_thread");
                t.setDaemon(true);
                return t;
            }
        });
    }

    /**
     * Builds a hard-coded ExecuteStageRequest for local development runs.
     * NOTE(review): the job jar URL and port numbers below are developer-machine specific.
     */
    private WrappedExecuteStageRequest createExecuteStageRequest() throws MalformedURLException {
        // TODO make ExecuteStageRequest params configurable
        final long timeoutToReportStartSec = 5;
        final URL jobJarUrl = new URL("file:/Users/nmahilani/Projects/Mantis/mantis-sdk/examples/sine-function/build/distributions/sine-function-1.0.zip");
        final List<Integer> ports = Arrays.asList(31015, 31013, 31014);
        final List<Parameter> params = Collections.singletonList(new Parameter("useRandom", "true"));
        final int numInstances = 1;

        //  new MachineDefinition(2, 300, 200, 1024, 2), true));
        final Map<Integer, StageSchedulingInfo> schedulingInfoMap = new HashMap<>();
        final StageSchedulingInfo stage0SchedInfo = StageSchedulingInfo.builder()
            .numberOfInstances(numInstances)
            .machineDefinition(MachineDefinitions.micro())
            .build();
        final StageSchedulingInfo stage1SchedInfo = StageSchedulingInfo.builder()
            .numberOfInstances(numInstances)
            .machineDefinition(new MachineDefinition(2, 300, 200, 1024, 2))
            .scalingPolicy(new StageScalingPolicy(1, 1, 5, 1, 1, 30,
                Collections.singletonMap(StageScalingPolicy.ScalingReason.Memory,
                    new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.Memory, 15.0, 25.0, new StageScalingPolicy.RollingCount(1, 2)))))
            .scalable(true)
            .build();

        //        schedulingInfoMap.put(0, stage0SchedInfo);
        schedulingInfoMap.put(1, stage1SchedInfo);
        final SchedulingInfo schedInfo = new SchedulingInfo(schedulingInfoMap);

        final ExecuteStageRequest executeStageRequest = new ExecuteStageRequest(workerInfo.getJobName(), workerInfo.getJobId(), workerInfo.getWorkerIndex(), workerInfo.getWorkerNumber(),
            jobJarUrl, workerInfo.getStageNumber(), workerInfo.getNumStages(), ports, timeoutToReportStartSec, workerInfo.getMetricsPort(), params, schedInfo, MantisJobDurationType.Transient,
            0, 0L, 0L, new WorkerPorts(Arrays.asList(7151, 7152, 7153, 7154, 7155)), Optional.empty(),
            "user");
        return new WrappedExecuteStageRequest(PublishSubject.<Boolean>create(), executeStageRequest);
    }

    /**
     * Invokes {@code errorHandler} unless the request subject emits an item within
     * {@code waitSeconds} (mirrors the launch-timeout behavior of the real Mesos impl).
     */
    private void setupRequestFailureHandler(long waitSeconds, Observable<Boolean> requestObservable,
                                            final Action0 errorHandler) {
        requestObservable
            .buffer(waitSeconds, TimeUnit.SECONDS, 1)
            .take(1)
            .subscribe(new Observer<List<Boolean>>() {
                @Override
                public void onCompleted() {
                }

                @Override
                public void onError(Throwable e) {
                    logger.error("onError called for request failure handler");
                    errorHandler.call();
                }

                @Override
                public void onNext(List<Boolean> booleans) {
                    logger.info("onNext called for request failure handler with items: " +
                        ((booleans == null) ? "-1" : booleans.size()));
                    if ((booleans == null) || booleans.isEmpty())
                        errorHandler.call();
                }
            });
    }

    @Override
    public void start() {
        logger.info("Starting VirtualMachineWorkerServiceLocalImpl");

        // Emit the fake execute-stage request after a short delay, off the calling thread.
        Schedulers.newThread().createWorker().schedule(new Action0() {
            @Override
            public void call() {
                try {
                    WrappedExecuteStageRequest request = createExecuteStageRequest();
                    setupRequestFailureHandler(request.getRequest().getTimeoutToReportStart(), request.getRequestSubject(),
                        new Action0() {
                            @Override
                            public void call() {
                                logger.error("launch error");
                            }
                        });
                    logger.info("onNext'ing WrappedExecuteStageRequest: {}", request.toString());
                    executeStageRequestObserver.onNext(request);
                } catch (MalformedURLException e) {
                    // Route through the logger (was e.printStackTrace()) so the failure is
                    // visible in the worker's logs rather than on raw stderr.
                    logger.error("Failed to create ExecuteStageRequest", e);
                }
            }
        }, 2, TimeUnit.SECONDS);

        // subscribe to vm task updates on current thread
        vmTaskStatusObservable.subscribe(new Action1<VirtualMachineTaskStatus>() {
            @Override
            public void call(VirtualMachineTaskStatus vmTaskStatus) {
                TYPE type = vmTaskStatus.getType();
                if (type == TYPE.COMPLETED) {
                    logger.info("Got COMPLETED state for " + vmTaskStatus.getTaskId());
                } else if (type == TYPE.STARTED) {
                    logger.info("Would send RUNNING state to mesos, worker started for " + vmTaskStatus.getTaskId());
                }
            }
        });
    }

    @Override
    public void shutdown() {
        logger.info("Unregistering Mantis Worker with Mesos executor callbacks");
        // mesosDriver is never initialized by this local implementation; guard against the
        // NullPointerException the unconditional stop() call used to throw here.
        if (mesosDriver != null) {
            mesosDriver.stop();
        }
        executor.shutdown();
    }

    @Override
    public void enterActiveMode() {}
}
| 8,430 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/WorkerExecutionOperationsNetworkStage.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker;
import static io.mantisrx.common.SystemParameters.JOB_MASTER_AUTOSCALE_METRIC_SYSTEM_PARAM;
import static io.mantisrx.server.core.utils.StatusConstants.STATUS_MESSAGE_FORMAT;
import com.mantisrx.common.utils.Closeables;
import com.netflix.spectator.api.Registry;
import io.mantisrx.common.WorkerPorts;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.metrics.spectator.SpectatorRegistryFactory;
import io.mantisrx.common.network.Endpoint;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.StageConfig;
import io.mantisrx.runtime.WorkerInfo;
import io.mantisrx.runtime.WorkerMap;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.runtime.executor.PortSelector;
import io.mantisrx.runtime.executor.StageExecutors;
import io.mantisrx.runtime.executor.WorkerConsumer;
import io.mantisrx.runtime.executor.WorkerConsumerRemoteObservable;
import io.mantisrx.runtime.executor.WorkerPublisherRemoteObservable;
import io.mantisrx.runtime.lifecycle.Lifecycle;
import io.mantisrx.runtime.lifecycle.ServiceLocator;
import io.mantisrx.runtime.loader.SinkSubscriptionStateHandler;
import io.mantisrx.runtime.loader.config.WorkerConfiguration;
import io.mantisrx.runtime.parameter.ParameterUtils;
import io.mantisrx.runtime.parameter.Parameters;
import io.mantisrx.server.core.ExecuteStageRequest;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.core.ServiceRegistry;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.Status.TYPE;
import io.mantisrx.server.core.StatusPayloads;
import io.mantisrx.server.core.WorkerAssignments;
import io.mantisrx.server.core.WorkerHost;
import io.mantisrx.server.master.client.MantisMasterGateway;
import io.mantisrx.server.worker.client.WorkerMetricsClient;
import io.mantisrx.server.worker.jobmaster.AutoScaleMetricsConfig;
import io.mantisrx.server.worker.jobmaster.JobMasterService;
import io.mantisrx.server.worker.jobmaster.JobMasterStageConfig;
import io.mantisrx.server.worker.mesos.VirtualMachineTaskStatus;
import io.mantisrx.shaded.com.google.common.base.Splitter;
import io.mantisrx.shaded.com.google.common.base.Strings;
import io.reactivex.mantis.remote.observable.RemoteRxServer;
import io.reactivex.mantis.remote.observable.RxMetrics;
import io.reactivex.mantis.remote.observable.ToDeltaEndpointInjector;
import java.io.Closeable;
import java.io.IOException;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Observer;
import rx.functions.Action0;
import rx.functions.Func1;
import rx.schedulers.Schedulers;
// stage that actually calls custom stage method
public class WorkerExecutionOperationsNetworkStage implements WorkerExecutionOperations {
    private static final Logger logger = LoggerFactory.getLogger(WorkerExecutionOperationsNetworkStage.class);
    private final WorkerConfiguration config;
    private final WorkerMetricsClient workerMetricsClient;
    // Holds the Heartbeat whose payloads (subscription state, data drops, resource usage)
    // are periodically reported via startSendingHeartbeats().
    private final AtomicReference<Heartbeat> heartbeatRef = new AtomicReference<>();
    private final SinkSubscriptionStateHandler.Factory sinkSubscriptionStateHandlerFactory;
    private final Observer<VirtualMachineTaskStatus> vmTaskStatusObserver;
    private final MantisMasterGateway mantisMasterApi;
    // Overridable via the "mantis.worker.connectionsPerEndpoint" property (default 2); see ctor.
    private int connectionsPerEndpoint = 2;
    // Overridable via the "mantis.worker.locate.spectator.registry" property (default true); see ctor.
    private boolean lookupSpectatorRegistry = true;
    private SinkSubscriptionStateHandler subscriptionStateHandler;
    private Action0 onSinkSubscribe = null;
    private Action0 onSinkUnsubscribe = null;
    // Resources to release when this stage's execution is torn down.
    private final List<Closeable> closeables = new ArrayList<>();
    private final ScheduledExecutorService scheduledExecutorService;
    private final ClassLoader classLoader;
    private Observer<Status> jobStatusObserver;
public WorkerExecutionOperationsNetworkStage(
Observer<VirtualMachineTaskStatus> vmTaskStatusObserver,
MantisMasterGateway mantisMasterApi,
WorkerConfiguration config,
WorkerMetricsClient workerMetricsClient,
SinkSubscriptionStateHandler.Factory sinkSubscriptionStateHandlerFactory,
ClassLoader classLoader) {
this.vmTaskStatusObserver = vmTaskStatusObserver;
this.mantisMasterApi = mantisMasterApi;
this.config = config;
this.workerMetricsClient = workerMetricsClient;
this.sinkSubscriptionStateHandlerFactory = sinkSubscriptionStateHandlerFactory;
this.classLoader = classLoader;
String connectionsPerEndpointStr =
ServiceRegistry.INSTANCE.getPropertiesService().getStringValue("mantis.worker.connectionsPerEndpoint", "2");
if (connectionsPerEndpointStr != null && !connectionsPerEndpointStr.equals("2")) {
connectionsPerEndpoint = Integer.parseInt(connectionsPerEndpointStr);
}
String locateSpectatorRegistry =
ServiceRegistry.INSTANCE.getPropertiesService().getStringValue("mantis.worker.locate.spectator.registry", "true");
lookupSpectatorRegistry = Boolean.valueOf(locateSpectatorRegistry);
scheduledExecutorService = new ScheduledThreadPoolExecutor(1);
}
/**
* Converts a JobSchedulingInfo object to a simple WorkerMap to be used from within the context.
* Static for easier testing.
*
* @param jobName
* @param jobId
* @param durationType
* @param js
*
* @return
*/
static WorkerMap convertJobSchedulingInfoToWorkerMap(String jobName, String jobId, MantisJobDurationType durationType, JobSchedulingInfo js) {
Map<Integer, List<WorkerInfo>> stageToWorkerInfoMap = new HashMap<>();
WorkerMap workerMap = new WorkerMap(stageToWorkerInfoMap);
if (jobName == null || jobName.isEmpty() || jobId == null || jobId.isEmpty()) {
logger.warn("Job name/jobId cannot be null in convertJobSchedulingInfoToWorkerMap");
return workerMap;
}
if (js == null || js.getWorkerAssignments() == null) {
logger.warn("JobSchedulingInfo or workerAssignments cannot be null in convertJobSchedulingInfoToWorkerMap");
return workerMap;
}
try {
Map<Integer, WorkerAssignments> workerAssignments = js.getWorkerAssignments();
Iterator<Map.Entry<Integer, WorkerAssignments>> entryIterator = workerAssignments.entrySet().iterator();
while (entryIterator.hasNext()) {
Map.Entry<Integer, WorkerAssignments> next = entryIterator.next();
int stageNo = next.getKey();
WorkerAssignments workerAssignmentsForStage = next.getValue();
Map<Integer, WorkerHost> hosts = workerAssignmentsForStage.getHosts();
if (hosts != null) {
List<WorkerInfo> workerInfoList = hosts.values().stream().map((workerHost) -> {
return generateWorkerInfo(jobName, jobId, stageNo, workerHost.getWorkerIndex(), workerHost.getWorkerNumber(), durationType, workerHost.getHost(), workerHost);
}).collect(Collectors.toList());
stageToWorkerInfoMap.put(stageNo, workerInfoList);
}
}
workerMap = new WorkerMap(stageToWorkerInfoMap);
} catch (Exception e) {
logger.warn("Exception converting JobSchedulingInfo " + js + " to worker Map " + e.getMessage());
return workerMap;
}
return workerMap;
}
private static WorkerInfo generateWorkerInfo(String jobName, String jobId, int stageNumber, int workerIndex, int workerNumber, MantisJobDurationType durationType, String host, WorkerHost workerHost) {
int sinkPort = Optional.ofNullable(workerHost.getPort()).map(ports -> (ports.size() >= 1 ? ports.get(0) : -1)).orElse(-1);
WorkerPorts wPorts = new WorkerPorts(workerHost.getMetricsPort(), 65534, 65535, workerHost.getCustomPort(), sinkPort);
return generateWorkerInfo(jobName, jobId, stageNumber, workerIndex, workerNumber, durationType, host, wPorts);
}
    // Convenience factory: assembles a WorkerInfo directly from its constituent fields.
    private static WorkerInfo generateWorkerInfo(String jobName, String jobId, int stageNumber, int workerIndex, int workerNumber, MantisJobDurationType durationType, String host, WorkerPorts workerPorts) {
        return new WorkerInfo(jobName, jobId, stageNumber, workerIndex, workerNumber, durationType, host, workerPorts);
    }
    // Thin factory for the Context handed to stage code; keeps Context construction in one place.
    private static Context generateContext(Parameters parameters, ServiceLocator serviceLocator, WorkerInfo workerInfo,
                                           MetricsRegistry metricsRegistry, Action0 completeAndExitAction, Observable<WorkerMap> workerMapObservable, ClassLoader classLoader) {
        return new Context(parameters, serviceLocator, workerInfo, metricsRegistry, completeAndExitAction, workerMapObservable, classLoader);
    }
    /**
     * Starts the periodic heartbeat: every {@code heartbeatIntervalSecs} the current
     * Heartbeat status is pushed to {@code jobStatusObserver}. Also starts the payload
     * setters that fold data-drop and resource-usage figures into the heartbeat.
     *
     * @return a Closeable that cancels the heartbeat task and closes both payload setters
     */
    private Closeable startSendingHeartbeats(final Observer<Status> jobStatusObserver, double networkMbps, long heartbeatIntervalSecs) {
        // Sink subscription state starts out "false" until a subscriber appears.
        heartbeatRef.get().setPayload(String.valueOf(StatusPayloads.Type.SubscriptionState), "false");

        Future<?> heartbeatFuture = scheduledExecutorService.scheduleWithFixedDelay(
            () -> jobStatusObserver.onNext(heartbeatRef.get().getCurrentHeartbeatStatus()),
            heartbeatIntervalSecs,
            heartbeatIntervalSecs,
            TimeUnit.SECONDS);

        // start heartbeat payload setter for incoming data drops
        DataDroppedPayloadSetter droppedPayloadSetter = new DataDroppedPayloadSetter(heartbeatRef.get());
        droppedPayloadSetter.start(heartbeatIntervalSecs);

        ResourceUsagePayloadSetter usagePayloadSetter = new ResourceUsagePayloadSetter(heartbeatRef.get(), config, networkMbps);
        usagePayloadSetter.start(heartbeatIntervalSecs);

        // cancel(false): let an in-flight heartbeat finish rather than interrupting it.
        return Closeables.combine(() -> heartbeatFuture.cancel(false), droppedPayloadSetter, usagePayloadSetter);
    }
/**
* Converts JobSchedulingInfo to a simpler WorkerMap object to be used within Context
*
* @param selfSchedulingInfo
* @param jobName
* @param jobId
* @param durationType
*
* @return
*/
private Observable<WorkerMap> createWorkerMapObservable(Observable<JobSchedulingInfo> selfSchedulingInfo, String jobName, String jobId, MantisJobDurationType durationType) {
return selfSchedulingInfo
.filter(jobSchedulingInfo -> (jobSchedulingInfo != null && jobSchedulingInfo.getWorkerAssignments() != null && !jobSchedulingInfo.getWorkerAssignments().isEmpty()))
.map((jssi) -> convertJobSchedulingInfoToWorkerMap(jobName, jobId, durationType, jssi));
}
private Observable<Integer> createSourceStageTotalWorkersObservable(Observable<JobSchedulingInfo> selfSchedulingInfo) {
return selfSchedulingInfo
.filter(jobSchedulingInfo -> (jobSchedulingInfo != null &&
jobSchedulingInfo.getWorkerAssignments() != null &&
!jobSchedulingInfo.getWorkerAssignments().isEmpty()))
.map((JobSchedulingInfo schedulingInfo) -> {
final Map<Integer, WorkerAssignments> workerAssignmentsMap = schedulingInfo.getWorkerAssignments();
final int stageNum = 1;
final WorkerAssignments workerAssignments = workerAssignmentsMap.get(stageNum);
return workerAssignments.getNumWorkers();
});
}
    // Marks the worker as started; delegates to RunningWorker so the status reaches the master.
    private void signalStarted(RunningWorker rw) {
        rw.signalStarted();
    }
    /**
     * Executes one worker of a Mantis job. Wires the shared scheduling-info stream, builds
     * the {@link RunningWorker} state holder, starts the job lifecycle and heartbeats, then
     * runs whichever role this worker plays: job master (stage 0), entire single-stage job,
     * source stage, or intermediate/sink stage. Blocks until the worker terminates; on any
     * failure it signals failure to the master and shuts the stage down.
     *
     * @param setup execution details carrying the stage request, job artifact, parameters
     *              and the status observer
     * @throws IOException on failures setting up the stage execution
     */
    @SuppressWarnings( {"rawtypes", "unchecked"})
    @Override
    public void executeStage(final ExecutionDetails setup) throws IOException {
        ExecuteStageRequest executionRequest = setup.getExecuteStageRequest().getRequest();
        jobStatusObserver = setup.getStatus();

        // Initialize the schedulingInfo observable for current job and mark it shareable to be reused by anyone interested in this data.
        //Observable<JobSchedulingInfo> selfSchedulingInfo = mantisMasterApi.schedulingChanges(executionRequest.getJobId()).switchMap((e) -> Observable.just(e).repeatWhen(x -> x.delay(5 , TimeUnit.SECONDS))).subscribeOn(Schedulers.io()).share();
        // JobSchedulingInfo has metadata around which stage runs on which set of workers.
        // replay(1).refCount() lets late subscribers immediately see the latest update.
        Observable<JobSchedulingInfo> selfSchedulingInfo =
                mantisMasterApi.schedulingChanges(executionRequest.getJobId())
                        .subscribeOn(Schedulers.io())
                        .replay(1)
                        .refCount()
                        .doOnSubscribe(() -> logger.info("mantisApi schedulingChanges subscribe"))
                        .doOnUnsubscribe(() -> logger.info("mantisApi schedulingChanges stream unsub."))
                        .doOnError(e -> logger.warn("mantisApi schedulingChanges stream error:", e))
                        .doOnCompleted(() -> logger.info("mantisApi schedulingChanges stream completed."));

        // represents datastructure that has the current worker information and what it represents in the overall operator DAG
        WorkerInfo workerInfo = generateWorkerInfo(executionRequest.getJobName(), executionRequest.getJobId(),
                executionRequest.getStage(), executionRequest.getWorkerIndex(),
                executionRequest.getWorkerNumber(), executionRequest.getDurationType(), "host", executionRequest.getWorkerPorts());

        // observable that represents the number of workers for the source stage
        final Observable<Integer> sourceStageTotalWorkersObs = createSourceStageTotalWorkersObservable(selfSchedulingInfo);
        RunningWorker.Builder rwBuilder = new RunningWorker.Builder()
                .job(setup.getMantisJob())
                .schedulingInfo(executionRequest.getSchedulingInfo())
                .stageTotalWorkersObservable(sourceStageTotalWorkersObs)
                .jobName(executionRequest.getJobName())
                .stageNum(executionRequest.getStage())
                .workerIndex(executionRequest.getWorkerIndex())
                .workerNum(executionRequest.getWorkerNumber())
                .totalStages(executionRequest.getTotalNumStages())
                .metricsPort(executionRequest.getMetricsPort())
                .ports(executionRequest.getPorts().iterator())
                .jobStatusObserver(setup.getStatus())
                .requestSubject(setup.getExecuteStageRequest().getRequestSubject())
                .workerInfo(workerInfo)
                .vmTaskStatusObservable(vmTaskStatusObserver)
                .hasJobMaster(executionRequest.getHasJobMaster())
                .jobId(executionRequest.getJobId());
        if (executionRequest.getStage() == 0) {
            // stage 0 is reserved for the job master
            rwBuilder = rwBuilder.stage(new JobMasterStageConfig("jobmasterconfig"));
        } else {
            // stages list is zero indexed, stage numbers start at 1
            rwBuilder = rwBuilder.stage((StageConfig) setup.getMantisJob()
                    .getStages().get(executionRequest.getStage() - 1));
        }
        final RunningWorker rw = rwBuilder.build();

        if (rw.getStageNum() == rw.getTotalStagesNet()) {
            // set up subscription state handler only for sink (last) stage
            setupSubscriptionStateHandler(setup.getExecuteStageRequest().getRequest());
        }

        logger.info("Running worker info: " + rw);

        rw.signalStartedInitiated();

        try {
            logger.info(">>>>>>>>>>>>>>>>Calling lifecycle.startup()");
            Lifecycle lifecycle = rw.getJob().getLifecycle();
            lifecycle.startup();
            ServiceLocator serviceLocator = lifecycle.getServiceLocator();

            if (lookupSpectatorRegistry) {
                try {
                    final Registry spectatorRegistry = serviceLocator.service(Registry.class);
                    SpectatorRegistryFactory.setRegistry(spectatorRegistry);
                } catch (Throwable t) {
                    logger.error("failed to init spectator registry using service locator, falling back to {}",
                            SpectatorRegistryFactory.getRegistry().getClass().getCanonicalName());
                }
            }

            // create job context
            Parameters parameters = ParameterUtils
                    .createContextParameters(rw.getJob().getParameterDefinitions(),
                            setup.getParameters());
            final Context context = generateContext(parameters, serviceLocator, workerInfo, MetricsRegistry.getInstance(),
                    () -> {
                        rw.signalCompleted();
                        // wait for completion signal to go to the master and us getting killed. Upon timeout, exit.
                        try {Thread.sleep(60000);} catch (InterruptedException ie) {
                            logger.warn("Unexpected exception sleeping: " + ie.getMessage());
                        }
                        System.exit(0);
                    }, createWorkerMapObservable(selfSchedulingInfo, executionRequest.getJobName(), executionRequest.getJobId(), executionRequest.getDurationType()),
                    classLoader
            );
            //context.setPrevStageCompletedObservable(createPrevStageCompletedObservable(selfSchedulingInfo, rw.getJobId(), rw.getStageNum()));

            rw.setContext(context);

            // setup heartbeats
            heartbeatRef.set(new Heartbeat(rw.getJobId(),
                    rw.getStageNum(), rw.getWorkerIndex(), rw.getWorkerNum(), config.getTaskExecutorHostName()));
            final double networkMbps = executionRequest.getSchedulingInfo().forStage(rw.getStageNum()).getMachineDefinition().getNetworkMbps();
            Closeable heartbeatCloseable = startSendingHeartbeats(rw.getJobStatus(), networkMbps, executionRequest.getHeartbeatIntervalSecs());
            closeables.add(heartbeatCloseable);

            // execute stage
            if (rw.getStageNum() == 0) {
                logger.info("JobId: " + rw.getJobId() + ", executing Job Master");

                final AutoScaleMetricsConfig autoScaleMetricsConfig = new AutoScaleMetricsConfig();

                // Temporary workaround to enable auto-scaling by custom metric in Job Master. This will be revisited to get the entire autoscaling config
                // for a job as a System parameter in the JobMaster
                final String autoScaleMetricString = (String) parameters.get(JOB_MASTER_AUTOSCALE_METRIC_SYSTEM_PARAM, "");
                if (!Strings.isNullOrEmpty(autoScaleMetricString)) {
                    // expected format: <metricGroup>::<metricName>::<aggregation algo>
                    final List<String> tokens = Splitter.on("::").omitEmptyStrings().trimResults().splitToList(autoScaleMetricString);
                    if (tokens.size() == 3) {
                        final String metricGroup = tokens.get(0);
                        final String metricName = tokens.get(1);
                        final String algo = tokens.get(2);
                        try {
                            final AutoScaleMetricsConfig.AggregationAlgo aggregationAlgo = AutoScaleMetricsConfig.AggregationAlgo.valueOf(algo);
                            logger.info("registered UserDefined auto scale metric {}:{} algo {}", metricGroup, metricName, aggregationAlgo);
                            autoScaleMetricsConfig.addUserDefinedMetric(metricGroup, metricName, aggregationAlgo);
                        } catch (IllegalArgumentException e) {
                            final String errorMsg = String.format("ERROR: Invalid algorithm value %s for param %s (algo should be one of %s)",
                                    autoScaleMetricsConfig, JOB_MASTER_AUTOSCALE_METRIC_SYSTEM_PARAM,
                                    Arrays.stream(AutoScaleMetricsConfig.AggregationAlgo.values()).map(a -> a.name()).collect(Collectors.toList()));
                            logger.error(errorMsg);
                            throw new RuntimeException(errorMsg);
                        }
                    } else {
                        final String errorMsg = String.format("ERROR: Invalid value %s for param %s", autoScaleMetricString, JOB_MASTER_AUTOSCALE_METRIC_SYSTEM_PARAM);
                        logger.error(errorMsg);
                        throw new RuntimeException(errorMsg);
                    }
                } else {
                    logger.info("param {} is null or empty", JOB_MASTER_AUTOSCALE_METRIC_SYSTEM_PARAM);
                }

                JobMasterService jobMasterService = new JobMasterService(rw.getJobId(), rw.getSchedulingInfo(),
                        workerMetricsClient, autoScaleMetricsConfig, mantisMasterApi, rw.getContext(), rw.getOnCompleteCallback(), rw.getOnErrorCallback(), rw.getOnTerminateCallback());
                jobMasterService.start();
                closeables.add(jobMasterService::shutdown);

                signalStarted(rw);
                // block until worker terminates
                rw.waitUntilTerminate();
            } else if (rw.getStageNum() == 1 && rw.getTotalStagesNet() == 1) {
                logger.info("JobId: " + rw.getJobId() + ", single stage job, executing entire job");
                // single stage, execute entire job on this machine
                PortSelector portSelector = new PortSelector() {
                    @Override
                    public int acquirePort() {
                        return rw.getPorts().next();
                    }
                };
                RxMetrics rxMetrics = new RxMetrics();
                closeables.add(StageExecutors.executeSingleStageJob(rw.getJob().getSource(), rw.getStage(),
                        rw.getJob().getSink(), portSelector, rxMetrics, rw.getContext(),
                        rw.getOnTerminateCallback(), rw.getWorkerIndex(),
                        rw.getSourceStageTotalWorkersObservable(),
                        onSinkSubscribe, onSinkUnsubscribe,
                        rw.getOnCompleteCallback(), rw.getOnErrorCallback()));
                signalStarted(rw);
                // block until worker terminates
                rw.waitUntilTerminate();
            } else {
                logger.info("JobId: " + rw.getJobId() + ", executing a multi-stage job, stage: " + rw.getStageNum());
                if (rw.getStageNum() == 1) {
                    // execute source stage: publish its output as a remote observable for stage 2
                    String remoteObservableName = rw.getJobId() + "_" + rw.getStageNum();
                    StageSchedulingInfo currentStageSchedulingInfo = rw.getSchedulingInfo().forStage(1);
                    WorkerPublisherRemoteObservable publisher
                            = new WorkerPublisherRemoteObservable<>(rw.getPorts().next(),
                            remoteObservableName, numWorkersAtStage(selfSchedulingInfo, rw.getJobId(), rw.getStageNum() + 1),
                            rw.getJobName());
                    closeables.add(StageExecutors.executeSource(rw.getWorkerIndex(), rw.getJob().getSource(),
                            rw.getStage(), publisher, rw.getContext(), rw.getSourceStageTotalWorkersObservable()));

                    logger.info("JobId: " + rw.getJobId() + " stage: " + rw.getStageNum() + ", serving remote observable for source with name: " + remoteObservableName);
                    RemoteRxServer server = publisher.getServer();
                    RxMetrics rxMetrics = server.getMetrics();
                    MetricsRegistry.getInstance().registerAndGet(rxMetrics.getCountersAndGauges());

                    signalStarted(rw);
                    logger.info("JobId: " + rw.getJobId() + " stage: " + rw.getStageNum() + ", blocking until source observable completes");
                    server.blockUntilServerShutdown();
                } else {
                    // execute intermediate stage or last stage plus sink
                    executeNonSourceStage(selfSchedulingInfo, rw);
                }
            }
            logger.info("Calling lifecycle.shutdown()");
            lifecycle.shutdown();
        } catch (Throwable t) {
            logger.warn("Error during executing stage; shutting down.", t);
            rw.signalFailed(t);
            shutdownStage();
        }
    }
    /**
     * Wires the sink subscribe/unsubscribe callbacks to a freshly-created subscription-state
     * handler and starts the handler. The callbacks also mirror the subscription state into
     * the heartbeat payload so the master can observe it.
     *
     * @param executeStageRequest request handed to the handler factory
     */
    private void setupSubscriptionStateHandler(ExecuteStageRequest executeStageRequest) {
        final SinkSubscriptionStateHandler subscriptionStateHandler =
                sinkSubscriptionStateHandlerFactory.apply(executeStageRequest);
        onSinkSubscribe = () -> {
            // TODO remove this line to set heartbeat payloads when master has upgraded to having jobMaster design
            heartbeatRef.get().setPayload(StatusPayloads.Type.SubscriptionState.toString(), Boolean.toString(true));
            subscriptionStateHandler.onSinkSubscribed();
        };
        onSinkUnsubscribe = () -> {
            // TODO remove this line to set heartbeat payloads when master has upgraded to having jobMaster design
            heartbeatRef.get().setPayload(StatusPayloads.Type.SubscriptionState.toString(), Boolean.toString(false));
            subscriptionStateHandler.onSinkUnsubscribed();
        };
        this.subscriptionStateHandler = subscriptionStateHandler;
        try {
            // fail fast if the handler cannot reach RUNNING within 5 seconds
            this.subscriptionStateHandler.startAsync().awaitRunning(Duration.of(5, ChronoUnit.SECONDS));
        } catch (TimeoutException e) {
            logger.error("Failed to start subscriptionStateHandler: ", e);
            throw new RuntimeException(e);
        }
    }
    /**
     * Executes a non-source stage: connects to the previous stage's workers, then runs
     * either the sink (last) stage — blocking until the sink completes — or an intermediate
     * stage that republishes its results as a remote observable for the next stage.
     *
     * @param selfSchedulingInfo shared scheduling-info stream for this job
     * @param rw                 state of the currently running worker
     */
    @SuppressWarnings( {"rawtypes", "unchecked"})
    private void executeNonSourceStage(Observable<JobSchedulingInfo> selfSchedulingInfo, final RunningWorker rw) {
        {
            // execute either intermediate (middle) stage or last+sink
            StageConfig previousStageExecuting = (StageConfig) rw.getJob().getStages()
                    .get(rw.getStageNum() - 2); // note, stages are zero indexed
            StageSchedulingInfo previousSchedulingInfo = rw.getSchedulingInfo().forStage(rw.getStageNum() - 1);
            int numInstanceAtPreviousStage = previousSchedulingInfo.getNumberOfInstances();
            // once the stage is done, stop reacting to scheduling changes for connections
            AtomicBoolean acceptSchedulingChanges = new AtomicBoolean(true);

            WorkerConsumer consumer = connectToObservableAtPreviousStages(selfSchedulingInfo, rw.getJobId(), rw.getStageNum() - 1,
                    numInstanceAtPreviousStage, previousStageExecuting, acceptSchedulingChanges,
                    rw.getJobStatus(), rw.getStageNum(), rw.getWorkerIndex(), rw.getWorkerNum());

            final int workerPort = rw.getPorts().next();

            if (rw.getStageNum() == rw.getTotalStagesNet()) {
                // last+sink stage
                logger.info(
                        "JobId: {}, executing sink stage: {}, signaling started", rw.getJobId(), rw.getStageNum());
                rw.getJobStatus().onNext(new Status(rw.getJobId(), rw.getStageNum(), rw.getWorkerIndex(),
                        rw.getWorkerNum(),
                        TYPE.INFO, String.format(STATUS_MESSAGE_FORMAT, rw.getStageNum(), rw.getWorkerIndex(), rw.getWorkerNum(), "running"),
                        MantisJobState.Started));

                PortSelector portSelector = new PortSelector() {
                    @Override
                    public int acquirePort() {
                        return workerPort;
                    }
                };

                RxMetrics rxMetrics = new RxMetrics();
                MetricsRegistry.getInstance().registerAndGet(rxMetrics.getCountersAndGauges());

                // latch released by the sink's completion callback
                final CountDownLatch blockUntilComplete = new CountDownLatch(1);
                Action0 countDownLatch = new Action0() {
                    @Override
                    public void call() {
                        blockUntilComplete.countDown();
                    }
                };
                closeables.add(StageExecutors.executeSink(consumer, rw.getStage(),
                        rw.getJob().getSink(), portSelector, rxMetrics,
                        rw.getContext(), countDownLatch, onSinkSubscribe, onSinkUnsubscribe,
                        rw.getOnCompleteCallback(), rw.getOnErrorCallback()));
                // block until completes
                try {
                    blockUntilComplete.await();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
                acceptSchedulingChanges.set(false);
            } else {
                // intermediate stage
                logger.info("JobId: " + rw.getJobId() + ", executing intermediate stage: " + rw.getStageNum());

                int stageNumToExecute = rw.getStageNum();
                String jobId = rw.getJobId();
                String remoteObservableName = jobId + "_" + stageNumToExecute;
                WorkerPublisherRemoteObservable publisher
                        = new WorkerPublisherRemoteObservable<>(workerPort, remoteObservableName,
                        numWorkersAtStage(selfSchedulingInfo, rw.getJobId(), rw.getStageNum() + 1), rw.getJobName());
                closeables.add(StageExecutors.executeIntermediate(consumer, rw.getStage(), publisher,
                        rw.getContext()));
                RemoteRxServer server = publisher.getServer();

                logger.info("JobId: " + jobId + " stage: " + stageNumToExecute + ", serving intermediate remote observable with name: " + remoteObservableName);
                RxMetrics rxMetrics = server.getMetrics();
                MetricsRegistry.getInstance().registerAndGet(rxMetrics.getCountersAndGauges());
                // send running signal only after server is started
                signalStarted(rw);
                logger.info("JobId: " + jobId + " stage: " + stageNumToExecute + ", blocking until intermediate observable completes");
                server.blockUntilServerShutdown();
                acceptSchedulingChanges.set(false);
            }
        }
    }
private Observable<Integer> numWorkersAtStage(Observable<JobSchedulingInfo> selfSchedulingInfo, String jobId, final int stageNum) {
//return mantisMasterApi.schedulingChanges(jobId)
return selfSchedulingInfo
.distinctUntilChanged((prevJobSchedInfo, currentJobSchedInfo) -> (!prevJobSchedInfo.equals(currentJobSchedInfo)) ? false : true)
.flatMap((Func1<JobSchedulingInfo, Observable<WorkerAssignments>>) schedulingChange -> {
Map<Integer, WorkerAssignments> assignments = schedulingChange.getWorkerAssignments();
if (assignments != null && !assignments.isEmpty()) {
return Observable.from(assignments.values());
} else {
return Observable.empty();
}
})
.filter(assignments -> (assignments.getStage() == stageNum))
.map(assignments -> {
return assignments.getNumWorkers() * connectionsPerEndpoint; // scale by numConnections
}).share();
}
    /**
     * Builds a {@link WorkerConsumer} that connects to the remote observable served by the
     * previous stage's workers. Connection endpoints are derived from master scheduling
     * updates: every host in Started state contributes {@code connectionsPerEndpoint}
     * endpoints, and the delta injector adds/removes connections as assignments change.
     *
     * @param selfSchedulingInfo         shared scheduling-info stream for this job
     * @param jobId                      id of the job
     * @param previousStageNum           stage number whose output is consumed
     * @param numInstanceAtPreviousStage worker count at the previous stage (unused in this body)
     * @param previousStage              stage config of the previous stage (unused in this body)
     * @param acceptSchedulingChanges    flag that, when false, stops reacting to updates
     * @param jobStatusObserver          status observer (unused in this body)
     * @param stageNumToExecute          stage number of this worker (used in endpoint ids)
     * @param workerIndex                index of this worker (used in endpoint ids)
     * @param workerNumber               number of this worker (unused in this body)
     *
     * @return consumer wired to the previous stage's remote observable
     */
    @SuppressWarnings( {"rawtypes"})
    private WorkerConsumer connectToObservableAtPreviousStages(Observable<JobSchedulingInfo> selfSchedulingInfo, final String jobId, final int previousStageNum,
                                                               int numInstanceAtPreviousStage, final StageConfig previousStage, final AtomicBoolean acceptSchedulingChanges,
                                                               final Observer<Status> jobStatusObserver, final int stageNumToExecute, final int workerIndex, final int workerNumber) {
        logger.info("Watching for scheduling changes");

        //Observable<List<Endpoint>> schedulingUpdates = mantisMasterApi.schedulingChanges(jobId)
        Observable<List<Endpoint>> schedulingUpdates = selfSchedulingInfo
                .flatMap((Func1<JobSchedulingInfo, Observable<WorkerAssignments>>) schedulingChange -> {
                    Map<Integer, WorkerAssignments> assignments = schedulingChange.getWorkerAssignments();
                    if (assignments != null && !assignments.isEmpty()) {
                        return Observable.from(assignments.values());
                    } else {
                        return Observable.empty();
                    }
                })
                .filter(assignments -> (assignments.getStage() == previousStageNum) &&
                        acceptSchedulingChanges.get())
                .map(assignments -> {
                    List<Endpoint> endpoints = new LinkedList<>();
                    for (WorkerHost host : assignments.getHosts().values()) {
                        // only connect to workers that have fully started
                        if (host.getState() == MantisJobState.Started) {
                            logger.info("Received scheduling update from master, connect request for host: " + host.getHost() + " port: " + host.getPort() + " state: " + host.getState() +
                                    " adding: " + connectionsPerEndpoint + " connections to host");
                            for (int i = 1; i <= connectionsPerEndpoint; i++) {
                                final String endpointId = "stage_" + stageNumToExecute + "_index_" + Integer.toString(workerIndex) + "_partition_" + i;
                                logger.info("Adding endpoint to endpoint injector to be considered for add, with id: " + endpointId);
                                endpoints.add(new Endpoint(host.getHost(), host.getPort().get(0),
                                        endpointId));
                            }
                        }
                    }
                    return endpoints;
                })
                .filter(t1 -> (t1.size() > 0));

        String name = jobId + "_" + previousStageNum;
        return new WorkerConsumerRemoteObservable(name,
                new ToDeltaEndpointInjector(schedulingUpdates));
    }
@Override
public void shutdownStage() throws IOException {
if (jobStatusObserver != null) {
final Heartbeat heartbeat = heartbeatRef.get();
final Status status = new Status(heartbeat.getJobId(), heartbeat.getStageNumber(), heartbeat.getWorkerIndex(), heartbeat.getWorkerNumber(),
Status.TYPE.INFO, String.format(STATUS_MESSAGE_FORMAT, heartbeat.getStageNumber(), heartbeat.getWorkerIndex(), heartbeat.getWorkerNumber(), "shutdown"),
MantisJobState.Failed);
jobStatusObserver.onNext(status);
}
if (subscriptionStateHandler != null) {
try {
subscriptionStateHandler.stopAsync();
} catch (Exception e) {
logger.error("Failed to stop subscription state handler successfully", e);
} finally {
subscriptionStateHandler = null;
}
}
Closeables.combine(closeables).close();
scheduledExecutorService.shutdownNow();
logger.info("Shutdown completed");
}
}
| 8,431 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/RuntimeTaskImpl.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker;
import io.mantisrx.common.JsonSerializer;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.loader.RuntimeTask;
import io.mantisrx.runtime.loader.SinkSubscriptionStateHandler;
import io.mantisrx.runtime.loader.config.WorkerConfiguration;
import io.mantisrx.runtime.loader.config.WorkerConfigurationUtils;
import io.mantisrx.runtime.loader.config.WorkerConfigurationWritable;
import io.mantisrx.server.agent.metrics.cgroups.CgroupsMetricsCollector;
import io.mantisrx.server.core.ExecuteStageRequest;
import io.mantisrx.server.core.Service;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.WrappedExecuteStageRequest;
import io.mantisrx.server.core.metrics.MetricsFactory;
import io.mantisrx.server.master.client.HighAvailabilityServices;
import io.mantisrx.server.master.client.HighAvailabilityServicesUtil;
import io.mantisrx.server.master.client.MantisMasterGateway;
import io.mantisrx.server.master.client.TaskStatusUpdateHandler;
import io.mantisrx.server.worker.client.WorkerMetricsClient;
import io.mantisrx.server.worker.mesos.VirtualMachineTaskStatus;
import io.mantisrx.shaded.com.google.common.util.concurrent.AbstractIdleService;
import java.io.IOException;
import java.time.Clock;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.util.UserCodeClassLoader;
import rx.Observable;
import rx.functions.Func1;
import rx.schedulers.Schedulers;
import rx.subjects.PublishSubject;
@Slf4j
public class RuntimeTaskImpl extends AbstractIdleService implements RuntimeTask {

    private WrappedExecuteStageRequest wrappedExecuteStageRequest;

    private WorkerConfiguration config;

    private final List<Service> mantisServices = new ArrayList<>();

    // HA service instance on TaskExecutor path. Could be null in mesos task.
    private HighAvailabilityServices highAvailabilityServices;

    private TaskStatusUpdateHandler taskStatusUpdateHandler;

    private MantisMasterGateway masterMonitor;

    private UserCodeClassLoader userCodeClassLoader;

    private SinkSubscriptionStateHandler.Factory sinkSubscriptionStateHandlerFactory;

    // stream of per-task status observables; flattened by getStatus()
    private final PublishSubject<Observable<Status>> tasksStatusSubject;

    private final PublishSubject<VirtualMachineTaskStatus> vmTaskStatusSubject = PublishSubject.create();

    // optionally pre-materialized job; when absent the job provider class is used instead
    private Optional<Job> mantisJob = Optional.empty();

    private ExecuteStageRequest executeStageRequest;

    public RuntimeTaskImpl() {
        this.tasksStatusSubject = PublishSubject.create();
    }

    public RuntimeTaskImpl(PublishSubject<Observable<Status>> tasksStatusSubject) {
        this.tasksStatusSubject = tasksStatusSubject;
    }

    /**
     * Initialization path used on the TaskExecutor: both the stage request and the worker
     * configuration arrive serialized as JSON strings.
     */
    @Override
    public void initialize(
            String executeStageRequestString, // request string + publishSubject replace?
            String workerConfigurationString, // config string
            UserCodeClassLoader userCodeClassLoader) {
        try {
            log.info("Creating runtimeTaskImpl.");
            log.info("runtimeTaskImpl workerConfigurationString: {}", workerConfigurationString);
            log.info("runtimeTaskImpl executeStageRequestString: {}", executeStageRequestString);
            JsonSerializer ser = new JsonSerializer();
            WorkerConfigurationWritable configWritable =
                    WorkerConfigurationUtils.stringToWorkerConfiguration(workerConfigurationString);
            this.config = configWritable;

            // local renamed so it no longer shadows the executeStageRequest field
            ExecuteStageRequest request =
                    ser.fromJSON(executeStageRequestString, ExecuteStageRequest.class);
            this.wrappedExecuteStageRequest =
                    new WrappedExecuteStageRequest(PublishSubject.create(), request);

            log.info("Picking Cgroups metrics collector.");
            configWritable.setMetricsCollector(CgroupsMetricsCollector.valueOf(System.getProperties()));
        } catch (IOException e) {
            throw new RuntimeException(e);
        }

        this.highAvailabilityServices = HighAvailabilityServicesUtil.createHAServices(config);
        this.executeStageRequest = wrappedExecuteStageRequest.getRequest();
        this.masterMonitor = this.highAvailabilityServices.getMasterClientApi();
        this.userCodeClassLoader = userCodeClassLoader;
        this.sinkSubscriptionStateHandlerFactory =
                SinkSubscriptionStateHandler.Factory.forEphemeralJobsThatNeedToBeKilledInAbsenceOfSubscriber(
                        this.highAvailabilityServices.getMasterClientApi(),
                        Clock.systemDefaultZone());

        // link task status to status updateHandler
        this.taskStatusUpdateHandler = TaskStatusUpdateHandler.forReportingToGateway(masterMonitor);
        this.getStatus().observeOn(Schedulers.io())
                .subscribe(status -> this.taskStatusUpdateHandler.onStatusUpdate(status));
    }

    /**
     * Initialize path used by Mesos driver. This is not part of the RuntimeTask interface but only invoked directly
     * via mesos startup.
     */
    protected void initialize(
            WrappedExecuteStageRequest wrappedExecuteStageRequest,
            WorkerConfiguration config,
            MantisMasterGateway masterMonitor,
            UserCodeClassLoader userCodeClassLoader,
            SinkSubscriptionStateHandler.Factory sinkSubscriptionStateHandlerFactory) {
        log.info("initialize RuntimeTaskImpl on injected ExecuteStageRequest: {}",
                wrappedExecuteStageRequest.getRequest());
        this.wrappedExecuteStageRequest = wrappedExecuteStageRequest;
        this.executeStageRequest = wrappedExecuteStageRequest.getRequest();
        this.config = config;
        this.masterMonitor = masterMonitor;
        this.userCodeClassLoader = userCodeClassLoader;
        this.sinkSubscriptionStateHandlerFactory = sinkSubscriptionStateHandlerFactory;

        // link task status to status updateHandler
        this.taskStatusUpdateHandler = TaskStatusUpdateHandler.forReportingToGateway(masterMonitor);
        this.getStatus().observeOn(Schedulers.io())
                .subscribe(status -> this.taskStatusUpdateHandler.onStatusUpdate(status));
    }

    public void setJob(Optional<Job> job) {
        this.mantisJob = job;
    }

    @Override
    protected void startUp() throws Exception {
        try {
            log.info("Starting current task {}", this);
            if (this.highAvailabilityServices != null && !this.highAvailabilityServices.isRunning()) {
                this.highAvailabilityServices.startAsync().awaitRunning();
            }
            doRun();
        } catch (Exception e) {
            log.error("Failed executing the task {}", executeStageRequest, e);
            throw e;
        }
    }

    // Builds the per-task services (metrics server, stage execution service) and submits the
    // wrapped request once every service has started.
    private void doRun() throws Exception {
        // shared state
        PublishSubject<WrappedExecuteStageRequest> executeStageSubject = PublishSubject.create();

        mantisServices.add(MetricsFactory.newMetricsServer(config, executeStageRequest));
        // [TODO:andyz] disable noOp publisher for now. Need to fix the full publisher injection.
        // mantisServices.add(MetricsFactory.newMetricsPublisher(config, executeStageRequest));
        WorkerMetricsClient workerMetricsClient = new WorkerMetricsClient(masterMonitor);

        mantisServices.add(new ExecuteStageRequestService(
                executeStageSubject,
                tasksStatusSubject,
                new WorkerExecutionOperationsNetworkStage(
                        vmTaskStatusSubject,
                        masterMonitor,
                        config,
                        workerMetricsClient,
                        sinkSubscriptionStateHandlerFactory,
                        userCodeClassLoader.asClassLoader()),
                getJobProviderClass(),
                userCodeClassLoader,
                mantisJob));

        log.info("Starting Mantis Worker for task {}", this);
        for (Service service : mantisServices) {
            log.info("Starting service: " + service.getClass().getName());
            try {
                service.start();
            } catch (Throwable e) {
                log.error("Failed to start service {}", service, e);
                throw e;
            }
        }

        // now that all the services have been started, let's submit the request
        executeStageSubject.onNext(wrappedExecuteStageRequest);
    }

    @Override
    protected void shutDown() {
        log.info("Attempting to cancel task {}", this);
        // Best-effort: try to stop every service even if one fails (previously the loop
        // rethrew immediately, leaking all services after the failing one), then surface
        // the first failure.
        Throwable firstFailure = null;
        for (Service service : mantisServices) {
            log.info("Stopping service: " + service.getClass().getName());
            try {
                service.shutdown();
            } catch (Throwable e) {
                log.error(String.format("Failed to stop service %s: %s", service, e.getMessage()), e);
                if (firstFailure == null) {
                    firstFailure = e;
                }
            }
        }
        if (firstFailure instanceof RuntimeException) {
            throw (RuntimeException) firstFailure;
        } else if (firstFailure instanceof Error) {
            throw (Error) firstFailure;
        } else if (firstFailure != null) {
            throw new RuntimeException(firstFailure);
        }
    }

    private Optional<String> getJobProviderClass() {
        return executeStageRequest.getNameOfJobProviderClass();
    }

    // Flattens the per-task status observables into a single status stream.
    protected Observable<Status> getStatus() {
        return tasksStatusSubject
                .flatMap((Func1<Observable<Status>, Observable<Status>>) status -> status);
    }

    public Observable<VirtualMachineTaskStatus> getVMStatus() {
        return vmTaskStatusSubject;
    }

    public String getWorkerId() {
        return executeStageRequest.getWorkerId().getId();
    }
}
| 8,432 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/ExecutionDetails.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.WrappedExecuteStageRequest;
import java.util.List;
import rx.Observer;
@SuppressWarnings("rawtypes") // suppressed due to unknown mantis job type
public class ExecutionDetails {

    // Immutable holder: all fields are assigned once in the constructor and never mutated
    // (there are no setters), so they are declared final.
    private final ClassLoader classLoader;
    private final WrappedExecuteStageRequest executeStageRequest;
    private final Observer<Status> status;
    private final Job mantisJob;
    private final List<Parameter> parameters;

    /**
     * Bundles everything needed to execute a stage of a Mantis job.
     *
     * @param executeStageRequest the wrapped stage-execution request
     * @param status              observer receiving status updates for the execution
     * @param mantisJob           the materialized job to run
     * @param classLoader         class loader holding the job's user code
     * @param parameters          job parameters supplied at submission time
     */
    public ExecutionDetails(WrappedExecuteStageRequest executeStageRequest, Observer<Status> status,
                            Job mantisJob, ClassLoader classLoader, List<Parameter> parameters) {
        this.executeStageRequest = executeStageRequest;
        this.status = status;
        this.mantisJob = mantisJob;
        this.classLoader = classLoader;
        this.parameters = parameters;
    }

    public List<Parameter> getParameters() {
        return parameters;
    }

    public ClassLoader getClassLoader() {
        return classLoader;
    }

    public Observer<Status> getStatus() {
        return status;
    }

    public Job getMantisJob() {
        return mantisJob;
    }

    public WrappedExecuteStageRequest getExecuteStageRequest() {
        return executeStageRequest;
    }
}
| 8,433 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/InstantiationUtil.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Modifier;
/**
* This code is a subset of InstantiationUtil class in Flink.
* {@see <a href="https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/util/InstantiationUtil.java#L419">InstantiationUtil</a>}
*/
public class InstantiationUtil {
    /**
     * Creates a new instance of the given class via its nullary constructor.
     *
     * @param <T> The generic type of the class.
     * @param clazz The class to instantiate.
     * @return An instance of the given class.
     * @throws RuntimeException Thrown, if the class could not be instantiated. The exception
     *     contains a detailed message about the reason why the instantiation failed.
     */
    public static <T> T instantiate(Class<T> clazz) {
        if (clazz == null) {
            throw new NullPointerException();
        }
        // try to instantiate the class. Class#newInstance() is deprecated since Java 9
        // (it sneakily rethrows checked constructor exceptions), so go through the
        // Constructor object instead; behavior for the success path is identical.
        try {
            return clazz.getDeclaredConstructor().newInstance();
        } catch (InstantiationException | IllegalAccessException | NoSuchMethodException iex) {
            // check for the common problem causes (abstract class / interface, non-public
            // class, missing public nullary constructor, non-static inner class, ...)
            checkForInstantiation(clazz);
            // here we are, if none of the common causes was the problem. then the error was
            // most likely an exception in the constructor or field initialization
            throw new RuntimeException(
                    "Could not instantiate type '"
                            + clazz.getName()
                            + "' due to an unspecified exception: "
                            + iex.getMessage(),
                    iex);
        } catch (InvocationTargetException ite) {
            // the nullary constructor itself threw; surface the underlying cause
            Throwable t = ite.getCause() != null ? ite.getCause() : ite;
            String message = t.getMessage();
            throw new RuntimeException(
                    "Could not instantiate type '"
                            + clazz.getName()
                            + "' Most likely the constructor (or a member variable initialization) threw an exception"
                            + (message == null ? "." : ": " + message),
                    t);
        } catch (Throwable t) {
            String message = t.getMessage();
            throw new RuntimeException(
                    "Could not instantiate type '"
                            + clazz.getName()
                            + "' Most likely the constructor (or a member variable initialization) threw an exception"
                            + (message == null ? "." : ": " + message),
                    t);
        }
    }
    /**
     * Creates a new instance of the given class name and type using the provided {@link
     * ClassLoader}.
     *
     * @param className of the class to load
     * @param targetType type of the instantiated class
     * @param classLoader to use for loading the class
     * @param <T> type of the instantiated class
     * @return Instance of the given class name
     * @throws Exception if the class could not be found or is not a subtype of targetType
     */
    public static <T> T instantiate(
            final String className, final Class<T> targetType, final ClassLoader classLoader)
            throws Exception {
        final Class<? extends T> clazz;
        try {
            // initialize=false: defer static initializers until actual instantiation
            clazz = Class.forName(className, false, classLoader).asSubclass(targetType);
        } catch (ClassNotFoundException e) {
            throw new Exception(
                    String.format(
                            "Could not instantiate class '%s' of type '%s'. Please make sure that this class is on your class path.",
                            className, targetType.getName()),
                    e);
        }
        return instantiate(clazz);
    }
    /**
     * Performs a standard check whether the class can be instantiated through its public
     * nullary constructor.
     *
     * @param clazz The class to check.
     * @throws RuntimeException Thrown, if the class cannot be instantiated, with a message
     *     naming the specific reason.
     */
    public static void checkForInstantiation(Class<?> clazz) {
        final String errorMessage = checkForInstantiationError(clazz);
        if (errorMessage != null) {
            throw new RuntimeException(
                    "The class '" + clazz.getName() + "' is not instantiable: " + errorMessage);
        }
    }
    /**
     * Returns a human-readable reason why the class cannot be instantiated, or {@code null}
     * when no common instantiation problem applies.
     */
    public static String checkForInstantiationError(Class<?> clazz) {
        if (!isPublic(clazz)) {
            return "The class is not public.";
        } else if (clazz.isArray()) {
            return "The class is an array. An array cannot be simply instantiated, as with a parameterless constructor.";
        } else if (!isProperClass(clazz)) {
            return "The class is not a proper class. It is either abstract, an interface, or a primitive type.";
        } else if (isNonStaticInnerClass(clazz)) {
            return "The class is an inner class, but not statically accessible.";
        } else if (!hasPublicNullaryConstructor(clazz)) {
            return "The class has no (implicit) public nullary constructor, i.e. a constructor without arguments.";
        } else {
            return null;
        }
    }
    /**
     * Checks, whether the given class is public.
     *
     * @param clazz The class to check.
     * @return True, if the class is public, false if not.
     */
    public static boolean isPublic(Class<?> clazz) {
        return Modifier.isPublic(clazz.getModifiers());
    }
    /**
     * Checks, whether the class is a proper class, i.e. not abstract or an interface, and not a
     * primitive type (primitives carry the abstract modifier and are rejected by that check).
     *
     * @param clazz The class to check.
     * @return True, if the class is a proper class, false otherwise.
     */
    public static boolean isProperClass(Class<?> clazz) {
        int mods = clazz.getModifiers();
        // the isNative check is kept for parity with the upstream Flink implementation
        return !(Modifier.isAbstract(mods)
                || Modifier.isInterface(mods)
                || Modifier.isNative(mods));
    }
    /**
     * Checks, whether the class is an inner class that is not statically accessible. That is
     * especially true for anonymous inner classes.
     *
     * @param clazz The class to check.
     * @return True, if the class is a non-statically accessible inner class.
     */
    public static boolean isNonStaticInnerClass(Class<?> clazz) {
        return clazz.getEnclosingClass() != null
                && (clazz.getDeclaringClass() == null || !Modifier.isStatic(clazz.getModifiers()));
    }
    /**
     * Checks, whether the given class has a public nullary constructor.
     *
     * @param clazz The class to check.
     * @return True, if the class has a public nullary constructor, false if not.
     */
    public static boolean hasPublicNullaryConstructor(Class<?> clazz) {
        // getConstructors() only returns public constructors, so no extra modifier check needed
        for (Constructor<?> constructor : clazz.getConstructors()) {
            if (constructor.getParameterCount() == 0) {
                return true;
            }
        }
        return false;
    }
}
| 8,434 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/ResourceUsagePayloadSetter.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.storage.StorageUnit;
import io.mantisrx.runtime.loader.config.MetricsCollector;
import io.mantisrx.runtime.loader.config.Usage;
import io.mantisrx.runtime.loader.config.WorkerConfiguration;
import io.mantisrx.server.core.StatusPayloads;
import io.mantisrx.server.core.stats.MetricStringConstants;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.io.Closeable;
import java.io.IOException;
import java.util.StringTokenizer;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Periodically samples this worker's resource usage (CPU, memory, network) through the
 * configured {@link MetricsCollector}, publishes it both as a single-use heartbeat payload
 * (JSON-serialized {@link StatusPayloads.ResourceUsage}) and as "ResourceUsage" gauges, and
 * tracks lifetime peak values.
 *
 * <p>Reporting follows a ramp-up schedule of delays and temporarily switches to a faster
 * interval when usage jumps sharply or approaches a configured limit.
 */
public class ResourceUsagePayloadSetter implements Closeable {

    private static final Logger logger = LoggerFactory.getLogger(ResourceUsagePayloadSetter.class);
    // accelerated reporting delay (seconds) used while usage changes sharply or nears a limit
    private static final long bigUsageChgReportingIntervalSecs = 10;
    // relative growth (5%) beyond which a change in usage counts as a "big increase"
    private static final double bigIncreaseThreshold = 0.05;
    // heartbeat that carries the serialized ResourceUsage payload
    private final Heartbeat heartbeat;
    private final ObjectMapper objectMapper = new ObjectMapper();
    // single-threaded scheduler; setPayloadAndMetrics re-schedules itself on it
    private final ScheduledThreadPoolExecutor executor;
    // ramp-up schedule of delays (seconds) between successive reports
    private final long[] reportingIntervals;
    // index into reportingIntervals; sticks at the last entry once the schedule is exhausted
    private final AtomicInteger counter = new AtomicInteger();
    private final MetricsCollector resourceUsageUtils;
    private final Gauge cpuLimitGauge;
    private final Gauge cpuUsageCurrGauge;
    private final Gauge cpuUsagePeakGauge;
    private final Gauge memLimitGauge;
    private final Gauge cachedMemUsageCurrGauge;
    private final Gauge cachedMemUsagePeakGauge;
    private final Gauge totMemUsageCurrGauge;
    private final Gauge totMemUsagePeakGauge;
    private final Gauge nwBytesLimitGauge;
    private final Gauge nwBytesUsageCurrGauge;
    private final Gauge nwBytesUsagePeakGauge;
    private final Gauge jvmMemoryUsedGauge;
    private final Gauge jvmMemoryMaxGauge;
    // network limit in bytes/sec, derived from the job's networkMbps allocation
    private final double nwBytesLimit;
    // previous sample values used to compute rates; -1.0 marks "no sample taken yet"
    private double prev_cpus_system_time_secs = -1.0;
    private double prev_cpus_user_time_secs = -1.0;
    private double prev_bytes_read = -1.0;
    private double prev_bytes_written = -1.0;
    // wall-clock millis of the previous sample; 0 means sampling has not started
    private long prevStatsGatheredAt = 0L;
    // lifetime peaks, updated on every sample
    private double peakCpuUsage = 0.0;
    private double peakMemCache = 0.0;
    private double peakTotMem = 0.0;
    private double peakBytesRead = 0.0;
    private double peakBytesWritten = 0.0;
    // last reported usage, compared against the new sample to detect big increases
    private StatusPayloads.ResourceUsage oldUsage = null;

    /**
     * @param heartbeat heartbeat to attach single-use ResourceUsage payloads to
     * @param config worker configuration supplying the {@link MetricsCollector}
     * @param networkMbps network allocation in Mbps, converted to a bytes/sec limit
     */
    public ResourceUsagePayloadSetter(Heartbeat heartbeat, WorkerConfiguration config, double networkMbps) {
        this.heartbeat = heartbeat;
        this.nwBytesLimit = networkMbps * 1024.0 * 1024.0 / 8.0; // convert from bits to bytes
        executor = new ScheduledThreadPoolExecutor(1);
        // delays (seconds) between the first few reports; the last value repeats forever
        String defaultReportingSchedule = "5,5,10,10,20,30";
        StringTokenizer tokenizer = new StringTokenizer(defaultReportingSchedule, ",");
        reportingIntervals = new long[tokenizer.countTokens()];
        int t = 0;
        while (tokenizer.hasMoreTokens()) {
            reportingIntervals[t++] = Long.parseLong(tokenizer.nextToken());
        }
        resourceUsageUtils = config.getUsageSupplier();
        // register all "ResourceUsage" gauges with the shared metrics registry
        String cpuLimitGaugeName = MetricStringConstants.CPU_PCT_LIMIT;
        String cpuUsageCurrGaugeName = MetricStringConstants.CPU_PCT_USAGE_CURR;
        String cpuUsagePeakGaugeName = MetricStringConstants.CPU_PCT_USAGE_PEAK;
        String memLimitGaugeName = MetricStringConstants.MEM_LIMIT;
        String cachedMemUsageCurrGaugeName = MetricStringConstants.CACHED_MEM_USAGE_CURR;
        String cachedMemUsagePeakGaugeName = MetricStringConstants.CACHED_MEM_USAGE_PEAK;
        String totMemUsageCurrGaugeName = MetricStringConstants.TOT_MEM_USAGE_CURR;
        String totMemUsagePeakGaugeName = MetricStringConstants.TOT_MEM_USAGE_PEAK;
        String nwBytesLimitGaugeName = MetricStringConstants.NW_BYTES_LIMIT;
        String nwBytesUsageCurrGaugeName = MetricStringConstants.NW_BYTES_USAGE_CURR;
        String nwBytesUsagePeakGaugeName = MetricStringConstants.NW_BYTES_USAGE_PEAK;
        String jvmMemoryUsedGaugeName = "jvmMemoryUsedBytes";
        String jvmMemoryMaxGaugeName = "jvmMemoryMaxBytes";
        Metrics m = new Metrics.Builder()
                .name("ResourceUsage")
                .addGauge(cpuLimitGaugeName)
                .addGauge(cpuUsageCurrGaugeName)
                .addGauge(cpuUsagePeakGaugeName)
                .addGauge(memLimitGaugeName)
                .addGauge(cachedMemUsageCurrGaugeName)
                .addGauge(cachedMemUsagePeakGaugeName)
                .addGauge(totMemUsageCurrGaugeName)
                .addGauge(totMemUsagePeakGaugeName)
                .addGauge(nwBytesLimitGaugeName)
                .addGauge(nwBytesUsageCurrGaugeName)
                .addGauge(nwBytesUsagePeakGaugeName)
                .addGauge(jvmMemoryUsedGaugeName)
                .addGauge(jvmMemoryMaxGaugeName)
                .build();
        m = MetricsRegistry.getInstance().registerAndGet(m);
        cpuLimitGauge = m.getGauge(cpuLimitGaugeName);
        cpuUsageCurrGauge = m.getGauge(cpuUsageCurrGaugeName);
        cpuUsagePeakGauge = m.getGauge(cpuUsagePeakGaugeName);
        memLimitGauge = m.getGauge(memLimitGaugeName);
        cachedMemUsageCurrGauge = m.getGauge(cachedMemUsageCurrGaugeName);
        cachedMemUsagePeakGauge = m.getGauge(cachedMemUsagePeakGaugeName);
        totMemUsageCurrGauge = m.getGauge(totMemUsageCurrGaugeName);
        totMemUsagePeakGauge = m.getGauge(totMemUsagePeakGaugeName);
        nwBytesLimitGauge = m.getGauge(nwBytesLimitGaugeName);
        nwBytesUsageCurrGauge = m.getGauge(nwBytesUsageCurrGaugeName);
        nwBytesUsagePeakGauge = m.getGauge(nwBytesUsagePeakGaugeName);
        jvmMemoryUsedGauge = m.getGauge(jvmMemoryUsedGaugeName);
        jvmMemoryMaxGauge = m.getGauge(jvmMemoryMaxGaugeName);
    }

    /**
     * Returns the next delay on the ramp-up schedule; once the schedule is exhausted
     * the last interval repeats indefinitely.
     */
    private long getNextDelay() {
        if (counter.get() >= reportingIntervals.length)
            return reportingIntervals[reportingIntervals.length - 1];
        return reportingIntervals[counter.getAndIncrement()];
    }

    /**
     * Samples usage, publishes it as a single-use heartbeat payload and as gauge values,
     * then re-schedules itself. The delay shrinks to bigUsageChgReportingIntervalSecs when
     * usage increased sharply or is close to a limit.
     */
    private void setPayloadAndMetrics() {
        // figure out resource usage
        long delay = getNextDelay();
        try {
            // null on the very first sample (evalResourceUsage only primes state then)
            StatusPayloads.ResourceUsage usage = evalResourceUsage();
            if (usage != null) {
                try {
                    heartbeat.addSingleUsePayload("" + StatusPayloads.Type.ResourceUsage, objectMapper.writeValueAsString(usage));
                } catch (JsonProcessingException e) {
                    logger.warn("Error writing json for resourceUsage payload: " + e.getMessage());
                }
                // gauges are integer-valued: percentages are scaled by 100, sizes rounded
                cpuLimitGauge.set(Math.round(usage.getCpuLimit() * 100.0));
                cpuUsageCurrGauge.set(Math.round(usage.getCpuUsageCurrent() * 100.0));
                cpuUsagePeakGauge.set(Math.round(usage.getCpuUsagePeak() * 100.0));
                memLimitGauge.set(Math.round(usage.getMemLimit()));
                cachedMemUsageCurrGauge.set(Math.round(usage.getMemCacheCurrent()));
                cachedMemUsagePeakGauge.set(Math.round(usage.getMemCachePeak()));
                totMemUsageCurrGauge.set(Math.round(usage.getTotMemUsageCurrent()));
                totMemUsagePeakGauge.set(Math.round(usage.getTotMemUsagePeak()));
                nwBytesLimitGauge.set(Math.round(nwBytesLimit));
                nwBytesUsageCurrGauge.set(Math.round(usage.getNwBytesCurrent()));
                nwBytesUsagePeakGauge.set(Math.round(usage.getNwBytesPeak()));
                jvmMemoryUsedGauge.set(Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory());
                jvmMemoryMaxGauge.set(Runtime.getRuntime().maxMemory());
                if (isBigIncrease(oldUsage, usage) || closeToLimit(usage)) {
                    // report sooner while usage is volatile or near its limits
                    delay = Math.min(delay, bigUsageChgReportingIntervalSecs);
                }
                oldUsage = usage;
            }
        } catch (Exception e) {
            logger.error("Failed to compute resource usage", e);
        } finally {
            // always re-schedule, even after a failed sample
            logger.debug("scheduling next metrics report with delay=" + delay);
            executor.schedule(this::setPayloadAndMetrics, delay, TimeUnit.SECONDS);
        }
    }

    /** True when current CPU, memory, or network usage exceeds 90% of its limit. */
    private boolean closeToLimit(StatusPayloads.ResourceUsage usage) {
        if (usage == null)
            return false;
        if (usage.getCpuUsageCurrent() / usage.getCpuLimit() > 0.9)
            return true;
        if (usage.getTotMemUsageCurrent() / usage.getMemLimit() > 0.9)
            return true;
        if (usage.getNwBytesCurrent() / nwBytesLimit > 0.9)
            return true;
        return false;
    }

    /**
     * True when any of CPU, memory, or network usage grew by more than
     * {@link #bigIncreaseThreshold} relative to the previously reported usage.
     * A missing old or new sample conservatively counts as a big increase.
     */
    private boolean isBigIncrease(StatusPayloads.ResourceUsage oldUsage, StatusPayloads.ResourceUsage newUsage) {
        if (oldUsage == null || newUsage == null)
            return true;
        if (isBigIncrease(oldUsage.getCpuUsageCurrent(), newUsage.getCpuUsageCurrent()))
            return true;
        if (isBigIncrease(oldUsage.getTotMemUsageCurrent(), newUsage.getTotMemUsageCurrent()))
            return true;
        if (isBigIncrease(oldUsage.getNwBytesCurrent(), newUsage.getNwBytesCurrent()))
            return true;
        return false;
    }

    /**
     * Relative growth above {@link #bigIncreaseThreshold}; any change from zero counts
     * as big. Decreases always return false (negative relative growth).
     */
    private boolean isBigIncrease(double old, double curr) {
        if (old == 0.0)
            return curr != 0;
        return (curr - old) / old > bigIncreaseThreshold;
    }

    /**
     * Kicks off the self-rescheduling sampling loop using the internal ramp-up schedule.
     */
    // todo(sundaram): why is this argument not used?
    void start(long intervalSecs) {
        executor.schedule(this::setPayloadAndMetrics, getNextDelay(), TimeUnit.SECONDS);
    }

    /** Stops the sampling loop; pending scheduled samples are cancelled. */
    @Override
    public void close() throws IOException {
        executor.shutdownNow();
    }

    /**
     * Samples current usage and converts it into a {@link StatusPayloads.ResourceUsage},
     * updating lifetime peaks along the way. The very first invocation only primes the
     * previous-sample fields and returns null (no rates can be computed yet).
     */
    private StatusPayloads.ResourceUsage evalResourceUsage() throws IOException {
        final Usage usage = resourceUsageUtils.get();
        if (prevStatsGatheredAt == 0L) {
            setPreviousStats(usage);
            return null;
        } else {
            double elapsedInSecs =
                ((double) System.currentTimeMillis() - (double) prevStatsGatheredAt) / 1000.0;
            // CPU usage = rate of change of cumulative (system + user) CPU seconds
            double cpuUsage = ((usage.getCpusSystemTimeSecs() - prev_cpus_system_time_secs) / elapsedInSecs) +
                    ((usage.getCpusUserTimeSecs() - prev_cpus_user_time_secs) / elapsedInSecs);
            if (cpuUsage > peakCpuUsage) {
                peakCpuUsage = cpuUsage;
            }
            if (cpuUsage > usage.getCpusLimit()) {
                logger.warn("CPU usage {} greater than limit {}, usage={}, elapsedInSecs={}", cpuUsage, usage.getCpusLimit(), usage, elapsedInSecs);
            }
            if (usage.getMemRssBytes() > peakTotMem)
                peakTotMem = usage.getMemRssBytes();
            // cache = RSS minus anonymous memory; clamped at 0 in case anon > RSS
            double memCache = Math.max(0.0, usage.getMemRssBytes() - usage.getMemAnonBytes());
            if (memCache > peakMemCache)
                peakMemCache = memCache;
            double readBw = (usage.getNetworkReadBytes() - prev_bytes_read) / elapsedInSecs; // TODO check if byteCounts are already rate counts
            double writeBw = (usage.getNetworkWriteBytes() - prev_bytes_written) / elapsedInSecs;
            if (readBw > peakBytesRead)
                peakBytesRead = readBw;
            if (writeBw > peakBytesWritten)
                peakBytesWritten = writeBw;
            // set previous values to new values
            setPreviousStats(usage);
            // memory figures are reported in MB; network as max of read/write bytes/sec
            return new StatusPayloads.ResourceUsage(
                usage.getCpusLimit(),
                cpuUsage,
                peakCpuUsage,
                StorageUnit.BYTES.toMBs(usage.getMemLimit()),
                StorageUnit.BYTES.toMBs(memCache),
                StorageUnit.BYTES.toMBs(peakMemCache),
                StorageUnit.BYTES.toMBs(usage.getMemRssBytes()),
                StorageUnit.BYTES.toMBs(peakTotMem),
                Math.max(readBw, writeBw),
                Math.max(peakBytesRead, peakBytesWritten));
        }
    }

    /** Records the current sample as the baseline for the next rate computation. */
    private void setPreviousStats(Usage usage) {
        prev_cpus_system_time_secs = usage.getCpusSystemTimeSecs();
        prev_cpus_user_time_secs = usage.getCpusUserTimeSecs();
        prev_bytes_read = usage.getNetworkReadBytes();
        prev_bytes_written = usage.getNetworkWriteBytes();
        prevStatsGatheredAt = System.currentTimeMillis();
    }
}
| 8,435 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/WorkerIndexHistory.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker;
import java.util.HashSet;
import java.util.Set;
/**
 * Thread-safe record of worker indexes observed in running or terminal state.
 * All access is serialized on this instance's monitor.
 */
public class WorkerIndexHistory {

    final Set<Integer> runningWorkerIndex = new HashSet<>();
    final Set<Integer> terminalWorkerIndex = new HashSet<>();

    /** Records that the given worker index has been seen running. */
    public synchronized void addToRunningIndex(int workerIndex) {
        runningWorkerIndex.add(workerIndex);
    }

    /** Records that the given worker index has reached a terminal state. */
    public synchronized void addToTerminalIndex(int workerIndex) {
        terminalWorkerIndex.add(workerIndex);
    }

    /** Returns true when the index was recorded as either running or terminal. */
    public synchronized boolean isRunningOrTerminal(int workerIndex) {
        if (runningWorkerIndex.contains(workerIndex)) {
            return true;
        }
        return terminalWorkerIndex.contains(workerIndex);
    }

    /** Forgets all recorded indexes. */
    public synchronized void clearHistory() {
        runningWorkerIndex.clear();
        terminalWorkerIndex.clear();
    }
}
| 8,436 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/WorkerExecutionOperations.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker;
import java.io.IOException;
/**
 * Operations for running and tearing down a job stage on this worker.
 * Driven by ExecuteStageRequestService, which invokes {@link #executeStage} on a
 * dedicated daemon thread per request and {@link #shutdownStage} on service shutdown
 * or stream completion.
 */
public interface WorkerExecutionOperations {
    /**
     * Executes the stage described by {@code setup} (request, status observer, job
     * instance, class loader, parameters). Presumably blocks for the duration of the
     * stage — confirm with implementations.
     *
     * @param setup the execution details for the stage to run
     * @throws IOException if the stage cannot be started
     */
    void executeStage(ExecutionDetails setup) throws IOException;
    /**
     * Shuts down the currently executing stage and releases its resources.
     *
     * @throws IOException if cleanup fails
     */
    void shutdownStage() throws IOException;
}
| 8,437 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/Heartbeat.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.server.core.Status;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.LinkedBlockingQueue;
import javax.annotation.Nullable;
import lombok.Getter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Accumulates per-worker status payloads and renders them into heartbeat {@link Status}
 * messages identifying this worker by job id, stage number, worker index and worker number.
 *
 * <p>Payloads come in two flavors: persistent ones (re-sent with every heartbeat until
 * cleared) and single-use ones (queued, then drained into exactly one heartbeat).
 * Thread-safe: backed by a ConcurrentMap and a blocking queue.
 */
class Heartbeat {

    private static final Logger logger = LoggerFactory.getLogger(Heartbeat.class);

    @Getter
    private final String jobId;
    @Getter
    private final int stageNumber;
    @Getter
    private final int workerIndex;
    @Getter
    private final int workerNumber;
    // persistent payloads, re-sent on every heartbeat until explicitly cleared
    private final ConcurrentMap<String, String> payloads;
    // one-shot payloads, drained into the next heartbeat only
    private final BlockingQueue<PayloadPair> singleUsePayloads = new LinkedBlockingQueue<>();
    // optional hostname to stamp onto every heartbeat Status
    private final Optional<String> host;

    Heartbeat(String jobId, int stageNumber, int workerIndex, int workerNumber) {
        this(jobId, stageNumber, workerIndex, workerNumber, null);
    }

    Heartbeat(String jobId, int stageNumber, int workerIndex, int workerNumber, @Nullable String host) {
        this.jobId = jobId;
        this.stageNumber = stageNumber;
        this.workerIndex = workerIndex;
        this.workerNumber = workerNumber;
        this.host = Optional.ofNullable(host);
        payloads = new ConcurrentHashMap<>();
    }

    /**
     * Stores a persistent payload, replacing any previous value for the same name.
     * Silently ignored when name is null/empty or value is null.
     */
    void setPayload(String name, String value) {
        // parameterized logging avoids eager string concatenation
        logger.info("Setting payload {}", name);
        if (name != null && !name.isEmpty() && value != null)
            payloads.put(name, value);
    }

    /** Removes a persistent payload; returns true if it was present. */
    boolean clearPayload(String name) {
        return payloads.remove(name) != null;
    }

    /** Queues a payload to be delivered with the next heartbeat only. */
    void addSingleUsePayload(String name, String value) {
        logger.debug("Adding payload {}={}", name, value);
        singleUsePayloads.offer(new PayloadPair(name, value));
    }

    /**
     * Builds the heartbeat Status for this moment: all persistent payloads plus any
     * queued single-use payloads (which are drained and therefore sent only once).
     */
    Status getCurrentHeartbeatStatus() {
        List<Status.Payload> payloadList = new ArrayList<>();
        logger.debug("#Payloads = {}", payloads.size());
        for (Map.Entry<String, String> entry : payloads.entrySet()) {
            logger.debug("Adding payload {} with value {}", entry.getKey(), entry.getValue());
            payloadList.add(new Status.Payload(entry.getKey(), entry.getValue()));
        }
        List<PayloadPair> singleUsePlds = new ArrayList<>();
        singleUsePayloads.drainTo(singleUsePlds);
        if (!singleUsePlds.isEmpty()) {
            Map<String, String> suplds = new HashMap<>();
            for (PayloadPair pp : singleUsePlds)
                suplds.put(pp.name, pp.value); // eliminates duplicates, keeps the last one
            for (Map.Entry<String, String> entry : suplds.entrySet())
                payloadList.add(new Status.Payload(entry.getKey(), entry.getValue()));
        }
        Status status = new Status(jobId, stageNumber, workerIndex, workerNumber, Status.TYPE.HEARTBEAT, "heartbeat", MantisJobState.Noop);
        host.ifPresent(status::setHostname);
        if (!payloadList.isEmpty())
            status.setPayloads(payloadList);
        return status;
    }

    /** Immutable name/value pair for the single-use payload queue. */
    private static class PayloadPair {

        final String name;
        final String value;

        PayloadPair(String name, String value) {
            this.name = name;
            this.value = value;
        }
    }
}
| 8,438 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/DownloadJob.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Downloads a job artifact (jar) from a URL into the local job store layout
 * {@code <locationToStore>/<jobName>/lib/<jarName>}, where the jar name is taken
 * from the last path segment of the artifact URL.
 */
public class DownloadJob {

    private static final Logger logger = LoggerFactory.getLogger(DownloadJob.class);
    // set once at construction; final makes instances immutable
    private final URL jobArtifactUrl;
    private final String jobName;
    private final String locationToStore;

    /**
     * @param jobArtifactUrl URL of the job jar to download
     * @param jobName job name, used as the storage sub-directory
     * @param locationToStore root directory of the local job store
     */
    public DownloadJob(
            URL jobArtifactUrl, String jobName,
            String locationToStore) {
        this.jobArtifactUrl = jobArtifactUrl;
        this.locationToStore = locationToStore;
        this.jobName = jobName;
    }

    /**
     * Command-line entry point: {@code job_artifact_url job_name location_to_store}.
     *
     * @throws MalformedURLException if the first argument is not a valid URL
     */
    public static void main(String[] args) throws MalformedURLException {
        if (args.length < 3) {
            System.err.println("usage: job_artifact_url job_name location_to_store");
            System.exit(1);
        }
        logger.info("parameters, jobArtifactUrl: " + args[0]);
        logger.info("parameters, jobName: " + args[1]);
        logger.info("parameters, locationToStore: " + args[2]);
        new DownloadJob(new URL(args[0]), args[1], args[2]).execute();
    }

    /**
     * Downloads the artifact into {@code <locationToStore>/<jobName>/lib}, creating the
     * directory tree as needed and overwriting any existing file of the same name.
     *
     * @throws RuntimeException wrapping the underlying IOException on any download or
     *     write failure
     */
    public void execute() {
        String jobJarFile = jobArtifactUrl.getFile();
        // the jar name is the last path segment of the artifact URL
        String jarName = jobJarFile.substring(jobJarFile.lastIndexOf('/') + 1);
        Path path = Paths.get(locationToStore, jobName,
                "lib");
        logger.info("Started writing job to tmp directory: " + path);
        // download file to the store; Files.copy replaces the previous manual 2KB
        // buffer loop, and REPLACE_EXISTING matches the old truncate-on-open behavior
        try (InputStream is = jobArtifactUrl.openStream()) {
            Files.createDirectories(path);
            Files.copy(is, path.resolve(jarName), java.nio.file.StandardCopyOption.REPLACE_EXISTING);
        } catch (IOException e1) {
            logger.error("Failed to write job to local store at path: " + path, e1);
            throw new RuntimeException(e1);
        }
        logger.info("Finished writing job to tmp directory: " + path);
    }
}
| 8,439 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/ExecuteStageRequestService.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.MantisJobProvider;
import io.mantisrx.server.core.BaseService;
import io.mantisrx.server.core.ExecuteStageRequest;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.WrappedExecuteStageRequest;
import java.io.IOException;
import java.util.Optional;
import java.util.ServiceLoader;
import org.apache.flink.util.UserCodeClassLoader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Observer;
import rx.Subscription;
import rx.functions.Func1;
import rx.subjects.PublishSubject;
/**
 * Service that listens for {@link WrappedExecuteStageRequest}s, resolves the {@link Job}
 * instance to run (from an explicit Job, a named {@link MantisJobProvider} class, or the
 * ServiceLoader), and hands the resulting {@link ExecutionDetails} to the
 * {@link WorkerExecutionOperations} on a dedicated daemon thread per request.
 * A fresh status subject is registered with {@code tasksStatusObserver} for each request
 * so task status can be observed per execution.
 */
public class ExecuteStageRequestService extends BaseService {

    private static final Logger logger = LoggerFactory.getLogger(ExecuteStageRequestService.class);
    // stream of incoming execute-stage requests; subscribed to in start()
    private final Observable<WrappedExecuteStageRequest> executeStageRequestObservable;
    // receives one status Observable per request
    private final Observer<Observable<Status>> tasksStatusObserver;
    private final WorkerExecutionOperations executionOperations;
    // fully-qualified MantisJobProvider class name; when absent, ServiceLoader is used
    private final Optional<String> jobProviderClass;
    // pre-built Job instance; when present, no class loading / provider lookup happens
    private final Optional<Job> mantisJob;
    /**
     * This class loader should be set as the context class loader for threads that may dynamically
     * load user code.
     */
    private final UserCodeClassLoader userCodeClassLoader;
    // subscription to executeStageRequestObservable; torn down in shutdown()
    private Subscription subscription;

    /**
     * @param executeStageRequestObservable stream of stage-execution requests
     * @param tasksStatusObserver sink for per-request status streams
     * @param executionOperations operations used to run / shut down the stage
     * @param jobProviderClass optional explicit MantisJobProvider class name
     * @param userCodeClassLoader class loader for dynamically loaded user code
     * @param mantisJob optional pre-instantiated Job, bypassing class loading
     */
    public ExecuteStageRequestService(
        Observable<WrappedExecuteStageRequest> executeStageRequestObservable,
        Observer<Observable<Status>> tasksStatusObserver,
        WorkerExecutionOperations executionOperations,
        Optional<String> jobProviderClass,
        UserCodeClassLoader userCodeClassLoader,
        Optional<Job> mantisJob) {
        this.executeStageRequestObservable = executeStageRequestObservable;
        this.tasksStatusObserver = tasksStatusObserver;
        this.executionOperations = executionOperations;
        this.jobProviderClass = jobProviderClass;
        this.userCodeClassLoader = userCodeClassLoader;
        this.mantisJob = mantisJob;
    }

    /**
     * Subscribes to the request stream. Each request is paired with a new status subject,
     * resolved to a Job instance, and executed on its own daemon thread whose context
     * class loader is the user-code class loader.
     */
    @Override
    public void start() {
        subscription = executeStageRequestObservable
                // map to request with status observer
                .map(new Func1<WrappedExecuteStageRequest, TrackedExecuteStageRequest>() {
                    @Override
                    public TrackedExecuteStageRequest call(
                            WrappedExecuteStageRequest executeRequest) {
                        PublishSubject<Status> statusSubject = PublishSubject.create();
                        tasksStatusObserver.onNext(statusSubject);
                        return new TrackedExecuteStageRequest(executeRequest, statusSubject);
                    }
                })
                // get provider from jar, return tracked MantisJob
                .flatMap(new Func1<TrackedExecuteStageRequest, Observable<ExecutionDetails>>() {
                    @SuppressWarnings("rawtypes") // raw type due to unknown type for mantis job
                    @Override
                    public Observable<ExecutionDetails> call(TrackedExecuteStageRequest executeRequest) {
                        ExecuteStageRequest executeStageRequest =
                                executeRequest.getExecuteRequest().getRequest();
                        Job mantisJob;
                        ClassLoader cl = null;
                        try {
                            if (!ExecuteStageRequestService.this.mantisJob.isPresent()) {
                                // first of all, get a user-code classloader
                                // this may involve downloading the job's JAR files and/or classes
                                logger.info("Loading JAR files for task {}.", this);
                                cl = userCodeClassLoader.asClassLoader();
                                if (jobProviderClass.isPresent()) {
                                    logger.info("loading job main class " + jobProviderClass.get());
                                    final MantisJobProvider jobProvider = InstantiationUtil.instantiate(
                                            jobProviderClass.get(), MantisJobProvider.class, cl);
                                    mantisJob = jobProvider.getJobInstance();
                                } else {
                                    logger.info("using serviceLoader to get job instance");
                                    ServiceLoader<MantisJobProvider> provider = ServiceLoader.load(
                                            MantisJobProvider.class, cl);
                                    // should only be a single provider, check is made in master
                                    MantisJobProvider mantisJobProvider = provider.iterator()
                                            .next();
                                    mantisJob = mantisJobProvider.getJobInstance();
                                }
                            } else {
                                // pre-built Job supplied; still resolve the class loader for the thread
                                cl = userCodeClassLoader.asClassLoader();
                                mantisJob = ExecuteStageRequestService.this.mantisJob.get();
                            }
                        } catch (Throwable e) {
                            // report the failure on this request's status stream and drop the request
                            logger.error("Failed to load job class", e);
                            executeRequest.getStatus().onError(e);
                            return Observable.empty();
                        }
                        logger.info("Executing job {}", mantisJob);
                        return Observable.just(new ExecutionDetails(executeRequest.getExecuteRequest(),
                                executeRequest.getStatus(), mantisJob, cl, executeStageRequest.getParameters()));
                    }
                })
                .subscribe(new Observer<ExecutionDetails>() {
                    @Override
                    public void onCompleted() {
                        logger.error("Execute stage observable completed"); // should never occur
                        try {
                            executionOperations.shutdownStage();
                        } catch (IOException e) {
                            logger.error("Failed to close stage cleanly", e);
                        }
                    }

                    @Override
                    public void onError(Throwable e) {
                        logger.error("Execute stage observable threw exception", e);
                    }

                    @Override
                    public void onNext(final ExecutionDetails executionDetails) {
                        logger.info("Executing stage for job ID: " + executionDetails.getExecuteStageRequest().getRequest().getJobId());
                        Thread t = new Thread("mantis-worker-thread-" + executionDetails.getExecuteStageRequest().getRequest().getJobId()) {
                            @Override
                            public void run() {
                                // Add ports here
                                try {
                                    executionOperations.executeStage(executionDetails);
                                } catch (Throwable t) {
                                    // note: this catch parameter shadows the enclosing Thread variable 't'
                                    logger.error("Failed to execute job stage", t);
                                }
                            }
                        };
                        // rebuild class path, job jar + parent class loader
                        // job jar to reference third party libraries and resources
                        // parent to reference worker code
                        ClassLoader cl = executionDetails.getClassLoader();
                        t.setContextClassLoader(cl);
                        t.setDaemon(true);
                        t.start();
                    }
                });
    }

    /**
     * Unsubscribes from the request stream and shuts down any executing stage.
     */
    @Override
    public void shutdown() {
        subscription.unsubscribe();
        try {
            logger.info("Shutting down execution operations");
            executionOperations.shutdownStage();
        } catch (IOException e) {
            logger.error("Failed to close cleanly", e);
        }
    }

    /** No-op: this service has no special active-mode behavior. */
    @Override
    public void enterActiveMode() {}
}
| 8,440 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/RunningWorker.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker;
import static io.mantisrx.server.core.utils.StatusConstants.STATUS_MESSAGE_FORMAT;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.runtime.StageConfig;
import io.mantisrx.runtime.WorkerInfo;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.core.Status;
import io.mantisrx.server.core.Status.TYPE;
import io.mantisrx.server.core.domain.WorkerId;
import io.mantisrx.server.worker.mesos.VirtualMachineTaskStatus;
import java.util.Iterator;
import java.util.concurrent.CountDownLatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Observer;
import rx.functions.Action0;
import rx.functions.Action1;
import rx.subjects.PublishSubject;
/**
 * Holds the runtime state of a single Mantis worker executing one stage of a job, and
 * exposes the lifecycle signals ({@link #signalStartedInitiated()}, {@link #signalStarted()},
 * {@link #signalCompleted()}, {@link #signalFailed(Throwable)}) used to report worker status
 * to the master and to the Mesos VM task-status stream.
 * <p>
 * Instances are created via the nested {@link Builder}. The class is not thread-safe beyond
 * what the underlying Rx observers provide.
 */
@SuppressWarnings("rawtypes")
public class RunningWorker {

    private static final Logger logger = LoggerFactory.getLogger(RunningWorker.class);
    // Total stages minus the job-master stage (if present); i.e. stages that process data.
    private final int totalStagesNet;
    // Callbacks wired in the constructor; handed to the execution pipeline via getters.
    private Action0 onTerminateCallback;
    private Action0 onCompleteCallback;
    private Action1<Throwable> onErrorCallback;
    // Released by onTerminateCallback; waitUntilTerminate() blocks on it.
    private CountDownLatch blockUntilTerminate = new CountDownLatch(1);
    private Job job;
    private SchedulingInfo schedulingInfo;
    private StageConfig stage;
    private Observer<Status> jobStatus;
    private String jobId;
    private final int stageNum;
    private final int workerNum;
    private final int workerIndex;
    private final String jobName;
    private final int totalStages;
    private final int metricsPort;
    // Sink for Mesos task-status updates (STARTED/COMPLETED).
    private final Observer<VirtualMachineTaskStatus> vmTaskStatusObserver;
    // Emits the current worker count of the source stage (used for partitioning).
    private final Observable<Integer> stageTotalWorkersObservable;
    private final Observable<JobSchedulingInfo> jobSchedulingInfoObservable;
    // Ports assigned to this worker for its listening endpoints.
    private final Iterator<Integer> ports;
    // Completed with 'true' once start has been initiated successfully.
    private final PublishSubject<Boolean> requestSubject;
    private Context context;
    private final WorkerInfo workerInfo;

    /**
     * Builds a RunningWorker from the supplied {@link Builder}, wiring the terminate,
     * complete and error callbacks.
     *
     * @param builder builder carrying all required job/worker configuration
     */
    public RunningWorker(Builder builder) {
        this.workerInfo = builder.workerInfo;
        this.requestSubject = builder.requestSubject;
        this.job = builder.job;
        this.ports = builder.ports;
        this.metricsPort = builder.metricsPort;
        this.schedulingInfo = builder.schedulingInfo;
        this.stage = builder.stage;
        this.jobId = builder.jobId;
        this.stageNum = builder.stageNum;
        this.workerNum = builder.workerNum;
        this.workerIndex = builder.workerIndex;
        this.jobName = builder.jobName;
        this.totalStages = builder.totalStages;
        // Exclude the job-master stage from the "net" stage count when one exists.
        this.totalStagesNet = this.totalStages - (builder.hasJobMaster ? 1 : 0);
        this.vmTaskStatusObserver = builder.vmTaskStatusObserver;
        this.jobStatus = builder.jobStatus;
        this.stageTotalWorkersObservable = builder.stageTotalWorkersObservable;
        this.jobSchedulingInfoObservable = builder.jobSchedulingInfoObservable;
        this.onTerminateCallback = new Action0() {
            @Override
            public void call() {
                // Unblock waitUntilTerminate() once the job observable terminates.
                blockUntilTerminate.countDown();
            }
        };
        this.onCompleteCallback = new Action0() {
            @Override
            public void call() {
                logger.info("JobId: " + jobId + " stage: " + stageNum + ", completed");
                // setup a timeout to call forced exit as sure way to exit
                new Thread() {
                    @Override
                    public void run() {
                        try {
                            sleep(3000);
                            // Forced exit: guarantees process termination even if
                            // non-daemon threads linger after completion.
                            System.exit(1);
                        } catch (Exception e) {
                            logger.error("Ignoring exception during exit: " + e.getMessage(), e);
                        }
                    }
                }.start();
                signalCompleted();
            }
        };
        this.onErrorCallback = new Action1<Throwable>() {
            @Override
            public void call(Throwable t) {
                signalFailed(t);
            }
        };
    }

    /**
     * Reports that worker start has been initiated: notifies the VM task-status stream,
     * completes the start-request subject with success, and emits a StartInitiated status.
     */
    public void signalStartedInitiated() {
        logger.info("JobId: " + jobId + ", stage: " + stageNum + " workerIndex: " + workerIndex + " workerNumber: " + workerNum + ","
                + " signaling started initiated");
        vmTaskStatusObserver.onNext(new VirtualMachineTaskStatus(
                new WorkerId(jobId, workerIndex, workerNum).getId(),
                VirtualMachineTaskStatus.TYPE.STARTED, jobName + ", " +
                String.format(STATUS_MESSAGE_FORMAT, stageNum, workerIndex, workerNum, "started")));
        // indicate start success
        requestSubject.onNext(true);
        requestSubject.onCompleted();
        jobStatus.onNext(new Status(jobId, stageNum, workerIndex, workerNum
                , TYPE.INFO, "Beginning job execution " +
                workerIndex, MantisJobState.StartInitiated));
    }

    /** Reports that the worker has fully started (state transitions to Started). */
    public void signalStarted() {
        logger.info("JobId: " + jobId + ", " + String.format(STATUS_MESSAGE_FORMAT, stageNum, workerIndex, workerNum, "signaling started"));
        jobStatus.onNext(new Status(jobId, stageNum, workerIndex, workerNum,
                TYPE.INFO, String.format(STATUS_MESSAGE_FORMAT, stageNum, workerIndex, workerNum, "running"),
                MantisJobState.Started));
    }

    /**
     * Reports successful completion: emits a Completed status, completes the status stream,
     * and notifies the VM task-status stream.
     */
    public void signalCompleted() {
        logger.info("JobId: " + jobId + ", stage: " + stageNum + " workerIndex: " + workerIndex + " workerNumber: " + workerNum + ","
                + " signaling completed");
        jobStatus.onNext(new Status(jobId, stageNum, workerIndex, workerNum,
                TYPE.INFO, String.format(STATUS_MESSAGE_FORMAT, stageNum, workerIndex, workerNum, "completed"),
                MantisJobState.Completed));
        // send complete status
        jobStatus.onCompleted();
        // send completed status to vm service
        vmTaskStatusObserver.onNext(new VirtualMachineTaskStatus(
                new WorkerId(jobId, workerIndex, workerNum).getId(),
                VirtualMachineTaskStatus.TYPE.COMPLETED, jobName + ", " +
                String.format(STATUS_MESSAGE_FORMAT, stageNum, workerIndex, workerNum, "completed")));
    }

    /**
     * Reports a worker failure: logs the error and emits a Failed status carrying the
     * throwable's message. Note the status TYPE is INFO (only the MantisJobState marks
     * the failure) — presumably downstream consumers key off the state; verify before
     * changing.
     *
     * @param t the failure cause
     */
    public void signalFailed(Throwable t) {
        logger.info("JobId: " + jobId + ", stage: " + stageNum + " workerIndex: " + workerIndex + " workerNumber: " + workerNum + ","
                + " signaling failed");
        logger.error("Worker failure detected, shutting down job", t);
        jobStatus.onNext(new Status(jobId, stageNum, workerIndex, workerNum,
                TYPE.INFO, String.format(STATUS_MESSAGE_FORMAT, stageNum, workerIndex, workerNum, "failed. error: " + t.getMessage()),
                MantisJobState.Failed));
    }

    /** Blocks the calling thread until the worker's job observable terminates. */
    public void waitUntilTerminate() {
        try {
            blockUntilTerminate.await();
        } catch (InterruptedException e) {
            logger.error("Thread interrupted during await call", e);
        }
    }

    public Context getContext() {
        return context;
    }

    public void setContext(Context context) {
        this.context = context;
    }

    public WorkerInfo getWorkerInfo() {
        return workerInfo;
    }

    /** @return scheduling info for the given stage number */
    public StageSchedulingInfo stageSchedulingInfo(int stageNum) {
        return schedulingInfo.forStage(stageNum);
    }

    /** @return scheduling info for this worker's own stage */
    public StageSchedulingInfo stageSchedulingInfo() {
        return schedulingInfo.forStage(stageNum);
    }

    public Observable<Integer> getSourceStageTotalWorkersObservable() {
        return this.stageTotalWorkersObservable;
    }

    public Observable<JobSchedulingInfo> getJobSchedulingInfoObservable() { return this.jobSchedulingInfoObservable; }

    public Job getJob() {
        return job;
    }

    public Iterator<Integer> getPorts() {
        return ports;
    }

    public int getMetricsPort() {
        return metricsPort;
    }

    public StageConfig getStage() {
        return stage;
    }

    public SchedulingInfo getSchedulingInfo() {
        return schedulingInfo;
    }

    public Action0 getOnTerminateCallback() {
        return onTerminateCallback;
    }

    public Action0 getOnCompleteCallback() {
        return onCompleteCallback;
    }

    public Action1<Throwable> getOnErrorCallback() {
        return onErrorCallback;
    }

    public Observer<Status> getJobStatus() {
        return jobStatus;
    }

    public String getJobId() {
        return jobId;
    }

    public int getStageNum() {
        return stageNum;
    }

    public int getWorkerNum() {
        return workerNum;
    }

    public int getWorkerIndex() {
        return workerIndex;
    }

    public String getJobName() {
        return jobName;
    }

    /** @return total stages excluding the job-master stage, if one exists */
    public int getTotalStagesNet() {
        return totalStagesNet;
    }

    public Observer<VirtualMachineTaskStatus> getVmTaskStatusObserver() {
        return vmTaskStatusObserver;
    }

    @Override
    public String toString() {
        return "RunningWorker ["
                + job + ", schedulingInfo=" + schedulingInfo + ", stage="
                + stage + ", jobStatus=" + jobStatus + ", jobId=" + jobId
                + ", stageNum=" + stageNum + ", workerNum=" + workerNum
                + ", workerIndex=" + workerIndex + ", jobName=" + jobName
                + ", totalStages=" + totalStages + ", metricsPort="
                + metricsPort + ", vmTaskStatusObserver="
                + vmTaskStatusObserver + ", ports=" + ports
                + ", requestSubject=" + requestSubject + ", context=" + context
                + ", workerInfo=" + workerInfo + "]";
    }

    /** Fluent builder for {@link RunningWorker}; all setters return {@code this}. */
    @SuppressWarnings("rawtypes")
    public static class Builder {

        private WorkerInfo workerInfo;
        private Job job;
        private Iterator<Integer> ports;
        private int metricsPort;
        private SchedulingInfo schedulingInfo;
        private StageConfig stage;
        private Observer<Status> jobStatus;
        private String jobId;
        private int stageNum;
        private int workerNum;
        private int workerIndex;
        private String jobName;
        private int totalStages;
        private Observer<VirtualMachineTaskStatus> vmTaskStatusObserver;
        private Observable<Integer> stageTotalWorkersObservable;
        private Observable<JobSchedulingInfo> jobSchedulingInfoObservable;
        private PublishSubject<Boolean> requestSubject;
        // Whether the job runs a job-master (autoscaling) stage; affects totalStagesNet.
        private boolean hasJobMaster = false;

        public Builder workerInfo(WorkerInfo workerInfo) {
            this.workerInfo = workerInfo;
            return this;
        }

        public Builder ports(Iterator<Integer> ports) {
            this.ports = ports;
            return this;
        }

        public Builder job(Job job) {
            this.job = job;
            return this;
        }

        public Builder requestSubject(PublishSubject<Boolean> requestSubject) {
            this.requestSubject = requestSubject;
            return this;
        }

        public Builder stage(StageConfig stage) {
            this.stage = stage;
            return this;
        }

        public Builder schedulingInfo(SchedulingInfo schedulingInfo) {
            this.schedulingInfo = schedulingInfo;
            return this;
        }

        public Builder jobId(String jobId) {
            this.jobId = jobId;
            return this;
        }

        public Builder jobStatusObserver(Observer<Status> jobStatus) {
            this.jobStatus = jobStatus;
            return this;
        }

        public Builder stageNum(int stageNum) {
            this.stageNum = stageNum;
            return this;
        }

        public Builder metricsPort(int metricsPort) {
            this.metricsPort = metricsPort;
            return this;
        }

        public Builder workerNum(int workerNum) {
            this.workerNum = workerNum;
            return this;
        }

        public Builder workerIndex(int workerIndex) {
            this.workerIndex = workerIndex;
            return this;
        }

        public Builder jobName(String jobName) {
            this.jobName = jobName;
            return this;
        }

        public Builder totalStages(int totalStages) {
            this.totalStages = totalStages;
            return this;
        }

        public Builder vmTaskStatusObservable(Observer<VirtualMachineTaskStatus> vmTaskStatusObserver) {
            this.vmTaskStatusObserver = vmTaskStatusObserver;
            return this;
        }

        public Builder hasJobMaster(boolean b) {
            this.hasJobMaster = b;
            return this;
        }

        public Builder stageTotalWorkersObservable(Observable<Integer> stageTotalWorkersObservable) {
            this.stageTotalWorkersObservable = stageTotalWorkersObservable;
            return this;
        }

        public Builder jobSchedulingInfoObservable(Observable<JobSchedulingInfo> jobSchedulingInfoObservable) {
            this.jobSchedulingInfoObservable = jobSchedulingInfoObservable;
            return this;
        }

        public RunningWorker build() {
            return new RunningWorker(this);
        }
    }
}
| 8,441 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/DataDroppedPayloadSetter.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker;
import static com.mantisrx.common.utils.MantisMetricStringConstants.DROP_OPERATOR_INCOMING_METRIC_GROUP;
import static io.mantisrx.server.core.stats.MetricStringConstants.DATA_DROP_METRIC_GROUP;
import static io.mantisrx.server.core.stats.MetricStringConstants.DROP_COUNT;
import static io.mantisrx.server.core.stats.MetricStringConstants.ON_NEXT_COUNT;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.server.core.StatusPayloads;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.reactivx.mantis.operators.DropOperator;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Periodically aggregates the DropOperator counters registered under the
 * drop-operator metric group, publishes the totals as a single-use heartbeat
 * payload (IncomingDataDrop), and mirrors them into local gauges.
 * <p>
 * Lifecycle: call {@link #start(long)} to begin periodic publication and
 * {@link #close()} to stop it.
 */
class DataDroppedPayloadSetter implements Closeable {

    private static final String metricNamePrefix = DROP_OPERATOR_INCOMING_METRIC_GROUP;
    private static final Logger logger = LoggerFactory.getLogger(DataDroppedPayloadSetter.class);
    private final Heartbeat heartbeat;
    private final ObjectMapper objectMapper = new ObjectMapper();
    // Single-threaded scheduler for the periodic setPayload task.
    private final ScheduledThreadPoolExecutor executor;
    private ScheduledFuture<?> future;
    private final Gauge dropCountGauge;
    private final Gauge onNextCountGauge;

    /**
     * @param heartbeat heartbeat instance to which the data-drop payload is attached
     */
    DataDroppedPayloadSetter(Heartbeat heartbeat) {
        this.heartbeat = heartbeat;
        objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        executor = new ScheduledThreadPoolExecutor(1);
        Metrics m = new Metrics.Builder()
                .name(DATA_DROP_METRIC_GROUP)
                .addGauge(DROP_COUNT)
                .addGauge(ON_NEXT_COUNT)
                .build();
        m = MetricsRegistry.getInstance().registerAndGet(m);
        dropCountGauge = m.getGauge(DROP_COUNT);
        onNextCountGauge = m.getGauge(ON_NEXT_COUNT);
    }

    /**
     * Sums dropped/onNext counters across all DropOperator metrics, attaches the totals
     * to the heartbeat as a single-use JSON payload, and updates the local gauges.
     * Any exception is logged and swallowed so the periodic schedule keeps running.
     *
     * @param intervalSecs scheduling interval; NOTE(review): currently unused by the
     *                     body — confirm whether it was intended for rate normalization
     */
    protected void setPayload(final long intervalSecs) {
        final Collection<Metrics> metrics = MetricsRegistry.getInstance().getMetrics(metricNamePrefix);
        long totalDropped = 0L;
        long totalOnNext = 0L;
        try {
            if (metrics != null && !metrics.isEmpty()) {
                //logger.info("Got " + metrics.size() + " metrics for DropOperator");
                for (Metrics m : metrics) {
                    final Counter dropped = m.getCounter("" + DropOperator.Counters.dropped);
                    final Counter onNext = m.getCounter("" + DropOperator.Counters.onNext);
                    if (dropped != null)
                        totalDropped += dropped.value();
                    else
                        logger.warn("Unexpected to get null dropped counter for metric " + m.getMetricGroupId().id());
                    if (onNext != null)
                        totalOnNext += onNext.value();
                    else
                        logger.warn("Unexpected to get null onNext counter for metric " + m.getMetricGroupId().id());
                }
                final StatusPayloads.DataDropCounts dataDrop = new StatusPayloads.DataDropCounts(totalOnNext, totalDropped);
                try {
                    heartbeat.addSingleUsePayload("" + StatusPayloads.Type.IncomingDataDrop, objectMapper.writeValueAsString(dataDrop));
                } catch (JsonProcessingException e) {
                    logger.warn("Error writing json for dataDrop payload: " + e.getMessage());
                }
                dropCountGauge.set(dataDrop.getDroppedCount());
                onNextCountGauge.set(dataDrop.getOnNextCount());
            } else
                logger.debug("Got no metrics from DropOperator");
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
    }

    /**
     * Schedules {@link #setPayload(long)} to run every {@code intervalSecs} seconds,
     * starting after an initial delay of the same length.
     */
    void start(final long intervalSecs) {
        future =
            executor.scheduleAtFixedRate(
                () -> setPayload(intervalSecs),
                intervalSecs,
                intervalSecs,
                TimeUnit.SECONDS);
    }

    /** Cancels the periodic task (without interrupting a run in progress) and stops the executor. */
    @Override
    public void close() throws IOException {
        if (future != null) {
            future.cancel(false);
        }
        executor.shutdownNow();
    }
}
| 8,442 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/JobMasterService.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster;
import io.mantisrx.common.MantisServerSentEvent;
import io.mantisrx.common.SystemParameters;
import io.mantisrx.common.metrics.measurement.GaugeMeasurement;
import io.mantisrx.common.metrics.measurement.Measurements;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.parameter.SourceJobParameters;
import io.mantisrx.server.core.Service;
import io.mantisrx.server.core.stats.MetricStringConstants;
import io.mantisrx.server.master.client.MantisMasterGateway;
import io.mantisrx.server.worker.client.WorkerMetricsClient;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.util.List;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Observer;
import rx.Subscription;
import rx.functions.Action0;
import rx.functions.Action1;
/**
 * Job Master service responsible for driving auto-scaling for a job; it runs as the
 * (virtual) stage 0. It subscribes to worker metrics (and, optionally, source-job worker
 * metrics), parses each metric event, and feeds the data points into the
 * {@link JobAutoScaler} via a {@link WorkerMetricHandler}.
 */
public class JobMasterService implements Service {

    private static final Logger logger = LoggerFactory.getLogger(JobMasterService.class);
    private final static ObjectMapper objectMapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

    private final String jobId;
    private final WorkerMetricsClient workerMetricsClient;
    private final AutoScaleMetricsConfig autoScaleMetricsConfig;
    // Sink for parsed metric data; backed by WorkerMetricHandler.
    private final Observer<MetricData> metricObserver;
    private final JobAutoScaler jobAutoScaler;
    private final Context context;
    private final Action0 observableOnCompleteCallback;
    private final Action1<Throwable> observableOnErrorCallback;
    private final Action0 observableOnTerminateCallback;
    private final MantisMasterGateway masterClientApi;
    private Subscription subscription = null;

    /**
     * @param jobId                         id of the job being auto-scaled
     * @param schedInfo                     scheduling info used to initialize the auto scaler
     * @param workerMetricsClient           client for subscribing to worker metrics
     * @param autoScaleMetricsConfig        metric groups/patterns driving scaling decisions
     * @param masterClientApi               gateway to the Mantis master
     * @param context                       job context (parameters)
     * @param observableOnCompleteCallback  invoked when the metric stream completes
     * @param observableOnErrorCallback     invoked when the metric stream errors
     * @param observableOnTerminateCallback invoked when the metric stream terminates
     */
    public JobMasterService(final String jobId,
                            final SchedulingInfo schedInfo,
                            final WorkerMetricsClient workerMetricsClient,
                            final AutoScaleMetricsConfig autoScaleMetricsConfig,
                            final MantisMasterGateway masterClientApi,
                            final Context context,
                            final Action0 observableOnCompleteCallback,
                            final Action1<Throwable> observableOnErrorCallback,
                            final Action0 observableOnTerminateCallback) {
        this.jobId = jobId;
        this.workerMetricsClient = workerMetricsClient;
        this.autoScaleMetricsConfig = autoScaleMetricsConfig;
        this.masterClientApi = masterClientApi;
        this.jobAutoScaler = new JobAutoScaler(jobId, schedInfo, masterClientApi, context);
        this.metricObserver = new WorkerMetricHandler(jobId, jobAutoScaler.getObserver(), masterClientApi, autoScaleMetricsConfig).initAndGetMetricDataObserver();
        this.observableOnCompleteCallback = observableOnCompleteCallback;
        this.observableOnErrorCallback = observableOnErrorCallback;
        this.observableOnTerminateCallback = observableOnTerminateCallback;
        this.context = context;
    }

    /**
     * Parses a single metric event (JSON {@link Measurements}) and forwards it to the
     * metric observer. Metrics originating from a different (source) job are funneled
     * into stage 1, converting counters to gauges when no gauges are present.
     *
     * @param ev raw JSON metric event
     * @return the parsed measurements, or {@code null} if parsing/handling failed
     */
    private Measurements handleMetricEvent(final String ev) {
        try {
            final Measurements measurements = objectMapper.readValue(ev, Measurements.class);
            final String jobId = measurements.getTags().get(MetricStringConstants.MANTIS_JOB_ID);
            final int workerIdx = Integer.parseInt(measurements.getTags().get(MetricStringConstants.MANTIS_WORKER_INDEX));
            int stage = Integer.parseInt(measurements.getTags().get(MetricStringConstants.MANTIS_STAGE_NUM));
            final int workerNum = Integer.parseInt(measurements.getTags().get(MetricStringConstants.MANTIS_WORKER_NUM));
            List<GaugeMeasurement> gauges = (List<GaugeMeasurement>) measurements.getGauges();
            // Metric is not from the current job => it came from a source job.
            // BUGFIX: compare with equals(); the previous reference comparison (!=) was
            // effectively always true because the jobId tag is a fresh String parsed from
            // JSON, so the job's own metrics were mis-classified as source-job metrics.
            // (null jobId tag is also treated as "not this job", matching old behavior.)
            if (!this.jobId.equals(jobId)) {
                // Funnel source job metric into the 1st stage
                stage = 1;
                if (gauges.isEmpty()) {
                    gauges = measurements.getCounters().stream().map(counter ->
                        new GaugeMeasurement(counter.getEvent(), counter.getCount())).collect(Collectors.toList());
                }
            }
            metricObserver.onNext(new MetricData(jobId, stage, workerIdx, workerNum, measurements.getName(), gauges));
            return measurements;
        } catch (JsonProcessingException e) {
            logger.error("failed to parse json", e);
        } catch (Exception e) {
            logger.error("caught exception", e);
        }
        return null;
    }

    /**
     * Starts the auto scaler and subscribes to the worker metric streams (merging in
     * source-job metrics when enabled via job parameter).
     */
    @Override
    public void start() {
        logger.info("Starting JobMasterService");
        logger.info("Starting Job Auto Scaler");
        jobAutoScaler.start();

        final WorkerMetricSubscription workerMetricSubscription = new WorkerMetricSubscription(jobId, workerMetricsClient, autoScaleMetricsConfig.getMetricGroups());
        Observable<Observable<MantisServerSentEvent>> metrics = workerMetricSubscription.getMetricsClient().getResults();
        boolean isSourceJobMetricEnabled = (boolean) context.getParameters().get(
            SystemParameters.JOB_MASTER_AUTOSCALE_SOURCEJOB_METRIC_PARAM, false);
        if (isSourceJobMetricEnabled) {
            metrics = metrics.mergeWith(getSourceJobMetrics());
        }

        subscription = Observable.merge(metrics)
            .map(event -> handleMetricEvent(event.getEventAsString()))
            .doOnTerminate(observableOnTerminateCallback)
            .doOnCompleted(observableOnCompleteCallback)
            .doOnError(observableOnErrorCallback)
            .subscribe();
    }

    /**
     * Builds the observable of source-job worker metrics based on the configured target
     * info (falling back to input parameters), enforcing clientId consistency and adding
     * any extra drop-metric patterns from job parameters.
     */
    protected Observable<Observable<MantisServerSentEvent>> getSourceJobMetrics() {
        List<SourceJobParameters.TargetInfo> targetInfos = SourceJobParameters.parseTargetInfo(
            (String) context.getParameters().get(SystemParameters.JOB_MASTER_AUTOSCALE_SOURCEJOB_TARGET_PARAM, "{}"));
        if (targetInfos.isEmpty()) {
            targetInfos = SourceJobParameters.parseInputParameters(context);
        }
        targetInfos = SourceJobParameters.enforceClientIdConsistency(targetInfos, jobId);

        String additionalDropMetricPatterns =
            (String) context.getParameters().get(SystemParameters.JOB_MASTER_AUTOSCALE_SOURCEJOB_DROP_METRIC_PATTERNS_PARAM, "");
        autoScaleMetricsConfig.addSourceJobDropMetrics(additionalDropMetricPatterns);

        SourceJobWorkerMetricsSubscription sourceSub = new SourceJobWorkerMetricsSubscription(
            targetInfos, masterClientApi, workerMetricsClient, autoScaleMetricsConfig);
        return sourceSub.getResults();
    }

    /** Unsubscribes from the metric stream, if a subscription is active. */
    @Override
    public void shutdown() {
        if (subscription != null) {
            subscription.unsubscribe();
        }
    }

    @Override
    public void enterActiveMode() {
    }
}
| 8,443 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/JobMasterStageConfig.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster;
import io.mantisrx.runtime.StageConfig;
/**
 * Placeholder stage configuration for the Job Master (auto-scaling) stage.
 * It carries no codecs and no input strategy since the stage processes no data.
 */
public class JobMasterStageConfig extends StageConfig<Void, Void> {

    /**
     * @param description human-readable description of the stage
     */
    public JobMasterStageConfig(String description) {
        // No input/output codecs; input strategy intentionally left unspecified.
        super(description, null, null, INPUT_STRATEGY.NONE_SPECIFIED);
    }
}
| 8,444 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/Util.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import org.slf4j.Logger;
/**
 * Helpers for normalizing raw scaling metric values against a stage's machine
 * definition so different scaling reasons can be compared on a common (percentage)
 * scale.
 */
public class Util {

    private static final Logger log = org.slf4j.LoggerFactory.getLogger(Util.class);

    /**
     * Converts a raw metric value into its effective value for scaling decisions.
     * Resource-bound reasons (CPU, Memory, JVMMemory, Network) are expressed as a
     * percentage of the stage's machine capacity; all other reasons pass through
     * unchanged.
     *
     * @param stageSchedulingInfo scheduling info providing the machine definition
     * @param type                the scaling reason the value belongs to
     * @param value               the raw metric value
     * @return the normalized (effective) value
     */
    public static double getEffectiveValue(StageSchedulingInfo stageSchedulingInfo, StageScalingPolicy.ScalingReason type, double value) {
        if (type == StageScalingPolicy.ScalingReason.CPU) {
            // Percentage of available cores.
            return 100.0 * value / stageSchedulingInfo.getMachineDefinition().getCpuCores();
        }
        if (type == StageScalingPolicy.ScalingReason.Memory) {
            // Percentage of available memory (MB).
            return 100.0 * value / stageSchedulingInfo.getMachineDefinition().getMemoryMB();
        }
        if (type == StageScalingPolicy.ScalingReason.JVMMemory) {
            // value is in bytes; capacity is in MB.
            return 100 * (value / (stageSchedulingInfo.getMachineDefinition().getMemoryMB() * 1024 * 1024));
        }
        if (type == StageScalingPolicy.ScalingReason.Network) {
            // value is in bytes, multiply by 8, divide by M
            return 100.0 * value * 8 / (1024.0 * 1024.0 * stageSchedulingInfo.getMachineDefinition().getNetworkMbps());
        }
        // DataDrop, KafkaLag, UserDefined, KafkaProcessed and any future reason:
        // identity is the default transformation.
        return value;
    }
}
| 8,445 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/WorkerMetricHandler.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster;
import static io.mantisrx.server.core.stats.MetricStringConstants.*;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.server.core.*;
import io.mantisrx.server.core.stats.MetricStringConstants;
import io.mantisrx.server.master.client.MantisMasterGateway;
import io.mantisrx.shaded.com.google.common.cache.Cache;
import io.mantisrx.shaded.com.google.common.cache.CacheBuilder;
import io.reactivx.mantis.operators.DropOperator;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Observer;
import rx.Subscriber;
import rx.Subscription;
import rx.functions.Action0;
import rx.functions.Action1;
import rx.functions.Func1;
import rx.observers.SerializedObserver;
import rx.schedulers.Schedulers;
import rx.subjects.PublishSubject;
/* package */ class WorkerMetricHandler {
private static final Logger logger = LoggerFactory.getLogger(WorkerMetricHandler.class);
private final PublishSubject<MetricData> metricDataSubject = PublishSubject.create();
private final Observer<JobAutoScaler.Event> jobAutoScaleObserver;
private final MantisMasterGateway masterClientApi;
private final AutoScaleMetricsConfig autoScaleMetricsConfig;
private final MetricAggregator metricAggregator;
private final Map<Integer, Integer> numWorkersByStage = new HashMap<>();
private final Map<Integer, List<WorkerHost>> workerHostsByStage = new HashMap<>();
private final String jobId;
private final Func1<Integer, Integer> lookupNumWorkersByStage = stage -> {
if (numWorkersByStage.containsKey(stage)) {
return numWorkersByStage.get(stage);
} else {
logger.warn("num workers for stage {} not known", stage);
return -1;
}
};
    /**
     * Creates a handler that aggregates worker metric data for the given job and
     * forwards auto-scale events to the supplied observer.
     *
     * @param jobId                  id of the job whose worker metrics are handled
     * @param jobAutoScaleObserver   sink for auto-scaler events derived from the metrics
     * @param masterClientApi        gateway to the Mantis master (used e.g. for worker resubmits)
     * @param autoScaleMetricsConfig configuration of which metrics drive scaling
     */
    public WorkerMetricHandler(final String jobId,
                               final Observer<JobAutoScaler.Event> jobAutoScaleObserver,
                               final MantisMasterGateway masterClientApi,
                               final AutoScaleMetricsConfig autoScaleMetricsConfig) {
        this.jobId = jobId;
        this.jobAutoScaleObserver = jobAutoScaleObserver;
        this.masterClientApi = masterClientApi;
        this.autoScaleMetricsConfig = autoScaleMetricsConfig;
        this.metricAggregator = new MetricAggregator(autoScaleMetricsConfig);
    }
    /**
     * Starts the handler and returns the observer to feed metric data into.
     * The observer is serialized so it may be called from multiple threads.
     *
     * @return thread-safe observer accepting {@link MetricData} points
     */
    public Observer<MetricData> initAndGetMetricDataObserver() {
        start();
        return new SerializedObserver<>(metricDataSubject);
    }
private Map<String, GaugeData> getAggregates(List<Map<String, GaugeData>> dataPointsList) {
final Map<String, List<GaugeData>> transformed = new HashMap<>();
for (Map<String, GaugeData> datapoint : dataPointsList) {
for (Map.Entry<String, GaugeData> gauge : datapoint.entrySet()) {
if (!transformed.containsKey(gauge.getKey())) {
transformed.put(gauge.getKey(), new ArrayList<>());
}
transformed.get(gauge.getKey()).add(gauge.getValue());
}
}
return metricAggregator.getAggregates(transformed);
}
private class StageMetricDataOperator implements Observable.Operator<Object, MetricData> {
private static final int killCooldownSecs = 600;
private final Pattern hostExtractorPattern = Pattern.compile(".+:.+:sockAddr=/(?<host>.+)");
private final int stage;
private final Func1<Integer, Integer> numStageWorkersFn;
private final int valuesToKeep = 2;
private final AutoScaleMetricsConfig autoScaleMetricsConfig;
private final ConcurrentMap<Integer, WorkerMetrics> workersMap = new ConcurrentHashMap<>();
private final ConcurrentMap<String, WorkerMetrics> sourceJobWorkersMap = new ConcurrentHashMap<>();
private final Cache<String, String> sourceJobMetricsRecent = CacheBuilder.newBuilder()
.expireAfterWrite(1, TimeUnit.MINUTES)
.build();
private final WorkerOutlier workerOutlier;
private final TimeBufferedWorkerOutlier workerOutlierForSourceJobMetrics;
private final Map<Integer, Integer> workerNumberByIndex = new HashMap<>();
public StageMetricDataOperator(final int stage,
final Func1<Integer, Integer> numStageWorkersFn,
final AutoScaleMetricsConfig autoScaleMetricsConfig) {
logger.debug("setting operator for stage " + stage);
this.stage = stage;
this.numStageWorkersFn = numStageWorkersFn;
this.autoScaleMetricsConfig = autoScaleMetricsConfig;
Action1<Integer> workerResubmitFunc = workerIndex -> {
try {
final int workerNumber;
if (workerNumberByIndex.containsKey(workerIndex)) {
workerNumber = workerNumberByIndex.get(workerIndex);
} else {
logger.error("outlier resubmit FAILED. worker number not found for worker index {} stage {}", workerIndex, stage);
return;
}
if (resubmitOutlierWorkerEnabled()) {
logger.info("resubmitting worker job {} stage {} idx {} workerNum {} (dropping excessive data compared to others)",
jobId, stage, workerIndex, workerNumber);
masterClientApi.resubmitJobWorker(jobId, "JobMaster", workerNumber, "dropping excessive data compared to others in stage")
.onErrorResumeNext(throwable -> {
logger.error("caught error ({}) when resubmitting outlier worker num {}", throwable.getMessage(), workerNumber);
return Observable.empty();
})
.subscribe();
} else {
logger.info("resubmitOutlier property is disabled. Not killing worker job {} stage {} idx {} workerNum {} (dropping excessive data compared to others)",
jobId, stage, workerIndex, workerNumber);
}
} catch (Exception e) {
logger.warn("Can't resubmit outlier worker idx {} error {}", workerIndex, e.getMessage(), e);
}
};
this.workerOutlier = new WorkerOutlier(killCooldownSecs, workerResubmitFunc);
this.workerOutlierForSourceJobMetrics = new TimeBufferedWorkerOutlier(killCooldownSecs, metricsIntervalSeconds, workerIndex -> {
List<WorkerHost> candidates = workerHostsByStage.get(stage);
if (candidates != null) {
candidates.stream().filter(h -> h.getWorkerIndex() == workerIndex).map(WorkerHost::getHost).findFirst().ifPresent(host ->
lookupWorkersByHost(host).stream().forEach(i -> workerResubmitFunc.call(i)));
}
});
}
private boolean resubmitOutlierWorkerEnabled() {
final String resubmitOutlierWorkerProp =
"mantis.worker.jobmaster.outlier.worker.resubmit";
final String enableOutlierWorkerResubmit = "true";
final boolean resubmitOutlierWorker =
Boolean.valueOf(
ServiceRegistry.INSTANCE.getPropertiesService()
.getStringValue(resubmitOutlierWorkerProp, enableOutlierWorkerResubmit));
return resubmitOutlierWorker;
}
private List<Integer> lookupWorkersByHost(String host) {
List<WorkerHost> candidates = workerHostsByStage.get(stage);
if (candidates != null) {
return candidates.stream().filter(h -> h.getHost().equals(host)).map(WorkerHost::getWorkerIndex).collect(Collectors.toList());
}
return new ArrayList<>();
}
// Records one metric data point for a worker of this job's stage: maintains the
// per-worker rolling window, feeds drop-percentage readings into the outlier
// detector, and prunes state for worker indexes beyond the current stage size.
private void addDataPoint(final MetricData datapoint) {
    final int workerIndex = datapoint.getWorkerIndex();
    logger.debug("adding data point for worker idx={} data={}", workerIndex, datapoint);
    WorkerMetrics workerMetrics = workersMap.get(workerIndex);
    if (workerMetrics == null) {
        // First data point seen for this worker index; start a new rolling window.
        workerMetrics = new WorkerMetrics(valuesToKeep);
        workersMap.put(workerIndex, workerMetrics);
    }
    // The transform may rewrite raw drop/onNext counts into a drop percentage
    // (see WorkerMetrics.transform for the DATA_DROP group).
    final MetricData transformedMetricData = workerMetrics.addDataPoint(datapoint.getMetricGroupName(), datapoint);
    if (transformedMetricData.getMetricGroupName().equals(DATA_DROP_METRIC_GROUP)) {
        final Map<String, Double> dataDropGauges = transformedMetricData.getGaugeData().getGauges();
        if (dataDropGauges.containsKey(DROP_PERCENT)) {
            // Feed the outlier detector so persistently-dropping workers can be resubmitted.
            workerOutlier.addDataPoint(workerIndex,
                dataDropGauges.get(DROP_PERCENT), numStageWorkersFn.call(stage));
        }
    }
    workerNumberByIndex.put(workerIndex, datapoint.getWorkerNumber());
    // remove any data for workers with index that don't exist anymore (happens when stage scales down)
    int maxIdx = 0;
    synchronized (workersMap) {
        for (Integer idx : workersMap.keySet()) {
            maxIdx = Math.max(maxIdx, idx);
        }
    }
    final Integer numWorkers = numStageWorkersFn.call(stage);
    if (numWorkers > -1) {
        // Indexes >= numWorkers no longer exist after a scale-down; drop their state.
        for (int idx = numWorkers; idx <= maxIdx; idx++) {
            workersMap.remove(idx);
        }
    }
}
// Records a metric data point emitted by an upstream source job's worker, keyed
// by "sourceJobId:workerIndex", and feeds per-host drop gauges into the
// source-job outlier detector.
private void addSourceJobDataPoint(final MetricData datapoint) {
    final String sourceJobId = datapoint.getJobId();
    final int workerIndex = datapoint.getWorkerIndex();
    String sourceWorkerKey = sourceJobId + ":" + workerIndex;
    WorkerMetrics workerMetrics = sourceJobWorkersMap.get(sourceWorkerKey);
    if (workerMetrics == null) {
        // First data point from this source-job worker; start a new rolling window.
        workerMetrics = new WorkerMetrics(valuesToKeep);
        sourceJobWorkersMap.put(sourceWorkerKey, workerMetrics);
    }
    workerMetrics.addDataPoint(datapoint.getMetricGroupName(), datapoint);
    // Mark this metric as recently seen; the periodic aggregation only counts
    // source-job metrics present in sourceJobMetricsRecent.
    String sourceMetricKey = sourceWorkerKey + ":" + datapoint.getMetricGroupName();
    sourceJobMetricsRecent.put(sourceMetricKey, sourceMetricKey);
    // Detect outlier on sourcejob drops, if high percentage of drops are concentrated on few workers.
    Matcher matcher = hostExtractorPattern.matcher(datapoint.getMetricGroupName());
    if (matcher.matches()) {
        // From the sourcejob drop metric, we only know the sockAddr of the downstream worker. Multiple worker
        // may be running on the same machine. We need to count that evenly in the outlier detector.
        List<Integer> workerIndices = lookupWorkersByHost(matcher.group("host"));
        for (Map.Entry<String, Double> gauge: datapoint.getGaugeData().getGauges().entrySet()) {
            if (autoScaleMetricsConfig.isSourceJobDropMetric(datapoint.getMetricGroupName(), gauge.getKey())) {
                // Split the drop value evenly across all workers on that host.
                workerIndices.stream().forEach(i ->
                    workerOutlierForSourceJobMetrics.addDataPoint(i, gauge.getValue() / workerIndices.size(), numStageWorkersFn.call(stage)));
            }
        }
    }
}
// Period of the aggregation task scheduled in call() below: how often rolling
// windows are aggregated and autoscale events are emitted.
private static final int metricsIntervalSeconds = 30; // TODO make it configurable
// Subscribes a periodic aggregation task plus a subscriber that routes each
// incoming MetricData point either to this job's per-worker state or to the
// source-job tracking, depending on the data point's jobId.
@Override
public Subscriber<? super MetricData> call(final Subscriber<? super Object> child) {
    // Every metricsIntervalSeconds: aggregate the per-worker rolling windows and
    // emit one JobAutoScaler.Event per scaling reason that has gauge data.
    child.add(Schedulers.computation().createWorker().schedulePeriodically(
        new Action0() {
            @Override
            public void call() {
                List<Map<String, GaugeData>> listofAggregates = new ArrayList<>();
                synchronized (workersMap) {
                    for (Map.Entry<Integer, WorkerMetrics> entry : workersMap.entrySet()) {
                        // get the aggregate metric values by metric group per worker
                        listofAggregates.add(metricAggregator.getAggregates(entry.getValue().getGaugesByMetricGrp()));
                    }
                }
                final int numWorkers = numStageWorkersFn.call(stage);
                // get the aggregate metric values by metric group for all workers in stage
                Map<String, GaugeData> allWorkerAggregates = getAggregates(listofAggregates);
                logger.info("Job stage " + stage + " avgResUsage from " +
                    workersMap.size() + " workers: " + allWorkerAggregates.toString());
                // User-defined metrics: one event per configured group/metric pair.
                for (Map.Entry<String, Set<String>> userDefinedMetric : autoScaleMetricsConfig.getUserDefinedMetrics().entrySet()) {
                    final String metricGrp = userDefinedMetric.getKey();
                    for (String metric : userDefinedMetric.getValue()) {
                        if (!allWorkerAggregates.containsKey(metricGrp) || !allWorkerAggregates.get(metricGrp).getGauges().containsKey(metric)) {
                            logger.debug("no gauge data found for UserDefined (metric={})", userDefinedMetric);
                        } else {
                            jobAutoScaleObserver.onNext(
                                new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.UserDefined, stage,
                                    allWorkerAggregates.get(metricGrp).getGauges().get(metric), numWorkers, ""));
                        }
                    }
                }
                // Kafka consumer lag / processed-rate events.
                if (allWorkerAggregates.containsKey(KAFKA_CONSUMER_FETCH_MGR_METRIC_GROUP)) {
                    final Map<String, Double> gauges = allWorkerAggregates.get(KAFKA_CONSUMER_FETCH_MGR_METRIC_GROUP).getGauges();
                    if (gauges.containsKey(KAFKA_LAG)) {
                        jobAutoScaleObserver.onNext(
                            new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.KafkaLag, stage,
                                gauges.get(KAFKA_LAG), numWorkers, "")
                        );
                    }
                    if (gauges.containsKey(KAFKA_PROCESSED)) {
                        jobAutoScaleObserver.onNext(
                            new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.KafkaProcessed, stage,
                                gauges.get(KAFKA_PROCESSED), numWorkers, ""));
                    }
                }
                // Resource usage: CPU, memory, network, and JVM memory events.
                if (allWorkerAggregates.containsKey(RESOURCE_USAGE_METRIC_GROUP)) {
                    // cpuPctUsageCurr is Published as (cpuUsageCurr * 100.0) from ResourceUsagePayloadSetter, reverse transform to retrieve curr cpu usage
                    double cpuUsageCurr = allWorkerAggregates.get(RESOURCE_USAGE_METRIC_GROUP).getGauges().get(MetricStringConstants.CPU_PCT_USAGE_CURR) / 100.0;
                    jobAutoScaleObserver.onNext(
                        new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.CPU, stage,
                            cpuUsageCurr, numWorkers, ""));
                    jobAutoScaleObserver.onNext(
                        new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.Memory, stage,
                            allWorkerAggregates.get(RESOURCE_USAGE_METRIC_GROUP).getGauges().get(MetricStringConstants.TOT_MEM_USAGE_CURR), numWorkers, ""));
                    jobAutoScaleObserver.onNext(
                        new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.Network, stage,
                            allWorkerAggregates.get(RESOURCE_USAGE_METRIC_GROUP).getGauges().get(MetricStringConstants.NW_BYTES_USAGE_CURR), numWorkers, ""));
                    jobAutoScaleObserver.onNext(
                        new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.JVMMemory, stage,
                            allWorkerAggregates.get(RESOURCE_USAGE_METRIC_GROUP).getGauges().get("jvmMemoryUsedBytes"), numWorkers, "")
                    );
                }
                // Data drop percentage event (computed by WorkerMetrics.transform).
                if (allWorkerAggregates.containsKey(DATA_DROP_METRIC_GROUP)) {
                    final GaugeData gaugeData = allWorkerAggregates.get(DATA_DROP_METRIC_GROUP);
                    final Map<String, Double> gauges = gaugeData.getGauges();
                    if (gauges.containsKey(DROP_PERCENT)) {
                        jobAutoScaleObserver.onNext(
                            new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.DataDrop, stage,
                                gauges.get(DROP_PERCENT), numWorkers, ""));
                    }
                }
                if (allWorkerAggregates.containsKey(WORKER_STAGE_INNER_INPUT)) {
                    final GaugeData gaugeData = allWorkerAggregates.get(WORKER_STAGE_INNER_INPUT);
                    final Map<String, Double> gauges = gaugeData.getGauges();
                    if (gauges.containsKey(ON_NEXT_GAUGE)) {
                        // Divide by 6 to account for 6 second reset by Atlas on counter metric.
                        jobAutoScaleObserver.onNext(
                            new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.RPS, stage,
                                gauges.get(ON_NEXT_GAUGE) / 6.0, numWorkers, ""));
                    }
                }
                // Sum drop gauges reported by upstream source jobs; only metrics seen
                // recently (tracked in sourceJobMetricsRecent) are counted.
                double sourceJobDrops = 0;
                boolean hasSourceJobDropsMetric = false;
                Map<String, String> sourceMetricsRecent = sourceJobMetricsRecent.asMap();
                for (Map.Entry<String, WorkerMetrics> worker : sourceJobWorkersMap.entrySet()) {
                    Map<String, GaugeData> metricGroups = metricAggregator.getAggregates(worker.getValue().getGaugesByMetricGrp());
                    for (Map.Entry<String, GaugeData> group : metricGroups.entrySet()) {
                        String metricKey = worker.getKey() + ":" + group.getKey();
                        for (Map.Entry<String, Double> gauge : group.getValue().getGauges().entrySet()) {
                            if (sourceMetricsRecent.containsKey(metricKey) &&
                                autoScaleMetricsConfig.isSourceJobDropMetric(group.getKey(), gauge.getKey())) {
                                sourceJobDrops += gauge.getValue();
                                hasSourceJobDropsMetric = true;
                            }
                        }
                    }
                }
                if (hasSourceJobDropsMetric) {
                    logger.info("Job stage {}, source job drop metrics: {}", stage, sourceJobDrops);
                    // Divide by 6 to account for 6 second reset by Atlas on counter metric.
                    jobAutoScaleObserver.onNext(
                        new JobAutoScaler.Event(StageScalingPolicy.ScalingReason.SourceJobDrop, stage,
                            sourceJobDrops / 6.0 / numWorkers, numWorkers, ""));
                }
            }
        }, metricsIntervalSeconds, metricsIntervalSeconds, TimeUnit.SECONDS
    ));
    return new Subscriber<MetricData>() {
        @Override
        public void onCompleted() {
            child.unsubscribe();
        }
        @Override
        public void onError(Throwable e) {
            logger.error("Unexpected error: " + e.getMessage(), e);
        }
        @Override
        public void onNext(MetricData metricData) {
            logger.debug("Got metric metricData for job " + jobId + " stage " + stage +
                ", worker " + metricData.getWorkerNumber() + ": " + metricData);
            // Data points tagged with this job's id belong to our own workers;
            // anything else comes from an upstream source job.
            if (jobId.equals(metricData.getJobId())) {
                addDataPoint(metricData);
            } else {
                addSourceJobDataPoint(metricData);
            }
        }
    };
}
}
// Wires the handler: tracks per-stage worker counts/hosts from scheduling-change
// updates, then fans incoming metric data out to one StageMetricDataOperator per stage.
private void start() {
    // Holds per-stage operator subscriptions so they can be released on unsubscribe.
    final AtomicReference<List<Subscription>> ref = new AtomicReference<>(new ArrayList<>());
    // Keep numWorkersByStage / workerHostsByStage current as the job's scheduling changes.
    masterClientApi.schedulingChanges(jobId)
        .doOnNext(jobSchedulingInfo -> {
            final Map<Integer, WorkerAssignments> workerAssignments = jobSchedulingInfo.getWorkerAssignments();
            for (Map.Entry<Integer, WorkerAssignments> workerAssignmentsEntry : workerAssignments.entrySet()) {
                final WorkerAssignments workerAssignment = workerAssignmentsEntry.getValue();
                logger.debug("setting numWorkers={} for stage={}", workerAssignment.getNumWorkers(), workerAssignment.getStage());
                numWorkersByStage.put(workerAssignment.getStage(), workerAssignment.getNumWorkers());
                workerHostsByStage.put(workerAssignment.getStage(), new ArrayList<>(workerAssignment.getHosts().values()));
            }
        }).subscribe();
    logger.info("Starting worker metric handler with autoscale config {}", autoScaleMetricsConfig);
    metricDataSubject
        .groupBy(metricData -> metricData.getStage())
        .lift(new DropOperator<>(WorkerMetricHandler.class.getName()))
        .doOnNext(go -> {
            final Integer stage = go.getKey();
            // One StageMetricDataOperator per stage aggregates and emits autoscale events.
            final Subscription s = go
                .lift(new StageMetricDataOperator(stage, lookupNumWorkersByStage, autoScaleMetricsConfig))
                .subscribe();
            logger.info("adding subscription for stage {} StageMetricDataOperator", stage);
            ref.get().add(s);
        })
        .doOnUnsubscribe(() -> {
            // Tear down all per-stage subscriptions when the outer stream ends.
            for (Subscription s : ref.get())
                s.unsubscribe();
        })
        .subscribe();
}
}
| 8,446 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/MetricAggregator.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Aggregates lists of gauge samples into one value per gauge, using the
 * aggregation algorithm (AVERAGE or MAX) configured per metric-group/gauge in
 * {@link AutoScaleMetricsConfig}.
 */
class MetricAggregator {

    private static final Logger logger = LoggerFactory.getLogger(MetricAggregator.class);
    private final AutoScaleMetricsConfig autoScaleMetricsConfig;

    MetricAggregator(final AutoScaleMetricsConfig autoScaleMetricsConfig) {
        this.autoScaleMetricsConfig = autoScaleMetricsConfig;
    }

    /**
     * Aggregates each metric group's list of gauge samples into a single
     * {@link GaugeData} (timestamped with the current wall-clock time).
     *
     * <p>Fix: AVERAGE now divides by the number of samples that actually
     * contained the gauge. The previous rolling-average formula used the count
     * of samples seen so far in the whole group, which skewed the average for
     * gauges missing from some samples.
     *
     * @param dataPointsByMetricGrp gauge samples keyed by metric group name
     * @return one aggregated GaugeData per metric group
     */
    public Map<String, GaugeData> getAggregates(final Map<String, List<GaugeData>> dataPointsByMetricGrp) {
        final Map<String, GaugeData> result = new HashMap<>();
        for (Map.Entry<String, List<GaugeData>> metricGrpGauges : dataPointsByMetricGrp.entrySet()) {
            final String metricGrp = metricGrpGauges.getKey();
            final Map<String, Double> gaugeAggregates = new HashMap<>();
            // Number of samples that contained each gauge, for correct averaging.
            final Map<String, Integer> countsByGauge = new HashMap<>();
            for (GaugeData gaugeData : metricGrpGauges.getValue()) {
                for (Map.Entry<String, Double> gaugeEntry : gaugeData.getGauges().entrySet()) {
                    final String gaugeName = gaugeEntry.getKey();
                    final double currValue = gaugeEntry.getValue();
                    final AutoScaleMetricsConfig.AggregationAlgo aggregationAlgo =
                        autoScaleMetricsConfig.getAggregationAlgo(metricGrp, gaugeName);
                    switch (aggregationAlgo) {
                        case AVERAGE:
                            // n = occurrences of this particular gauge so far (including this one).
                            final int n = countsByGauge.merge(gaugeName, 1, Integer::sum);
                            final Double prevAvg = gaugeAggregates.get(gaugeName);
                            if (prevAvg == null) {
                                gaugeAggregates.put(gaugeName, currValue);
                            } else {
                                gaugeAggregates.put(gaugeName, ((prevAvg * (n - 1)) + currValue) / n);
                            }
                            break;
                        case MAX:
                            gaugeAggregates.merge(gaugeName, currValue, Math::max);
                            break;
                        default:
                            logger.warn("unsupported aggregation algo {} for {}:{}", aggregationAlgo.name(), metricGrp, gaugeName);
                            break;
                    }
                }
            }
            result.put(metricGrp, new GaugeData(System.currentTimeMillis(), gaugeAggregates));
        }
        return result;
    }
}
| 8,447 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/GaugeData.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster;
import io.mantisrx.common.metrics.measurement.GaugeMeasurement;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Timestamped snapshot of gauge values, keyed by gauge name.
 */
class GaugeData {

    private final long when;
    private Map<String, Double> gauges = new HashMap<>();

    // Builds the gauge map from raw measurements, keyed by measurement event name.
    GaugeData(final long when, final List<GaugeMeasurement> gauges) {
        this.when = when;
        for (GaugeMeasurement measurement : gauges) {
            this.gauges.put(measurement.getEvent(), (double) measurement.getValue());
        }
    }

    // Wraps an already-built gauge map; the map is stored by reference, not copied.
    GaugeData(final long when, final Map<String, Double> gauges) {
        this.when = when;
        this.gauges = gauges;
    }

    public long getWhen() {
        return when;
    }

    public Map<String, Double> getGauges() {
        return gauges;
    }

    @Override
    public String toString() {
        final StringBuilder text = new StringBuilder("GaugeData{");
        text.append("when=").append(when);
        text.append(", gauges=").append(gauges);
        text.append('}');
        return text.toString();
    }
}
| 8,448 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/WorkerMetrics.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster;
import static io.mantisrx.server.core.stats.MetricStringConstants.DATA_DROP_METRIC_GROUP;
import static io.mantisrx.server.core.stats.MetricStringConstants.DROP_COUNT;
import static io.mantisrx.server.core.stats.MetricStringConstants.DROP_PERCENT;
import static io.mantisrx.server.core.stats.MetricStringConstants.ON_NEXT_COUNT;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
/**
 * Per-worker rolling windows of gauge samples, grouped by metric group name.
 * Keeps at most {@code valuesToKeepPerMetric} samples per group and rewrites
 * data-drop counters into a drop percentage on ingest.
 */
public class WorkerMetrics {

    private final int valuesToKeepPerMetric;
    private final ConcurrentMap<String, List<GaugeData>> gaugesByMetricGrp = new ConcurrentHashMap<>();

    public WorkerMetrics(int valuesToKeepPerMetric) {
        this.valuesToKeepPerMetric = valuesToKeepPerMetric;
    }

    /**
     * Rewrites samples of the data-drop metric group: when both drop and onNext
     * counters are present (and their sum is positive), emits a drop percentage
     * alongside the onNext count; when only the onNext count is present, keeps
     * just that; otherwise emits an empty gauge set. All other metric groups
     * pass through unchanged.
     */
    public GaugeData transform(final String metricGroupName, final GaugeData data) {
        if (!metricGroupName.equals(DATA_DROP_METRIC_GROUP)) {
            return data;
        }
        final Map<String, Double> gauges = data.getGauges();
        final boolean hasDropCount = gauges.containsKey(DROP_COUNT);
        final boolean hasOnNextCount = gauges.containsKey(ON_NEXT_COUNT);
        if (hasDropCount && hasOnNextCount) {
            final double drops = gauges.get(DROP_COUNT);
            final double delivered = gauges.get(ON_NEXT_COUNT);
            final double total = drops + delivered;
            if (total > 0.0) {
                final Map<String, Double> rewritten = new HashMap<>(2);
                rewritten.put(DROP_PERCENT, (drops * 100.0) / total);
                rewritten.put(ON_NEXT_COUNT, delivered);
                return new GaugeData(data.getWhen(), rewritten);
            }
        } else if (hasOnNextCount) {
            return new GaugeData(data.getWhen(), Collections.singletonMap(ON_NEXT_COUNT, gauges.get(ON_NEXT_COUNT)));
        }
        // No usable counters (or zero total): emit an empty gauge set.
        return new GaugeData(data.getWhen(), Collections.emptyMap());
    }

    /**
     * Appends a (possibly transformed) sample to the metric group's rolling
     * window and returns a copy of the data point carrying the transformed gauges.
     */
    public MetricData addDataPoint(final String metricGroupName, final MetricData metricData) {
        final List<GaugeData> window =
            gaugesByMetricGrp.computeIfAbsent(metricGroupName, unused -> new CopyOnWriteArrayList<>());
        final GaugeData transformed = transform(metricGroupName, metricData.getGaugeData());
        window.add(transformed);
        // Trim the oldest entry once the window exceeds its configured capacity.
        if (window.size() > valuesToKeepPerMetric) {
            window.remove(0);
        }
        return new MetricData(metricData.getJobId(), metricData.getStage(), metricData.getWorkerIndex(),
            metricData.getWorkerNumber(), metricData.getMetricGroupName(), transformed);
    }

    public Map<String, List<GaugeData>> getGaugesByMetricGrp() {
        return gaugesByMetricGrp;
    }
}
| 8,449 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/WorkerMetricSubscription.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster;
import static io.mantisrx.server.core.stats.MetricStringConstants.METRIC_NAME_STR;
import io.mantisrx.common.MantisServerSentEvent;
import io.mantisrx.runtime.parameter.SinkParameters;
import io.mantisrx.server.worker.client.MetricsClient;
import io.mantisrx.server.worker.client.SseWorkerConnectionFunction;
import io.mantisrx.server.worker.client.WorkerConnectionsStatus;
import io.mantisrx.server.worker.client.WorkerMetricsClient;
import java.io.UnsupportedEncodingException;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observer;
import rx.functions.Action1;
/**
 * Opens an SSE metrics stream for a job's workers, filtered to a set of metric
 * group names.
 *
 * <p>Fixes: the connection-error callback now logs the full stack trace (the
 * previous version logged only the message), and an {@link InterruptedException}
 * during the retry back-off restores the thread's interrupt flag instead of
 * swallowing it.
 */
public class WorkerMetricSubscription {

    private static final Logger logger = LoggerFactory.getLogger(WorkerMetricSubscription.class);

    final MetricsClient<MantisServerSentEvent> metricsClient;
    // worker metrics to subscribe to
    private final Set<String> metrics;

    /**
     * @param jobId               id of the job whose worker metrics to stream
     * @param workerMetricsClient client used to locate the job's metrics endpoints
     * @param metricGroups        metric group names to subscribe to
     */
    public WorkerMetricSubscription(final String jobId, WorkerMetricsClient workerMetricsClient, Set<String> metricGroups) {
        this.metrics = metricGroups;
        SinkParameters metricNamesFilter = null;
        try {
            // Encode the requested metric group names as sink query parameters.
            SinkParameters.Builder sinkParamsBuilder = new SinkParameters.Builder();
            for (String metric : metricGroups) {
                sinkParamsBuilder = sinkParamsBuilder.withParameter(METRIC_NAME_STR, metric);
            }
            metricNamesFilter = sinkParamsBuilder.build();
        } catch (UnsupportedEncodingException e) {
            // Best effort: fall through with a null filter rather than failing construction.
            logger.error("error encoding sink parameters", e);
        }
        metricsClient = workerMetricsClient.getMetricsClientByJobId(jobId,
            new SseWorkerConnectionFunction(true, new Action1<Throwable>() {
                @Override
                public void call(Throwable throwable) {
                    // Pass the throwable so the full stack trace is logged, not just the message.
                    logger.error("Metric connection error: " + throwable.getMessage(), throwable);
                    try {
                        // Brief back-off before the connection function retries.
                        Thread.sleep(500);
                    } catch (InterruptedException ie) {
                        // Restore the interrupt flag so callers can observe the interruption.
                        Thread.currentThread().interrupt();
                        logger.error("Interrupted waiting for retrying connection");
                    }
                }
            }, metricNamesFilter),
            new Observer<WorkerConnectionsStatus>() {
                @Override
                public void onCompleted() {
                    logger.info("got onCompleted in WorkerConnStatus obs");
                }

                @Override
                public void onError(Throwable e) {
                    logger.info("got onError in WorkerConnStatus obs");
                }

                @Override
                public void onNext(WorkerConnectionsStatus workerConnectionsStatus) {
                    logger.info("got WorkerConnStatus {}", workerConnectionsStatus);
                }
            });
    }

    public Set<String> getMetrics() {
        return metrics;
    }

    public MetricsClient<MantisServerSentEvent> getMetricsClient() {
        return metricsClient;
    }
}
| 8,450 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/JobAutoScaler.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster;
import com.netflix.control.clutch.Clutch;
import com.netflix.control.clutch.ClutchExperimental;
import io.mantisrx.common.MantisProperties;
import io.mantisrx.common.SystemParameters;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.server.core.stats.UsageDataStats;
import io.mantisrx.server.master.client.MantisMasterGateway;
import io.mantisrx.server.worker.jobmaster.clutch.ClutchAutoScaler;
import io.mantisrx.server.worker.jobmaster.clutch.ClutchConfiguration;
import io.mantisrx.server.worker.jobmaster.clutch.experimental.MantisClutchConfigurationSelector;
import io.mantisrx.server.worker.jobmaster.clutch.rps.ClutchRpsPIDConfig;
import io.mantisrx.server.worker.jobmaster.clutch.rps.RpsClutchConfigurationSelector;
import io.mantisrx.server.worker.jobmaster.clutch.rps.RpsMetricComputer;
import io.mantisrx.server.worker.jobmaster.clutch.rps.RpsScaleComputer;
import io.mantisrx.server.worker.jobmaster.control.actuators.MantisStageActuator;
import io.mantisrx.server.worker.jobmaster.control.utils.TransformerWrapper;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.io.vavr.jackson.datatype.VavrModule;
import io.vavr.control.Option;
import io.vavr.control.Try;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.BackpressureOverflow;
import rx.Observable;
import rx.Observer;
import rx.Subscriber;
import rx.Subscription;
import rx.functions.Func1;
import rx.functions.Func2;
import rx.observers.SerializedObserver;
import rx.subjects.PublishSubject;
public class JobAutoScaler {
private static final ObjectMapper objectMapper = new ObjectMapper();
private static final Logger logger = LoggerFactory.getLogger(JobAutoScaler.class);
private static final String PercentNumberFormat = "%5.2f";
private static final Map<StageScalingPolicy.ScalingReason, Clutch.Metric> metricMap = new HashMap<>();
static {
objectMapper.registerModule(new VavrModule());
}
static {
metricMap.put(StageScalingPolicy.ScalingReason.CPU, Clutch.Metric.CPU);
metricMap.put(StageScalingPolicy.ScalingReason.JVMMemory, Clutch.Metric.MEMORY);
metricMap.put(StageScalingPolicy.ScalingReason.Network, Clutch.Metric.NETWORK);
metricMap.put(StageScalingPolicy.ScalingReason.KafkaLag, Clutch.Metric.LAG);
metricMap.put(StageScalingPolicy.ScalingReason.DataDrop, Clutch.Metric.DROPS);
metricMap.put(StageScalingPolicy.ScalingReason.UserDefined, Clutch.Metric.UserDefined);
metricMap.put(StageScalingPolicy.ScalingReason.RPS, Clutch.Metric.RPS);
metricMap.put(StageScalingPolicy.ScalingReason.SourceJobDrop, Clutch.Metric.SOURCEJOB_DROP);
}
private final String jobId;
private final MantisMasterGateway masterClientApi;
private final SchedulingInfo schedulingInfo;
private final PublishSubject<Event> subject;
private final Context context;
// Wires the autoscaler for one job; scaling only begins once start() is invoked.
JobAutoScaler(String jobId, SchedulingInfo schedulingInfo, MantisMasterGateway masterClientApi,
              Context context) {
    this.jobId = jobId;
    this.schedulingInfo = schedulingInfo;
    this.masterClientApi = masterClientApi;
    this.context = context;
    // Events from metric handlers are published through this subject.
    this.subject = PublishSubject.create();
}
// NOTE(review): looks like a leftover manual test harness — it blocks the calling
// thread for a full day before printing a single tick and exiting. Not referenced
// by the production wiring visible here; confirm it is unused before removing.
public static void main(String[] args) {
    Observable.interval(1, TimeUnit.DAYS)
        .doOnNext(x -> System.out.println(x))
        .take(1)
        .toBlocking()
        .last();
}
/**
 * Returns the observer through which metric handlers feed scaling events;
 * serialized so concurrent emitters cannot interleave calls on the subject.
 */
Observer<Event> getObserver() {
    return new SerializedObserver<>(subject);
}
// Translates a Mantis autoscaler event into the Clutch event model: maps the
// scaling reason onto the corresponding Clutch metric (null when unmapped) and
// normalizes the raw value against the stage's scheduling info.
private com.netflix.control.clutch.Event mantisEventToClutchEvent(StageSchedulingInfo stageSchedulingInfo, Event event) {
    final Clutch.Metric clutchMetric = metricMap.get(event.type);
    final double effectiveValue = Util.getEffectiveValue(stageSchedulingInfo, event.getType(), event.getValue());
    return new com.netflix.control.clutch.Event(clutchMetric, effectiveValue);
}
// Subscribes to the event subject and, per stage, selects and attaches the
// appropriate scaler implementation (Clutch RPS / JSON-config / Clutch /
// Clutch-experimental / rule-based) driven by the stage's scaling policy and
// the optional Clutch system-parameter override.
void start() {
    subject
        // Bound the buffer so a slow scaler drops the oldest (stale) metrics.
        .onBackpressureBuffer(100, () -> {
            logger.info("onOverflow triggered, dropping old events");
        }, BackpressureOverflow.ON_OVERFLOW_DROP_OLDEST)
        .doOnRequest(x -> logger.info("Scaler requested {} metrics.", x))
        .groupBy(event -> event.getStage())
        .flatMap(go -> {
            Integer stage = Optional.ofNullable(go.getKey()).orElse(-1);
            final StageSchedulingInfo stageSchedulingInfo = schedulingInfo.forStage(stage);
            logger.info("System Environment:");
            System.getenv().forEach((key, value) -> {
                logger.info("{} = {}", key, value);
            });
            // Optional per-job Clutch override supplied as a job parameter.
            Optional<String> clutchCustomConfiguration =
                Optional.ofNullable(
                    MantisProperties.getProperty("JOB_PARAM_" + SystemParameters.JOB_MASTER_CLUTCH_SYSTEM_PARAM));
            if (stageSchedulingInfo != null && (stageSchedulingInfo.getScalingPolicy() != null ||
                clutchCustomConfiguration.isPresent())) {
                ClutchConfiguration config = null;
                int minSize = 0;
                int maxSize = 0;
                boolean useJsonConfigBased = false;
                boolean useClutch = false;
                boolean useClutchRps = false;
                boolean useClutchExperimental = false;
                // Determine which type of scaler to use.
                if (stageSchedulingInfo.getScalingPolicy() != null) {
                    minSize = stageSchedulingInfo.getScalingPolicy().getMin();
                    maxSize = stageSchedulingInfo.getScalingPolicy().getMax();
                    if (stageSchedulingInfo.getScalingPolicy().getStrategies() != null) {
                        Set<StageScalingPolicy.ScalingReason> reasons = stageSchedulingInfo.getScalingPolicy().getStrategies()
                            .values()
                            .stream()
                            .map(StageScalingPolicy.Strategy::getReason)
                            .collect(Collectors.toSet());
                        if (reasons.contains(StageScalingPolicy.ScalingReason.Clutch)) {
                            useClutch = true;
                        } else if (reasons.contains(StageScalingPolicy.ScalingReason.ClutchExperimental)) {
                            useClutchExperimental = true;
                        } else if (reasons.contains(StageScalingPolicy.ScalingReason.ClutchRps)) {
                            useClutchRps = true;
                        }
                    }
                }
                // The custom JSON config, when present and parseable, can switch
                // the scaler type and override the min/max bounds.
                if (clutchCustomConfiguration.isPresent()) {
                    try {
                        config = getClutchConfiguration(clutchCustomConfiguration.get()).get(stage);
                    } catch (Exception ex) {
                        logger.error("Error parsing json clutch config: {}", clutchCustomConfiguration.get(), ex);
                    }
                    if (config != null) {
                        if (config.getRpsConfig().isDefined()) {
                            useClutchRps = true;
                        } else if (config.getUseExperimental().getOrElse(false)) {
                            useClutch = true;
                        } else {
                            useJsonConfigBased = true;
                        }
                        if (config.getMinSize() > 0) {
                            minSize = config.getMinSize();
                        }
                        if (config.getMaxSize() > 0) {
                            maxSize = config.getMaxSize();
                        }
                    }
                }
                int initialSize = stageSchedulingInfo.getNumberOfInstances();
                StageScaler scaler = new StageScaler(stage, stageSchedulingInfo);
                MantisStageActuator actuator = new MantisStageActuator(initialSize, scaler);
                // Events whose scaling reason has no Clutch metric mapping are filtered out.
                Observable.Transformer<Event, com.netflix.control.clutch.Event> transformToClutchEvent =
                    obs -> obs.map(event -> this.mantisEventToClutchEvent(stageSchedulingInfo, event))
                        .filter(event -> event.metric != null);
                Observable<Integer> workerCounts = context.getWorkerMapObservable()
                    .map(x -> x.getWorkersForStage(go.getKey()).size())
                    .distinctUntilChanged()
                    .throttleLast(5, TimeUnit.SECONDS);
                // Create the scaler.
                if (useClutchRps) {
                    logger.info("Using clutch rps scaler, job: {}, stage: {} ", jobId, stage);
                    ClutchRpsPIDConfig rpsConfig = Option.of(config).flatMap(ClutchConfiguration::getRpsConfig).getOrNull();
                    return go
                        .compose(transformToClutchEvent)
                        .compose(new ClutchExperimental(
                            actuator,
                            initialSize,
                            minSize,
                            maxSize,
                            workerCounts,
                            Observable.interval(1, TimeUnit.HOURS),
                            TimeUnit.MINUTES.toMillis(10),
                            new RpsClutchConfigurationSelector(stage, stageSchedulingInfo, config),
                            new RpsMetricComputer(),
                            new RpsScaleComputer(rpsConfig)));
                } else if (useJsonConfigBased) {
                    logger.info("Using json config based scaler, job: {}, stage: {} ", jobId, stage);
                    return go
                        .compose(new ClutchAutoScaler(stageSchedulingInfo, scaler, config, initialSize));
                } else if (useClutch) {
                    logger.info("Using clutch scaler, job: {}, stage: {} ", jobId, stage);
                    return go
                        .compose(transformToClutchEvent)
                        .compose(new Clutch(
                            actuator,
                            initialSize,
                            minSize,
                            maxSize));
                } else if (useClutchExperimental) {
                    logger.info("Using clutch experimental scaler, job: {}, stage: {} ", jobId, stage);
                    return go
                        .compose(transformToClutchEvent)
                        .compose(new ClutchExperimental(
                            actuator,
                            initialSize,
                            minSize,
                            maxSize,
                            workerCounts,
                            Observable.interval(1, TimeUnit.HOURS),
                            TimeUnit.MINUTES.toMillis(10),
                            new MantisClutchConfigurationSelector(stage, stageSchedulingInfo)));
                } else {
                    logger.info("Using rule based scaler, job: {}, stage: {} ", jobId, stage);
                    return go.compose(new TransformerWrapper<>(new StageScaleOperator<>(stage, stageSchedulingInfo)));
                }
            } else {
                // No scaling policy and no custom config: pass events through unscaled.
                return go;
            }
        })
        .doOnCompleted(() -> logger.info("onComplete on JobAutoScaler subject"))
        .doOnError(t -> logger.error("got onError in JobAutoScaler", t))
        .doOnSubscribe(() -> logger.info("onSubscribe JobAutoScaler"))
        .doOnUnsubscribe(() -> {
            logger.info("Unsubscribing for JobAutoScaler of job " + jobId);
        })
        .retry()
        .subscribe();
}
/**
 * Decodes the Clutch configuration parameter taking into account the parameter used to be a single
 * config for stage 1, we now accept a mapping of stage -> config and this method wraps
 * the logic for decoding either parameter.
 *
 * @param jsonConfig A JSON representation of a Clutch Configuration Map.
 *
 * @return A map of stage -> config for Clutch.
 */
protected Map<Integer, ClutchConfiguration> getClutchConfiguration(String jsonConfig) {
    // First attempt: the modern form, a JSON object mapping stage number -> config.
    return Try.<Map<Integer, ClutchConfiguration>>of(() -> objectMapper.readValue(jsonConfig, new TypeReference<Map<Integer, ClutchConfiguration>>() {}))
        // Fallback: the legacy form, a single config implicitly assigned to stage 1.
        // The trailing .get() rethrows when the legacy parse also fails.
        .getOrElseGet(t -> Try.of(() -> {
            ClutchConfiguration config = objectMapper.readValue(jsonConfig, new TypeReference<ClutchConfiguration>() {});
            Map<Integer, ClutchConfiguration> configs = new HashMap<>();
            configs.put(1, config);
            return configs;
        }).get());
}
/**
 * Immutable value object describing a single autoscaling observation: the
 * scaling reason, the stage it applies to, the observed value, the current
 * worker count, and an optional message.
 */
public static class Event {

    private final StageScalingPolicy.ScalingReason type;
    private final int stage;
    private final double value;
    private final int numWorkers;
    private final String message;

    public Event(StageScalingPolicy.ScalingReason type, int stage, double value, int numWorkers, String message) {
        this.type = type;
        this.stage = stage;
        this.value = value;
        this.numWorkers = numWorkers;
        this.message = message;
    }

    public StageScalingPolicy.ScalingReason getType() {
        return type;
    }

    public int getStage() {
        return stage;
    }

    public double getValue() {
        return value;
    }

    public int getNumWorkers() {
        return numWorkers;
    }

    public String getMessage() {
        return message;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        final Event other = (Event) o;
        return stage == other.stage
            && Double.compare(other.value, value) == 0
            && numWorkers == other.numWorkers
            && type == other.type
            && (message != null ? message.equals(other.message) : other.message == null);
    }

    @Override
    public int hashCode() {
        // Arithmetic kept identical to the previous hand-rolled version so
        // hash values are unchanged.
        int result = type != null ? type.hashCode() : 0;
        result = 31 * result + stage;
        final long valueBits = Double.doubleToLongBits(value);
        result = 31 * result + (int) (valueBits ^ (valueBits >>> 32));
        result = 31 * result + numWorkers;
        result = 31 * result + (message != null ? message.hashCode() : 0);
        return result;
    }

    @Override
    public String toString() {
        final StringBuilder text = new StringBuilder("Event{");
        text.append("type=").append(type);
        text.append(", stage=").append(stage);
        text.append(", value=").append(value);
        text.append(", numWorkers=").append(numWorkers);
        text.append(", message='").append(message).append('\'');
        text.append('}');
        return text.toString();
    }
}
public class StageScaler {
private final int stage;
private final StageSchedulingInfo stageSchedulingInfo;
private final AtomicReference<Subscription> inProgressScalingSubscription = new AtomicReference<>(null);
private final Func1<Observable<? extends Throwable>, Observable<?>> retryLogic = attempts -> attempts
.zipWith(Observable.range(1, Integer.MAX_VALUE), (Func2<Throwable, Integer, Integer>) (t1, integer) -> integer)
.flatMap((Func1<Integer, Observable<?>>) integer -> {
long delay = 2 * (integer > 5 ? 10 : integer);
logger.info("retrying scaleJobStage request after sleeping for " + delay + " secs");
return Observable.timer(delay, TimeUnit.SECONDS);
});
public StageScaler(int stage, StageSchedulingInfo stageSchedulingInfo) {
this.stage = stage;
this.stageSchedulingInfo = stageSchedulingInfo;
}
private void cancelOutstandingScalingRequest() {
if (inProgressScalingSubscription.get() != null && !inProgressScalingSubscription.get().isUnsubscribed()) {
inProgressScalingSubscription.get().unsubscribe();
inProgressScalingSubscription.set(null);
}
}
private void setOutstandingScalingRequest(final Subscription subscription) {
inProgressScalingSubscription.compareAndSet(null, subscription);
}
public int getDesiredWorkersForScaleUp(final int increment, final int numCurrentWorkers) {
final int desiredWorkers;
if (!stageSchedulingInfo.getScalingPolicy().isEnabled()) {
logger.warn("Job " + jobId + " stage " + stage + " is not scalable, can't increment #workers by " + increment);
return numCurrentWorkers;
}
if (numCurrentWorkers < 0 || increment < 1) {
logger.error("current number of workers({}) not known or increment({}) < 1, will not scale up", numCurrentWorkers, increment);
return numCurrentWorkers;
} else {
final int maxWorkersForStage = stageSchedulingInfo.getScalingPolicy().getMax();
desiredWorkers = Math.min(numCurrentWorkers + increment, maxWorkersForStage);
return desiredWorkers;
}
}
public void scaleUpStage(final int numCurrentWorkers, final int desiredWorkers, final String reason) {
logger.info("scaleUpStage incrementing number of workers from {} to {}", numCurrentWorkers, desiredWorkers);
cancelOutstandingScalingRequest();
final Subscription subscription = masterClientApi.scaleJobStage(jobId, stage, desiredWorkers, reason)
.retryWhen(retryLogic)
.onErrorResumeNext(throwable -> {
logger.error("caught error when scaling up stage {}", stage);
return Observable.empty();
})
.subscribe();
setOutstandingScalingRequest(subscription);
}
public int getDesiredWorkersForScaleDown(final int decrement, final int numCurrentWorkers) {
final int desiredWorkers;
if (!stageSchedulingInfo.getScalingPolicy().isEnabled()) {
logger.warn("Job " + jobId + " stage " + stage + " is not scalable, can't decrement #workers by " + decrement);
return numCurrentWorkers;
}
if (numCurrentWorkers < 0 || decrement < 1) {
logger.error("current number of workers({}) not known or decrement({}) < 1, will not scale down", numCurrentWorkers, decrement);
return numCurrentWorkers;
} else {
int min = stageSchedulingInfo.getScalingPolicy().getMin();
desiredWorkers = Math.max(numCurrentWorkers - decrement, min);
}
return desiredWorkers;
}
public void scaleDownStage(final int numCurrentWorkers, final int desiredWorkers, final String reason) {
logger.info("scaleDownStage decrementing number of workers from {} to {}", numCurrentWorkers, desiredWorkers);
cancelOutstandingScalingRequest();
final Subscription subscription = masterClientApi.scaleJobStage(jobId, stage, desiredWorkers, reason)
.retryWhen(retryLogic)
.onErrorResumeNext(throwable -> {
logger.error("caught error when scaling down stage {}", stage);
return Observable.empty();
})
.subscribe();
setOutstandingScalingRequest(subscription);
}
public int getStage() {
return stage;
}
}
/**
 * Rx Operator that consumes autoscaling {@link Event}s for one stage and triggers
 * scale up/down via a {@link StageScaler} when the configured thresholds are crossed,
 * subject to the policy's cooldown window.
 */
private class StageScaleOperator<T, R> implements Observable.Operator<Object, Event> {

    private final int stage;
    private final StageSchedulingInfo stageSchedulingInfo;
    private final StageScaler scaler;
    // Wall-clock millis of the last scaling action; gates the cooldown check below.
    private volatile long lastScaledAt = 0L;

    private StageScaleOperator(int stage,
                               StageSchedulingInfo stageSchedulingInfo) {
        this.stage = stage;
        this.stageSchedulingInfo = stageSchedulingInfo;
        this.scaler = new StageScaler(stage, this.stageSchedulingInfo);
        logger.info("cooldownSecs set to {}", stageSchedulingInfo.getScalingPolicy().getCoolDownSecs());
    }

    @Override
    public Subscriber<? super Event> call(final Subscriber<? super Object> child) {
        return new Subscriber<Event>() {
            // Rolling stats per scaling reason; threshold triggers fire only after the
            // configured rolling count of consecutive high/low samples.
            private final Map<StageScalingPolicy.ScalingReason, UsageDataStats> dataStatsMap = new HashMap<>();

            @Override
            public void onCompleted() {
                child.unsubscribe();
            }

            @Override
            public void onError(Throwable e) {
                // Log and keep the subscription alive; the metric stream is best-effort.
                logger.error("Unexpected error: " + e.getMessage(), e);
            }

            @Override
            public void onNext(Event event) {
                final StageScalingPolicy scalingPolicy = stageSchedulingInfo.getScalingPolicy();
                // No policy => effectively infinite cooldown, i.e. never act.
                long coolDownSecs = scalingPolicy == null ? Long.MAX_VALUE : scalingPolicy.getCoolDownSecs();
                boolean scalable = stageSchedulingInfo.getScalable() && scalingPolicy != null && scalingPolicy.isEnabled();
                logger.debug("Will check for autoscaling job " + jobId + " stage " + stage + " due to event: " + event);
                if (scalable && scalingPolicy != null) {
                    // Only reasons with a configured strategy participate in scaling.
                    final StageScalingPolicy.Strategy strategy = scalingPolicy.getStrategies().get(event.getType());
                    if (strategy != null) {
                        double effectiveValue = Util.getEffectiveValue(stageSchedulingInfo, event.getType(), event.getValue());
                        UsageDataStats stats = dataStatsMap.get(event.getType());
                        if (stats == null) {
                            stats = new UsageDataStats(
                                    strategy.getScaleUpAbovePct(), strategy.getScaleDownBelowPct(), strategy.getRollingCount());
                            dataStatsMap.put(event.getType(), stats);
                        }
                        stats.add(effectiveValue);
                        // Respect the cooldown window between scaling actions.
                        if (lastScaledAt < (System.currentTimeMillis() - coolDownSecs * 1000)) {
                            logger.info(jobId + ", stage " + stage + ": eff=" +
                                    String.format(PercentNumberFormat, effectiveValue) + ", thresh=" + strategy.getScaleUpAbovePct());
                            if (stats.getHighThreshTriggered()) {
                                logger.info("Attempting to scale up stage " + stage + " of job " + jobId + " by " +
                                        scalingPolicy.getIncrement() + " workers, because " +
                                        event.type + " exceeded scaleUpThreshold of " +
                                        String.format(PercentNumberFormat, strategy.getScaleUpAbovePct()) + " " +
                                        stats.getCurrentHighCount() + " times");
                                final int numCurrWorkers = event.getNumWorkers();
                                final int desiredWorkers = scaler.getDesiredWorkersForScaleUp(scalingPolicy.getIncrement(), numCurrWorkers);
                                // Only issue a request (and reset cooldown) when the target differs.
                                if (desiredWorkers > numCurrWorkers) {
                                    scaler.scaleUpStage(numCurrWorkers, desiredWorkers, event.getType() + " with value " +
                                            String.format(PercentNumberFormat, effectiveValue) +
                                            " exceeded scaleUp threshold of " + strategy.getScaleUpAbovePct());
                                    lastScaledAt = System.currentTimeMillis();
                                    logger.info("lastScaledAt set to {} after scale up request", lastScaledAt);
                                } else {
                                    logger.debug("scale up NOOP: desiredWorkers same as current workers");
                                }
                            } else if (stats.getLowThreshTriggered()) {
                                logger.info("Attempting to scale down stage " + stage + " of job " + jobId + " by " +
                                        scalingPolicy.getDecrement() + " workers because " + event.getType() +
                                        " is below scaleDownThreshold of " + strategy.getScaleDownBelowPct() +
                                        " " + stats.getCurrentLowCount() + " times");
                                final int numCurrentWorkers = event.getNumWorkers();
                                final int desiredWorkers = scaler.getDesiredWorkersForScaleDown(scalingPolicy.getDecrement(), numCurrentWorkers);
                                if (desiredWorkers < numCurrentWorkers) {
                                    scaler.scaleDownStage(numCurrentWorkers, desiredWorkers, event.getType() + " with value " +
                                            String.format(PercentNumberFormat, effectiveValue) +
                                            " is below scaleDown threshold of " + strategy.getScaleDownBelowPct());
                                    lastScaledAt = System.currentTimeMillis();
                                    logger.info("lastScaledAt set to {} after scale down request", lastScaledAt);
                                } else {
                                    logger.debug("scale down NOOP: desiredWorkers same as current workers");
                                }
                            }
                        } else {
                            logger.debug("lastScaledAt {} within cooldown period", lastScaledAt);
                        }
                    }
                }
            }
        };
    }
}
}
| 8,451 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/SourceJobWorkerMetricsSubscription.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster;
import io.mantisrx.common.MantisServerSentEvent;
import io.mantisrx.runtime.parameter.SourceJobParameters;
import io.mantisrx.server.core.NamedJobInfo;
import io.mantisrx.server.master.client.MantisMasterGateway;
import io.mantisrx.server.worker.client.WorkerMetricsClient;
import java.util.*;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
/**
 * Manages subscriptions to source job workers: for each distinct source job referenced by the
 * configured targets, resolves the job id via the master and subscribes to the workers' metric
 * streams for the metric groups relevant to this job's client ids.
 */
public class SourceJobWorkerMetricsSubscription {

    private static final Logger logger = LoggerFactory.getLogger(SourceJobWorkerMetricsSubscription.class);

    private final List<SourceJobParameters.TargetInfo> targetInfos;
    private final MantisMasterGateway masterClient;
    private final WorkerMetricsClient workerMetricsClient;
    private final AutoScaleMetricsConfig metricsConfig;

    public SourceJobWorkerMetricsSubscription(List<SourceJobParameters.TargetInfo> targetInfos,
                                              MantisMasterGateway masterClient,
                                              WorkerMetricsClient workerMetricsClient,
                                              AutoScaleMetricsConfig metricsConfig) {
        this.targetInfos = targetInfos;
        this.masterClient = masterClient;
        this.workerMetricsClient = workerMetricsClient;
        this.metricsConfig = metricsConfig;
    }

    /**
     * Builds one merged stream of per-worker metric streams across all source jobs.
     *
     * @return merged observable of the metric streams of every source job's workers.
     */
    public Observable<Observable<MantisServerSentEvent>> getResults() {
        return Observable.merge(getSourceJobToClientMap().entrySet().stream().map(entry -> {
            String sourceJobName = entry.getKey();
            Set<String> clientIds = entry.getValue();
            // Expand the client ids into the concrete source-job metric group names to subscribe to.
            Set<String> sourceJobMetrics = metricsConfig.generateSourceJobMetricGroups(clientIds);
            return masterClient
                    .namedJobInfo(sourceJobName)
                    .map(NamedJobInfo::getJobId)
                    .flatMap(jobId -> getResultsForJobId(jobId, sourceJobMetrics));
        }).collect(Collectors.toList()));
    }

    /** Subscribes to the given job's worker metrics for the given metric groups. */
    protected Observable<Observable<MantisServerSentEvent>> getResultsForJobId(String jobId, Set<String> sourceJobMetrics) {
        return new WorkerMetricSubscription(jobId, workerMetricsClient, sourceJobMetrics).getMetricsClient().getResults();
    }

    /**
     * Groups the configured targets by source job name.
     *
     * @return map of source job name to the set of client ids that consume from it.
     */
    protected Map<String, Set<String>> getSourceJobToClientMap() {
        Map<String, Set<String>> results = new HashMap<>();
        for (SourceJobParameters.TargetInfo info : targetInfos) {
            // computeIfAbsent replaces the previous get/null-check/put sequence.
            results.computeIfAbsent(info.sourceJobName, name -> new HashSet<>()).add(info.clientId);
        }
        return results;
    }
}
| 8,452 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/MetricData.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster;
import io.mantisrx.common.metrics.measurement.GaugeMeasurement;
import java.util.List;
/**
 * Immutable carrier for one metric-group reading reported by a single worker of a job:
 * identifies the job/stage/worker and holds the gauge values for one metric group.
 */
class MetricData {

    private final String jobId;
    private final int stage;
    private final int workerIndex;
    private final int workerNumber;
    private final String metricGroupName;
    private final GaugeData gauges;

    /**
     * Convenience constructor that wraps the raw gauge measurements in a
     * {@link GaugeData} stamped with the current wall-clock time.
     */
    MetricData(final String jobId, final int stage, final int workerIndex, final int workerNumber,
               final String metricGroupName, final List<GaugeMeasurement> gaugeMeasurements) {
        this(jobId, stage, workerIndex, workerNumber, metricGroupName,
                new GaugeData(System.currentTimeMillis(), gaugeMeasurements));
    }

    MetricData(final String jobId, final int stage, final int workerIndex, final int workerNumber,
               final String metricGroupName, final GaugeData gaugeData) {
        this.jobId = jobId;
        this.stage = stage;
        this.workerIndex = workerIndex;
        this.workerNumber = workerNumber;
        this.metricGroupName = metricGroupName;
        this.gauges = gaugeData;
    }

    String getJobId() {
        return jobId;
    }

    int getStage() {
        return stage;
    }

    int getWorkerIndex() {
        return workerIndex;
    }

    int getWorkerNumber() {
        return workerNumber;
    }

    public String getMetricGroupName() {
        return metricGroupName;
    }

    public GaugeData getGaugeData() {
        return gauges;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("MetricData{");
        sb.append("jobId='").append(jobId).append('\'')
          .append(", stage=").append(stage)
          .append(", workerIndex=").append(workerIndex)
          .append(", workerNumber=").append(workerNumber)
          .append(", metricGroupName='").append(metricGroupName).append('\'')
          .append(", gauges=").append(gauges)
          .append('}');
        return sb.toString();
    }
}
| 8,453 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/AutoScaleMetricsConfig.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster;
import static io.mantisrx.server.core.stats.MetricStringConstants.*;
import static io.reactivex.mantis.network.push.PushServerSse.*;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;
/**
 * Configuration of the metric groups/metrics the job master subscribes to for autoscaling,
 * together with the aggregation algorithm ({@link AggregationAlgo}) to apply per metric.
 * Combines built-in defaults, user-defined metrics, and source-job drop-metric patterns
 * (which contain a {@code _CLIENT_ID_} placeholder expanded per client id).
 */
public class AutoScaleMetricsConfig {

    public static final String CLIENT_ID_TOKEN = "_CLIENT_ID_";
    public static final String OUTBOUND_METRIC_GROUP_PATTERN = String.format("%s:%s=%s:*", PUSH_SERVER_METRIC_GROUP_NAME, CLIENT_ID_TAG_NAME, CLIENT_ID_TOKEN);
    public static final String OUTBOUND_LEGACY_METRIC_GROUP_PATTERN = String.format("%s:%s=%s:*", PUSH_SERVER_LEGACY_METRIC_GROUP_NAME, CLIENT_ID_TAG_NAME, CLIENT_ID_TOKEN);

    private static final AggregationAlgo DEFAULT_ALGO = AggregationAlgo.AVERAGE;

    // autoscaling metric groups to subscribe to by default
    private static final Map<String, Map<String, AggregationAlgo>> defaultAutoScaleMetrics = new HashMap<>();

    // Source-job metric groups keyed by their raw pattern string (with CLIENT_ID_TOKEN),
    // plus the compiled regex for matching concrete group names against each pattern.
    private final Map<String, Map<String, AggregationAlgo>> sourceJobMetrics = new HashMap<>();
    private final Map<String, Pattern> sourceJobMetricsPatterns = new HashMap<>();

    static {
        defaultAutoScaleMetrics.put(RESOURCE_USAGE_METRIC_GROUP, new HashMap<>());
        defaultAutoScaleMetrics.put(DATA_DROP_METRIC_GROUP, new HashMap<>());
        final Map<String, AggregationAlgo> defaultKafkaConsumerMetric = new HashMap<>();
        defaultKafkaConsumerMetric.put(KAFKA_LAG, AggregationAlgo.MAX);
        defaultAutoScaleMetrics.put(KAFKA_CONSUMER_FETCH_MGR_METRIC_GROUP, defaultKafkaConsumerMetric);
        final Map<String, AggregationAlgo> defaultWorkerStageInnerInputMetric = new HashMap<>();
        defaultWorkerStageInnerInputMetric.put(ON_NEXT_GAUGE, AggregationAlgo.AVERAGE);
        defaultAutoScaleMetrics.put(WORKER_STAGE_INNER_INPUT, defaultWorkerStageInnerInputMetric);
    }

    private final Map<String, Map<String, AggregationAlgo>> userDefinedAutoScaleMetrics;

    public AutoScaleMetricsConfig() {
        this(new HashMap<>());
    }

    public AutoScaleMetricsConfig(final Map<String, Map<String, AggregationAlgo>> userDefinedAutoScaleMetrics) {
        this.userDefinedAutoScaleMetrics = userDefinedAutoScaleMetrics;
        // Default source-job drop metric (outbound + legacy outbound groups), aggregated via MAX.
        final Map<String, AggregationAlgo> defaultOutboundMetric = new HashMap<>();
        defaultOutboundMetric.put(DROPPED_COUNTER_METRIC_NAME, AggregationAlgo.MAX);
        sourceJobMetrics.put(OUTBOUND_METRIC_GROUP_PATTERN, defaultOutboundMetric);
        sourceJobMetrics.put(OUTBOUND_LEGACY_METRIC_GROUP_PATTERN, defaultOutboundMetric);
        sourceJobMetricsPatterns.put(OUTBOUND_METRIC_GROUP_PATTERN, generateSourceJobMetricPattern(OUTBOUND_METRIC_GROUP_PATTERN));
        sourceJobMetricsPatterns.put(OUTBOUND_LEGACY_METRIC_GROUP_PATTERN, generateSourceJobMetricPattern(OUTBOUND_LEGACY_METRIC_GROUP_PATTERN));
    }

    /** Registers (or overrides) a user-defined metric and its aggregation algorithm. */
    public void addUserDefinedMetric(final String metricGroupName,
                                     final String metricName,
                                     final AggregationAlgo algo) {
        userDefinedAutoScaleMetrics.putIfAbsent(metricGroupName, new HashMap<>());
        userDefinedAutoScaleMetrics.get(metricGroupName).put(metricName, algo);
    }

    /**
     * Add source job drop metric patterns in addition to the default patterns.
     * @param metricsStr comma separated list of metrics in the form of metricGroupName::metricName::algo
     * @throws RuntimeException if any entry does not follow the expected three-part format.
     */
    public void addSourceJobDropMetrics(String metricsStr) {
        if (metricsStr == null) {
            return;
        }
        for (String metric : metricsStr.split(",")) {
            metric = metric.trim();
            if (metric.isEmpty()) {
                continue;
            }
            try {
                String[] parts = metric.split("::");
                String metricGroupName = parts[0];
                String metricName = parts[1];
                AggregationAlgo algo = AggregationAlgo.valueOf(parts[2]);
                Map<String, AggregationAlgo> metricGroup = sourceJobMetrics.get(metricGroupName);
                if (metricGroup == null) {
                    metricGroup = new HashMap<>();
                    sourceJobMetrics.put(metricGroupName, metricGroup);
                    // Keep the compiled-pattern map in lockstep with the metric map.
                    sourceJobMetricsPatterns.put(metricGroupName, generateSourceJobMetricPattern(metricGroupName));
                }
                metricGroup.put(metricName, algo);
            } catch (Exception ex) {
                String errMsg = String.format("Invalid format for source job metric: %s", metricsStr);
                throw new RuntimeException(errMsg, ex);
            }
        }
    }

    /**
     * Resolves the aggregation algorithm for a metric, checking user-defined metrics first,
     * then built-in defaults, then source-job patterns; falls back to {@code AVERAGE}.
     */
    public AggregationAlgo getAggregationAlgo(final String metricGroupName, final String metricName) {
        // Single lookup per map instead of the previous containsKey + get/getOrDefault pairs.
        final Map<String, AggregationAlgo> userGroup = userDefinedAutoScaleMetrics.get(metricGroupName);
        if (userGroup != null && userGroup.containsKey(metricName)) {
            return userGroup.get(metricName);
        }
        final Map<String, AggregationAlgo> defaultGroup = defaultAutoScaleMetrics.get(metricGroupName);
        if (defaultGroup != null && defaultGroup.containsKey(metricName)) {
            return defaultGroup.get(metricName);
        }
        for (Map.Entry<String, Pattern> entry : sourceJobMetricsPatterns.entrySet()) {
            if (entry.getValue().matcher(metricGroupName).matches()) {
                return sourceJobMetrics.get(entry.getKey()).getOrDefault(metricName, DEFAULT_ALGO);
            }
        }
        return DEFAULT_ALGO;
    }

    /**
     * Returns default plus user-defined metrics (user-defined entries win on group-name clash).
     * Note: the returned sets are live keySet views of the internal maps.
     */
    public Map<String, Set<String>> getAllMetrics() {
        final Map<String, Set<String>> metrics = new HashMap<>();
        for (Map.Entry<String, Map<String, AggregationAlgo>> entry : defaultAutoScaleMetrics.entrySet()) {
            metrics.put(entry.getKey(),
                    entry.getValue().keySet());
        }
        for (Map.Entry<String, Map<String, AggregationAlgo>> entry : userDefinedAutoScaleMetrics.entrySet()) {
            metrics.put(entry.getKey(),
                    entry.getValue().keySet());
        }
        return metrics;
    }

    /** Returns only the user-defined metrics (live keySet views, see {@link #getAllMetrics()}). */
    public Map<String, Set<String>> getUserDefinedMetrics() {
        final Map<String, Set<String>> metrics = new HashMap<>();
        for (Map.Entry<String, Map<String, AggregationAlgo>> entry : userDefinedAutoScaleMetrics.entrySet()) {
            metrics.put(entry.getKey(),
                    entry.getValue().keySet());
        }
        return metrics;
    }

    public Set<String> getMetricGroups() {
        return getAllMetrics().keySet();
    }

    /** Expands each source-job metric pattern once per client id (CLIENT_ID_TOKEN substitution). */
    public Set<String> generateSourceJobMetricGroups(Set<String> clientIds) {
        Set<String> results = new HashSet<>();
        for (String clientId : clientIds) {
            for (String metricPattern : sourceJobMetrics.keySet()) {
                results.add(metricPattern.replaceAll(CLIENT_ID_TOKEN, clientId));
            }
        }
        return results;
    }

    /** True iff the (group, metric) pair matches a registered source-job drop metric. */
    public boolean isSourceJobDropMetric(String metricGroupName, String metricName) {
        for (Map.Entry<String, Pattern> entry : sourceJobMetricsPatterns.entrySet()) {
            if (entry.getValue().matcher(metricGroupName).matches()) {
                // containsKey instead of keySet().contains(...)
                return sourceJobMetrics.get(entry.getKey()).containsKey(metricName);
            }
        }
        return false;
    }

    // Turns a pattern string with '*' wildcards and CLIENT_ID_TOKEN placeholders into a regex.
    private static Pattern generateSourceJobMetricPattern(String metricGroupName) {
        return Pattern.compile(metricGroupName.replace("*", ".*").replaceAll(CLIENT_ID_TOKEN, ".*"));
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        AutoScaleMetricsConfig that = (AutoScaleMetricsConfig) o;
        return userDefinedAutoScaleMetrics != null ? userDefinedAutoScaleMetrics.equals(that.userDefinedAutoScaleMetrics) : that.userDefinedAutoScaleMetrics == null;
    }

    @Override
    public int hashCode() {
        return userDefinedAutoScaleMetrics != null ? userDefinedAutoScaleMetrics.hashCode() : 0;
    }

    @Override
    public String toString() {
        return "AutoScaleMetricsConfig{" +
                "userDefinedAutoScaleMetrics=" + userDefinedAutoScaleMetrics +
                '}';
    }

    /** How samples of a metric are folded together across workers. */
    public enum AggregationAlgo {
        AVERAGE,
        MAX
    }
}
| 8,454 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/control/AdaptiveAutoscaler.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.control;
import io.mantisrx.server.worker.jobmaster.JobAutoScaler;
import io.mantisrx.server.worker.jobmaster.control.actuators.MantisStageActuator;
import io.mantisrx.server.worker.jobmaster.control.controllers.PIDController;
import io.mantisrx.server.worker.jobmaster.control.utils.ErrorComputer;
import io.mantisrx.server.worker.jobmaster.control.utils.Integrator;
import java.util.concurrent.atomic.AtomicLong;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
/**
* AutoScaler is an Rx Transformer which can be composed with a stream
* of Metrics in order to scale a specific stage.
*/
/**
 * AutoScaler is an Rx Transformer which can be composed with a stream of metrics in order
 * to scale a specific stage: metrics are filtered to stable samples, converted into an
 * error signal, run through a PID controller, integrated within the configured bounds,
 * and finally actuated against the stage.
 */
public class AdaptiveAutoscaler implements Observable.Transformer<JobAutoScaler.Event, Object> {

    private static Logger logger = LoggerFactory.getLogger(AdaptiveAutoscaler.class);

    private final AdaptiveAutoscalerConfig config;
    private final JobAutoScaler.StageScaler scaler;
    private final long initialSize;
    // The scale we most recently requested; samples taken at any other scale are ignored.
    private final AtomicLong targetScale = new AtomicLong(0);

    public AdaptiveAutoscaler(AdaptiveAutoscalerConfig config, JobAutoScaler.StageScaler scaler, int initialSize) {
        this.config = config;
        this.scaler = scaler;
        this.initialSize = initialSize;
        this.targetScale.set(initialSize);
    }

    @Override
    public Observable<Object> call(Observable<JobAutoScaler.Event> metrics) {
        // Only act on samples observed while the stage is at the scale we last requested;
        // samples taken mid-transition would skew the controller.
        Observable<Double> stableMeasurements = metrics
                .filter(event -> ((long) event.getNumWorkers()) == targetScale.get())
                .map(JobAutoScaler.Event::getValue);
        // error -> PID -> bounded integral -> actuate on the stage
        Observable<Double> controlled = stableMeasurements
                .lift(new ErrorComputer(config.setPoint, config.invert, config.rope))
                .lift(PIDController.of(config.kp, config.ki, config.kd))
                .lift(new Integrator(this.initialSize, config.minScale, config.maxScale))
                .lift(new MantisStageActuator(this.initialSize, scaler));
        return controlled
                .map(Math::round)
                .doOnNext(targetScale::set)
                .map(scale -> (Object) scale);
    }
}
| 8,455 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/control/Controller.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.control;
import rx.Observable;
import rx.Subscriber;
/**
* The Feedback Principle: Constantly compare the actual output to the
* setpoint; then apply a corrective action in the proper direction and
* approximately of the correct size.
* <p>
* Iteratively applying changes in the correct direction allows this
* system to converge onto the correct value over time.
*/
/**
 * The Feedback Principle: Constantly compare the actual output to the
 * setpoint; then apply a corrective action in the proper direction and
 * approximately of the correct size.
 * <p>
 * Iteratively applying changes in the correct direction allows this
 * system to converge onto the correct value over time.
 */
public abstract class Controller implements Observable.Operator<Double, Double> {

    /** Computes the control action for a single error sample. */
    abstract protected Double processStep(Double error);

    @Override
    public Subscriber<? super Double> call(final Subscriber<? super Double> downstream) {
        return new Subscriber<Double>(downstream) {
            @Override
            public void onCompleted() {
                if (downstream.isUnsubscribed()) {
                    return;
                }
                downstream.onCompleted();
            }

            @Override
            public void onError(Throwable t) {
                if (downstream.isUnsubscribed()) {
                    return;
                }
                downstream.onError(t);
            }

            @Override
            public void onNext(Double error) {
                // Transform each error sample into a control action before forwarding.
                final Double controlAction = processStep(error);
                if (downstream.isUnsubscribed()) {
                    return;
                }
                downstream.onNext(controlAction);
            }
        };
    }
}
| 8,456 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/control/AdaptiveAutoscalerConfig.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.control;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
/**
 * Immutable configuration for {@link AdaptiveAutoscaler}: the metric to control on,
 * the PID gains, the setpoint/rope, and the scale bounds.
 */
public class AdaptiveAutoscalerConfig {

    // Metric (scaling reason) the controller acts on.
    public final StageScalingPolicy.ScalingReason metric;
    // Target value for the controlled metric.
    public final double setPoint;
    // Whether the error signal should be inverted.
    public final boolean invert;
    // Dead band around the setpoint within which no action is taken.
    public final double rope;

    // Gain
    public final double kp;
    public final double ki;
    public final double kd;

    public final double minScale;
    public final double maxScale;

    @java.beans.ConstructorProperties( {"metric", "setPoint", "invert", "rope", "kp", "ki", "kd", "minScale", "maxScale"})
    public AdaptiveAutoscalerConfig(StageScalingPolicy.ScalingReason metric, double setPoint, boolean invert, double rope, double kp, double ki, double kd, double minScale, double maxScale) {
        this.metric = metric;
        this.setPoint = setPoint;
        this.invert = invert;
        this.rope = rope;
        this.kp = kp;
        this.ki = ki;
        this.kd = kd;
        this.minScale = minScale;
        this.maxScale = maxScale;
    }

    /** Ad-hoc smoke test: parses a sample JSON config and prints it. */
    public static void main(String[] args) throws IOException {
        ObjectMapper objectMapper = new ObjectMapper();
        String cfg = "{\"metric\": \"KafkaProcessed\", \"setPoint\": 1, \"invert\": true, \"rope\": 0, \"kp\": 0.1, \"ki\": 0.00, \"kd\": 0.00, \"minScale\": 1, \"maxScale\": 5}\n";
        // A plain class literal suffices here; no generics to capture.
        AdaptiveAutoscalerConfig config = objectMapper.readValue(cfg, AdaptiveAutoscalerConfig.class);
        System.out.println(config.toString());
    }

    public StageScalingPolicy.ScalingReason getMetric() {
        return this.metric;
    }

    public double getSetPoint() {
        return this.setPoint;
    }

    public boolean isInvert() {
        return this.invert;
    }

    public double getRope() {
        return this.rope;
    }

    public double getKp() {
        return this.kp;
    }

    public double getKi() {
        return this.ki;
    }

    public double getKd() {
        return this.kd;
    }

    public double getMinScale() {
        return this.minScale;
    }

    public double getMaxScale() {
        return this.maxScale;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) return true;
        if (!(o instanceof AdaptiveAutoscalerConfig)) return false;
        final AdaptiveAutoscalerConfig other = (AdaptiveAutoscalerConfig) o;
        // Double.compare matches the previous generated-code semantics (NaN-safe, -0.0 != 0.0).
        return (metric == null ? other.metric == null : metric.equals(other.metric))
                && Double.compare(setPoint, other.setPoint) == 0
                && invert == other.invert
                && Double.compare(rope, other.rope) == 0
                && Double.compare(kp, other.kp) == 0
                && Double.compare(ki, other.ki) == 0
                && Double.compare(kd, other.kd) == 0
                && Double.compare(minScale, other.minScale) == 0
                && Double.compare(maxScale, other.maxScale) == 0;
    }

    @Override
    public int hashCode() {
        // Double.hashCode(d) == (int) (bits ^ (bits >>> 32)), so the values produced here
        // are identical to the previous hand-expanded doubleToLongBits folding.
        final int prime = 59;
        int result = 1;
        result = result * prime + (metric == null ? 43 : metric.hashCode());
        result = result * prime + Double.hashCode(setPoint);
        result = result * prime + (invert ? 79 : 97);
        result = result * prime + Double.hashCode(rope);
        result = result * prime + Double.hashCode(kp);
        result = result * prime + Double.hashCode(ki);
        result = result * prime + Double.hashCode(kd);
        result = result * prime + Double.hashCode(minScale);
        result = result * prime + Double.hashCode(maxScale);
        return result;
    }

    @Override
    public String toString() {
        return "AdaptiveAutoscalerConfig(metric=" + this.getMetric() + ", setPoint=" + this.getSetPoint() + ", invert=" + this.isInvert() + ", rope=" + this.getRope() + ", kp=" + this.getKp() + ", ki=" + this.getKi() + ", kd=" + this.getKd() + ", minScale=" + this.getMinScale() + ", maxScale=" + this.getMaxScale() + ")";
    }
}
| 8,457 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/control | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/control/utils/Derivative.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.control.utils;
import io.mantisrx.server.worker.jobmaster.control.Controller;
/**
 * A {@link Controller} that emits the discrete derivative of its input
 * signal, i.e. the difference between successive inputs.
 *
 * <p>The first input cannot produce a difference, so it is recorded as the
 * baseline and {@code 0.0} is emitted; every later step emits
 * {@code input - previous}.
 */
public class Derivative extends Controller {

    // Most recently observed input; only meaningful once initialized is true.
    private double last = 0;
    // Whether at least one input has been observed yet.
    private boolean initialized = false;

    @Override
    protected Double processStep(Double input) {
        if (initialized) {
            double output = input - last;
            this.last = input;
            return output;
        }
        // First sample: prime the state so the next step can differentiate.
        // BUG FIX: previously `initialized` was never set and `last` was never
        // primed, so this controller always returned 0.0 (the old TODO alluded
        // to exactly this).
        this.last = input;
        this.initialized = true;
        return 0.0;
    }
}
| 8,458 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/control | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/control/utils/Integrator.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.control.utils;
import io.mantisrx.server.worker.jobmaster.control.Controller;
/**
 * A {@link Controller} that integrates (sums) its input signal over time,
 * saturating the running total into an optional [min, max] band.
 */
public class Integrator extends Controller {

    // Running total of all inputs seen so far.
    private double sum = 0;
    // Saturation bounds applied after every accumulation step.
    private double min = Double.NEGATIVE_INFINITY;
    private double max = Double.POSITIVE_INFINITY;

    /** Starts the running total at zero with unbounded saturation. */
    public Integrator() {
    }

    /** Starts the running total at {@code init} with unbounded saturation. */
    public Integrator(double init) {
        this.sum = init;
    }

    /** Starts the running total at {@code init}, saturated into [{@code min}, {@code max}]. */
    public Integrator(double init, double min, double max) {
        this.sum = init;
        this.min = min;
        this.max = max;
    }

    /**
     * Forcibly resets the accumulated total. This exists as a Clutch-specific
     * hook; ideally the Rx pipeline would be rebuilt instead of mutating the
     * integrator state in place.
     *
     * @param val The value to which this integrator will be set.
     */
    public void setSum(double val) {
        this.sum = val;
    }

    @Override
    protected Double processStep(Double input) {
        // Accumulate, then clamp to max first and min second (matching the
        // historical precedence when the bounds are inverted).
        double next = sum + input;
        if (next > max) {
            next = max;
        }
        if (next < min) {
            next = min;
        }
        sum = next;
        return sum;
    }
}
| 8,459 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/control | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/control/utils/TransformerWrapper.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.control.utils;
import rx.Observable;
/*
Wraps an rx Operator into an rx Transformer
*/
/**
 * Adapts an rx {@link Observable.Operator} into an {@link Observable.Transformer}
 * so that an operator can be used with {@code Observable.compose}.
 */
public class TransformerWrapper<T, R> implements Observable.Transformer<T, R> {

    private final Observable.Operator<R, T> op;

    public TransformerWrapper(Observable.Operator<R, T> op) {
        this.op = op;
    }

    @Override
    public Observable<R> call(Observable<T> source) {
        // Composing is simply lifting the wrapped operator onto the source.
        return source.lift(op);
    }
}
| 8,460 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/control | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/control/utils/ErrorComputer.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.control.utils;
import io.mantisrx.server.worker.jobmaster.control.Controller;
/**
* The loss computation is generally the first step in a control system.
* The responsibility of this component is to compute the loss function
* on the output of the system under control.
* <p>
* Example:
* [ErrorComputer] -> PIDController -> Integrator -> Actuator
* <p>
* The loss acts as input for the control system.
*/
/**
 * Computes the control-system error (loss) between an observed metric and a
 * configured setpoint, treating every value inside a region of practical
 * equivalence (ROPE) around the setpoint as zero error.
 * <p>
 * Typically the first stage of a control pipeline:
 * [ErrorComputer] -> PIDController -> Integrator -> Actuator
 * <p>
 * The emitted error acts as the input for the rest of the control system.
 */
public class ErrorComputer extends Controller {

    private final double setPoint;
    private final boolean inverted;
    private final double lowerRope;
    private final double upperRope;

    /**
     * @param setPoint  The target value for the metric being tracked.
     * @param inverted  Whether to negate the output. Output is generally inverted when increasing
     *                  the plant input decreases the output — e.g. when autoscaling, adding worker
     *                  instances decreases messages processed per instance.
     * @param lowerRope Lower half-width of the region of practical equivalence (ROPE) — values this
     *                  far below the setpoint are considered equal to it.
     * @param upperRope Upper half-width of the ROPE — values this far above the setpoint are
     *                  considered equal to it.
     */
    public ErrorComputer(double setPoint, boolean inverted, double lowerRope, double upperRope) {
        this.setPoint = setPoint;
        this.inverted = inverted;
        this.lowerRope = lowerRope;
        this.upperRope = upperRope;
    }

    /** Convenience constructor for a symmetric ROPE of half-width {@code rope}. */
    public ErrorComputer(double setPoint, boolean inverted, double rope) {
        this(setPoint, inverted, rope, rope);
    }

    /**
     * Error relative to the ROPE boundaries: zero anywhere inside
     * [setPoint - lowerRope, setPoint + upperRope], and growing linearly with the
     * distance from the nearer boundary outside it. Without this, loss would jump
     * as a step function when crossing the threshold.
     *
     * @param setPoint  The configured setPoint.
     * @param observed  The observed metric to be compared to the setPoint.
     * @param lowerRope The ROPE half-width on the lower end.
     * @param upperRope The ROPE half-width on the upper end.
     * @return Error adjusted for the ROPE (negative above the band, positive below it).
     */
    public static double loss(double setPoint, double observed, double lowerRope, double upperRope) {
        final double upperBound = setPoint + upperRope;
        final double lowerBound = setPoint - lowerRope;
        if (observed > upperBound) {
            return upperBound - observed;
        }
        if (observed < lowerBound) {
            return lowerBound - observed;
        }
        return 0.0;
    }

    @Override
    public Double processStep(Double input) {
        final double error = loss(setPoint, input, lowerRope, upperRope);
        return inverted ? -1.0 * error : error;
    }
}
| 8,461 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/control | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/control/actuators/MantisStageActuator.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.control.actuators;
import com.netflix.control.IActuator;
import io.mantisrx.server.worker.jobmaster.JobAutoScaler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Actuator for Mantis Stages which acts as a lifter for the
* JobAutoScaler.StageScaler which brings it into the context of our
* control system DSL.
*/
/**
 * Actuator for Mantis stages which lifts {@link JobAutoScaler.StageScaler}
 * into the control-system DSL: it converts a (possibly fractional) desired
 * scale from the controller into concrete scale-up/scale-down requests.
 */
public class MantisStageActuator extends IActuator {

    private static final Logger logger = LoggerFactory.getLogger(MantisStageActuator.class);

    private final JobAutoScaler.StageScaler scaler;
    // Last worker count actually requested; determines the scaling direction.
    private long lastValue;

    /**
     * @param initialSize Current number of workers in the stage.
     * @param scaler      Performs the actual scale-up/scale-down requests.
     */
    public MantisStageActuator(long initialSize, JobAutoScaler.StageScaler scaler) {
        this.scaler = scaler;
        this.lastValue = initialSize;
    }

    /**
     * Rounds the controller output up to a whole worker count and issues a
     * scaling request if it differs from the last requested count.
     *
     * @param input Desired (possibly fractional) number of workers.
     * @return The whole number of workers requested, as a double.
     */
    protected Double processStep(Double input) {
        // Round up: a fractional worker requirement needs one more instance.
        long desiredNumWorkers = (long) Math.ceil(input);
        String reason = "Clutch determined " + desiredNumWorkers + " instance(s) for target resource usage.";
        if (desiredNumWorkers < lastValue) {
            scaler.scaleDownStage((int) lastValue, (int) desiredNumWorkers, reason);
            lastValue = desiredNumWorkers;
        } else if (desiredNumWorkers > lastValue) {
            scaler.scaleUpStage((int) lastValue, (int) desiredNumWorkers, reason);
            lastValue = desiredNumWorkers;
        }
        // Equal counts require no action.
        return (double) desiredNumWorkers;
    }
}
| 8,462 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/control | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/control/actuators/ClutchMantisStageActuator.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.control.actuators;
import io.mantisrx.server.worker.jobmaster.JobAutoScaler;
import io.vavr.Tuple3;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
/**
 * Rx transformer that actuates scaling decisions for Clutch: each incoming
 * tuple of (reason, desired scale, current worker count) is translated into a
 * scale-up/scale-down request via {@link JobAutoScaler.StageScaler}.
 */
public class ClutchMantisStageActuator implements Observable.Transformer<Tuple3<String, Double, Integer>, Double> {

    // BUG FIX: was LoggerFactory.getLogger(MantisStageActuator.class), which
    // misattributed this class's log lines to MantisStageActuator.
    private static final Logger logger = LoggerFactory.getLogger(ClutchMantisStageActuator.class);

    private final JobAutoScaler.StageScaler scaler;

    public ClutchMantisStageActuator(JobAutoScaler.StageScaler scaler) {
        this.scaler = scaler;
    }

    /**
     * Issues a scaling request when the desired count differs from the current one.
     *
     * @param tup (reason, desired scale — possibly fractional, current worker count).
     * @return The whole number of workers requested, as a double.
     */
    protected Double processStep(Tuple3<String, Double, Integer> tup) {
        // Round up: a fractional worker requirement needs one more instance.
        int desiredNumWorkers = (int) Math.ceil(tup._2);
        logger.info("Received request to scale to {} from {} workers.", desiredNumWorkers, tup._3);
        String reason = tup._1;
        if (desiredNumWorkers < tup._3) {
            scaler.scaleDownStage(tup._3, desiredNumWorkers, reason);
        } else if (desiredNumWorkers > tup._3) {
            scaler.scaleUpStage(tup._3, desiredNumWorkers, reason);
        }
        // Equal counts require no action.
        return desiredNumWorkers * 1.0;
    }

    @Override
    public Observable<Double> call(Observable<Tuple3<String, Double, Integer>> tuple2Observable) {
        return tuple2Observable.map(this::processStep);
    }
}
| 8,463 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/control | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/control/controllers/PIDController.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.control.controllers;
import io.mantisrx.server.worker.jobmaster.control.Controller;
import io.mantisrx.shaded.com.google.common.util.concurrent.AtomicDouble;
/**
* The Feedback Principle: Constantly compare the actual output to the
* setpoint; then apply a corrective action in the proper direction and
* approximately of the correct size.
* <p>
* Iteratively applying changes in the correct direction allows this
* system to converge onto the correct value over time.
*/
/**
 * A Proportional-Integral-Derivative (PID) three-term controller.
 * <p>
 * The feedback principle: constantly compare the actual output to the
 * setpoint, then apply a corrective action in the proper direction and of
 * approximately the correct size. Iteratively applying corrections in the
 * right direction lets the system converge on the target over time.
 */
public class PIDController extends Controller {

    private final Double kp; // Proportional gain
    private final Double ki; // Integral gain
    private final Double kd; // Derivative gain
    private final AtomicDouble dampener;
    private final double deltaT;

    private Double previousError = 0.0;
    private Double errorIntegral = 0.0;
    private Double errorDerivative = 0.0;

    /**
     * @param kp       Gain for the proportional component.
     * @param ki       Gain for the integral component.
     * @param kd       Gain for the derivative component.
     * @param deltaT   The time delta; 1.0 is a useful default.
     * @param dampener The gain dampening factor (shared, mutable).
     *                 <p>
     *                 Setting a component's gain to 0.0 disables that component;
     *                 e.g. {@code kd = 0.0} yields a PI (two-term) controller.
     */
    public PIDController(Double kp, Double ki, Double kd, Double deltaT, AtomicDouble dampener) {
        this.kp = kp;
        this.ki = ki;
        this.kd = kd;
        this.deltaT = deltaT;
        this.dampener = dampener;
    }

    /** Factory with an undampened (1.0) gain factor. */
    public static PIDController of(Double kp, Double ki, Double kd, Double deltaT) {
        return new PIDController(kp, ki, kd, deltaT, new AtomicDouble(1.0));
    }

    /** Factory with deltaT = 1.0 and an undampened (1.0) gain factor. */
    public static PIDController of(Double kp, Double ki, Double kd) {
        return new PIDController(kp, ki, kd, 1.0, new AtomicDouble(1.0));
    }

    /** Fully-specified factory. */
    public static PIDController of(Double kp, Double ki, Double kd, Double deltaT, AtomicDouble dampener) {
        return new PIDController(kp, ki, kd, deltaT, dampener);
    }

    @Override
    public Double processStep(Double error) {
        // Accumulate the integral and differentiate against the previous error.
        errorIntegral += deltaT * error;
        errorDerivative = (error - previousError) / deltaT;
        previousError = error;
        // Each gain is scaled by the current dampening factor; the factor is
        // re-read per term to preserve the original semantics under concurrent
        // updates to the shared AtomicDouble.
        return (dampener.get() * kp) * error
                + (dampener.get() * ki) * errorIntegral
                + (dampener.get() * kd) * errorDerivative;
    }
}
| 8,464 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/clutch/ClutchPIDConfig.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.clutch;
import io.vavr.Tuple2;
/**
 * Immutable PID tuning for a single resource dimension: the target setpoint,
 * a (lower, upper) region of practical equivalence, and the proportional and
 * derivative gains.
 */
public class ClutchPIDConfig {

    public final double setPoint;
    // (lower, upper) half-widths of the region of practical equivalence.
    public final Tuple2<Double, Double> rope;
    // Gain
    public final double kp;
    public final double kd;

    @java.beans.ConstructorProperties( {"setPoint", "rope", "kp", "kd"})
    public ClutchPIDConfig(double setPoint, Tuple2<Double, Double> rope, double kp, double kd) {
        this.setPoint = setPoint;
        this.rope = rope;
        this.kp = kp;
        this.kd = kd;
    }

    public double getSetPoint() {
        return this.setPoint;
    }

    public Tuple2<Double, Double> getRope() {
        return this.rope;
    }

    public double getKp() {
        return this.kp;
    }

    public double getKd() {
        return this.kd;
    }

    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        if (!(o instanceof ClutchPIDConfig)) {
            return false;
        }
        final ClutchPIDConfig that = (ClutchPIDConfig) o;
        if (Double.compare(this.getSetPoint(), that.getSetPoint()) != 0) {
            return false;
        }
        final Object thisRope = this.getRope();
        final Object thatRope = that.getRope();
        if (thisRope == null ? thatRope != null : !thisRope.equals(thatRope)) {
            return false;
        }
        return Double.compare(this.getKp(), that.getKp()) == 0
                && Double.compare(this.getKd(), that.getKd()) == 0;
    }

    public int hashCode() {
        final int prime = 59;
        int result = 1;
        final long setPointBits = Double.doubleToLongBits(this.getSetPoint());
        result = result * prime + (int) (setPointBits >>> 32 ^ setPointBits);
        final Object ropeValue = this.getRope();
        result = result * prime + (ropeValue == null ? 43 : ropeValue.hashCode());
        final long kpBits = Double.doubleToLongBits(this.getKp());
        result = result * prime + (int) (kpBits >>> 32 ^ kpBits);
        final long kdBits = Double.doubleToLongBits(this.getKd());
        result = result * prime + (int) (kdBits >>> 32 ^ kdBits);
        return result;
    }

    public String toString() {
        return "ClutchPIDConfig(setPoint=" + this.getSetPoint() + ", rope=" + this.getRope() + ", kp=" + this.getKp() + ", kd=" + this.getKd() + ")";
    }
}
| 8,465 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/clutch/ClutchConfiguration.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.clutch;
import io.mantisrx.server.worker.jobmaster.clutch.rps.ClutchRpsPIDConfig;
import io.vavr.control.Option;
/**
 * Top-level configuration for the Clutch autoscaler: stage size bounds, the
 * expected RPS, optional cooldown/panic/adjustment knobs, and optional
 * per-resource PID configurations (CPU, memory, network, RPS).
 *
 * <p>Instances are immutable; optional knobs are modeled with vavr {@link Option}.
 * The equals/hashCode/toString bodies follow the Lombok-generated style used
 * elsewhere in this package.
 */
public class ClutchConfiguration {

    // -- scaling bounds and target throughput --
    public final int minSize;
    public final int maxSize;
    public final double rps;

    // -- optional tuning knobs --
    public final Option<Long> minSamples;
    public final Option<Long> cooldownSeconds;
    public final Option<Double> panicThresholdSeconds;
    public final Option<Double> maxAdjustment;
    public final Option<Boolean> useExperimental;
    public final Option<Double> integralDecay;

    // -- optional per-resource PID configurations --
    public final Option<ClutchPIDConfig> cpu;
    public final Option<ClutchPIDConfig> memory;
    public final Option<ClutchPIDConfig> network;
    public final Option<ClutchRpsPIDConfig> rpsConfig;

    @java.beans.ConstructorProperties( {"minSize", "maxSize", "rps", "minSamples", "cooldownSeconds", "panicThresholdSeconds", "maxAdjustment", "cpu", "memory", "network", "rpsConfig", "useExperimental", "integralDecay"})
    public ClutchConfiguration(int minSize, int maxSize, double rps, Option<Long> minSamples, Option<Long> cooldownSeconds,
                               Option<Double> panicThresholdSeconds, Option<Double> maxAdjustment, Option<ClutchPIDConfig> cpu,
                               Option<ClutchPIDConfig> memory, Option<ClutchPIDConfig> network, Option<ClutchRpsPIDConfig> rpsConfig,
                               Option<Boolean> useExperimental, Option<Double> integralDecay) {
        this.minSize = minSize;
        this.maxSize = maxSize;
        this.rps = rps;
        this.minSamples = minSamples;
        this.cooldownSeconds = cooldownSeconds;
        this.panicThresholdSeconds = panicThresholdSeconds;
        this.maxAdjustment = maxAdjustment;
        this.cpu = cpu;
        this.memory = memory;
        this.network = network;
        this.rpsConfig = rpsConfig;
        this.useExperimental = useExperimental;
        this.integralDecay = integralDecay;
    }

    public int getMinSize() {
        return this.minSize;
    }

    public int getMaxSize() {
        return this.maxSize;
    }

    public double getRps() {
        return this.rps;
    }

    public Option<Long> getMinSamples() {
        return this.minSamples;
    }

    public Option<Long> getCooldownSeconds() {
        return this.cooldownSeconds;
    }

    public Option<Double> getPanicThresholdSeconds() {
        return this.panicThresholdSeconds;
    }

    public Option<Double> getMaxAdjustment() {
        return this.maxAdjustment;
    }

    public Option<Boolean> getUseExperimental() {
        return this.useExperimental;
    }

    public Option<Double> getIntegralDecay() {
        return this.integralDecay;
    }

    public Option<ClutchPIDConfig> getCpu() {
        return this.cpu;
    }

    public Option<ClutchPIDConfig> getMemory() {
        return this.memory;
    }

    public Option<ClutchPIDConfig> getNetwork() {
        return this.network;
    }

    public Option<ClutchRpsPIDConfig> getRpsConfig() {
        return this.rpsConfig;
    }

    // Field-by-field structural equality over all thirteen properties.
    public boolean equals(Object o) {
        if (o == this) return true;
        if (!(o instanceof ClutchConfiguration)) return false;
        final ClutchConfiguration other = (ClutchConfiguration) o;
        if (this.getMinSize() != other.getMinSize()) return false;
        if (this.getMaxSize() != other.getMaxSize()) return false;
        if (Double.compare(this.getRps(), other.getRps()) != 0) return false;
        final Object this$minSamples = this.getMinSamples();
        final Object other$minSamples = other.getMinSamples();
        if (this$minSamples == null ? other$minSamples != null : !this$minSamples.equals(other$minSamples))
            return false;
        final Object this$cooldownSeconds = this.getCooldownSeconds();
        final Object other$cooldownSeconds = other.getCooldownSeconds();
        if (this$cooldownSeconds == null ? other$cooldownSeconds != null : !this$cooldownSeconds.equals(other$cooldownSeconds))
            return false;
        final Object this$panicThresholdSeconds = this.getPanicThresholdSeconds();
        final Object other$panicThresholdSeconds = other.getPanicThresholdSeconds();
        if (this$panicThresholdSeconds == null ? other$panicThresholdSeconds != null : !this$panicThresholdSeconds.equals(other$panicThresholdSeconds))
            return false;
        final Object this$maxAdjustment = this.getMaxAdjustment();
        final Object other$maxAdjustment = other.getMaxAdjustment();
        if (this$maxAdjustment == null ? other$maxAdjustment != null : !this$maxAdjustment.equals(other$maxAdjustment))
            return false;
        final Object this$cpu = this.getCpu();
        final Object other$cpu = other.getCpu();
        if (this$cpu == null ? other$cpu != null : !this$cpu.equals(other$cpu)) return false;
        final Object this$memory = this.getMemory();
        final Object other$memory = other.getMemory();
        if (this$memory == null ? other$memory != null : !this$memory.equals(other$memory)) return false;
        final Object this$network = this.getNetwork();
        final Object other$network = other.getNetwork();
        if (this$network == null ? other$network != null : !this$network.equals(other$network)) return false;
        final Object this$rpsConfig = this.getRpsConfig();
        final Object other$rpsConfig = other.getRpsConfig();
        if (this$rpsConfig == null ? other$rpsConfig != null : !this$rpsConfig.equals(other$rpsConfig)) return false;
        final Object this$useExperimental = this.getUseExperimental();
        final Object other$useExperimental = other.getUseExperimental();
        if (this$useExperimental == null ? other$useExperimental != null : !this$useExperimental.equals(other$useExperimental)) return false;
        final Object this$integralDecay = this.getIntegralDecay();
        final Object other$integralDecay = other.getIntegralDecay();
        if (this$integralDecay == null ? other$integralDecay != null : !this$integralDecay.equals(other$integralDecay))
            return false;
        return true;
    }

    // Lombok-style hash: prime 59, doubles folded via doubleToLongBits, 43 for nulls.
    // The fold order must stay in sync with equals() above.
    public int hashCode() {
        final int PRIME = 59;
        int result = 1;
        result = result * PRIME + this.getMinSize();
        result = result * PRIME + this.getMaxSize();
        final long $rps = Double.doubleToLongBits(this.getRps());
        result = result * PRIME + (int) ($rps >>> 32 ^ $rps);
        final Object $minSamples = this.getMinSamples();
        result = result * PRIME + ($minSamples == null ? 43 : $minSamples.hashCode());
        final Object $cooldownSeconds = this.getCooldownSeconds();
        result = result * PRIME + ($cooldownSeconds == null ? 43 : $cooldownSeconds.hashCode());
        final Object $panicThresholdSeconds = this.getPanicThresholdSeconds();
        result = result * PRIME + ($panicThresholdSeconds == null ? 43 : $panicThresholdSeconds.hashCode());
        final Object $maxAdjustment = this.getMaxAdjustment();
        result = result * PRIME + ($maxAdjustment == null ? 43 : $maxAdjustment.hashCode());
        final Object $cpu = this.getCpu();
        result = result * PRIME + ($cpu == null ? 43 : $cpu.hashCode());
        final Object $memory = this.getMemory();
        result = result * PRIME + ($memory == null ? 43 : $memory.hashCode());
        final Object $network = this.getNetwork();
        result = result * PRIME + ($network == null ? 43 : $network.hashCode());
        final Object $rpsConfig = this.getRpsConfig();
        result = result * PRIME + ($rpsConfig == null ? 43 : $rpsConfig.hashCode());
        final Object $useExperimental = this.getUseExperimental();
        result = result * PRIME + ($useExperimental == null ? 43 : $useExperimental.hashCode());
        final Object $integralDecay = this.getIntegralDecay();
        result = result * PRIME + ($integralDecay == null ? 43 : $integralDecay.hashCode());
        return result;
    }

    public String toString() {
        return "ClutchConfiguration(minSize=" + this.getMinSize() + ", maxSize=" + this.getMaxSize() + ", rps=" + this.getRps() +
                ", minSamples=" + this.getMinSamples() + ", cooldownSeconds=" + this.getCooldownSeconds() +
                ", panicThresholdSeconds=" + this.getPanicThresholdSeconds() + ", maxAdjustment=" + this.getMaxAdjustment() +
                ", cpu=" + this.getCpu() + ", memory=" + this.getMemory() + ", network=" + this.getNetwork() +
                ", rpsConfig=" + this.getRpsConfig() + ", useExperimental=" + this.getUseExperimental() +
                ", integralDecay=" + this.getIntegralDecay() + ")";
    }
}
| 8,466 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/clutch/ClutchAutoScaler.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.clutch;
import static io.mantisrx.runtime.descriptor.StageScalingPolicy.ScalingReason.CPU;
import static io.mantisrx.runtime.descriptor.StageScalingPolicy.ScalingReason.Memory;
import static io.mantisrx.runtime.descriptor.StageScalingPolicy.ScalingReason.Network;
import com.yahoo.labs.samoa.instances.Attribute;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.server.worker.jobmaster.JobAutoScaler;
import io.mantisrx.server.worker.jobmaster.Util;
import io.mantisrx.server.worker.jobmaster.control.actuators.ClutchMantisStageActuator;
import io.mantisrx.server.worker.jobmaster.control.controllers.PIDController;
import io.mantisrx.server.worker.jobmaster.control.utils.ErrorComputer;
import io.mantisrx.server.worker.jobmaster.control.utils.Integrator;
import io.mantisrx.shaded.com.google.common.cache.Cache;
import io.mantisrx.shaded.com.google.common.cache.CacheBuilder;
import io.mantisrx.shaded.com.google.common.util.concurrent.AtomicDouble;
import io.vavr.Tuple;
import io.vavr.Tuple3;
import java.util.Calendar;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import moa.core.FastVector;
import org.slf4j.Logger;
import rx.Observable;
/**
* ClutchAutoScaler is an Rx Transformer which can be composed with a stream
* of Metrics in order to scale a specific stage.
* Clutch is based on two key concepts;
* - One of the three main resources (CPU, Memory, Network) will be the binding factor for a job.
* - A machine learning model can correct for the error.
*/
public class ClutchAutoScaler implements Observable.Transformer<JobAutoScaler.Event, Object> {
    private static final Logger log = org.slf4j.LoggerFactory.getLogger(ClutchAutoScaler.class);
    // Format for the autoscale log/reason message:
    // stage, new instance count, cpu/mem/network outputs, dampening, predicted error, dominant resource.
    private static final String autoscaleLogMessageFormat = "Autoscaling stage %d to %d instances on controller output: cpu/mem/network %f/%f/%f (dampening: %f) and predicted error: %f with dominant resource: %s";
    // MOA/samoa attribute schema; populated once in the static initializer below.
    private static final FastVector attributes = new FastVector();
    // Declares the feature schema (resource metrics, time-of-day, current scale)
    // plus the prediction target ("error") for the learned correction model —
    // presumably consumed by the MOA-based model later in this class; confirm
    // against the remainder of call() (not shown here).
    static {
        attributes.add(new Attribute("cpu"));
        attributes.add(new Attribute("memory"));
        attributes.add(new Attribute("network"));
        attributes.add(new Attribute("minuteofday"));
        attributes.add(new Attribute("scale"));
        attributes.add(new Attribute("error"));
    }
    private final JobAutoScaler.StageScaler scaler;
    private final StageSchedulingInfo stageSchedulingInfo;
    // Stage size at construction time; also seeds targetScale below.
    private final long initialSize;
    private final ClutchConfiguration config;
    // Most recently targeted stage size.
    private final AtomicLong targetScale = new AtomicLong(0);
    // Multiplier applied to controller gains; recomputed every minute from the
    // recent scaling-action history (see computeGainFactor).
    private final AtomicDouble gainDampeningFactor = new AtomicDouble(1.0);
    // Epoch millis marking the end of the current cooldown window (set in the constructor).
    private final AtomicLong cooldownTimestamp;
    // Target RPS, rounded from config.rps in the constructor.
    private final AtomicLong rps = new AtomicLong(0);
    // Fallback PID tuning used when a per-resource config (cpu/memory/network) is absent.
    private final ClutchPIDConfig defaultConfig = new ClutchPIDConfig(60.0, Tuple.of(0.0, 25.0), 0.01, 0.01);
    // Recent scaling actions, capped at 12 entries / 60 minutes; the value's sign
    // encodes the scaling direction (see computeGainFactor). Keys are presumably
    // timestamps — TODO confirm against the writer (not visible in this chunk).
    Cache<Long, Long> actionCache = CacheBuilder.newBuilder()
            .maximumSize(12)
            .expireAfterWrite(60, TimeUnit.MINUTES)
            .build();
    // Latest model-predicted correction term.
    AtomicDouble correction = new AtomicDouble(0.0);
    /**
     * @param stageSchedulingInfo Scheduling info for the stage under control.
     * @param scaler              Performs the actual scale-up/scale-down requests.
     * @param config              Clutch tuning parameters for this stage.
     * @param initialSize         Current number of workers in the stage.
     */
    public ClutchAutoScaler(StageSchedulingInfo stageSchedulingInfo, JobAutoScaler.StageScaler scaler, ClutchConfiguration config, int initialSize) {
        this.stageSchedulingInfo = stageSchedulingInfo;
        this.scaler = scaler;
        this.initialSize = initialSize;
        this.targetScale.set(initialSize);
        this.config = config;
        this.rps.set(Math.round(config.rps));
        // Start inside the cooldown window so no scaling happens immediately on startup.
        this.cooldownTimestamp = new AtomicLong(System.currentTimeMillis() + config.cooldownSeconds.getOrElse(0L) * 1000);
        // Re-evaluate the gain dampening factor once per minute.
        // NOTE(review): this interval subscription is never unsubscribed here —
        // confirm the autoscaler's lifetime matches the job's.
        Observable.interval(60, TimeUnit.SECONDS)
                .forEach(__ -> {
                    double factor = computeGainFactor(actionCache);
                    log.debug("Setting gain dampening factor to: {}.", factor);
                    this.gainDampeningFactor.set(factor);
                });
    }
private static double enforceMinMax(double targetScale, double min, double max) {
if (Double.isNaN(targetScale)) {
targetScale = min;
}
if (targetScale < min) {
return min;
}
if (targetScale > max) {
return max;
}
return targetScale;
}
private static int getMinutesIntoDay() {
Calendar now = Calendar.getInstance();
int hour = now.get(Calendar.HOUR_OF_DAY);
int minute = now.get(Calendar.MINUTE);
return ((hour * 60) + minute);
}
/**
* Computes the dampening factor for gain.
* The objective here is to reduce gain in a situation where actions are varying.
* The dampening facor is defined as such:
* x = the percentage of scaling actions in the same direction (up or down)
* dampening = x ^ 3; 0.5 <= x <= 1.0
*
* @param actionCache A cache of timestamp -> scale
*
* @return The computed gain dampening factor.
*/
private double computeGainFactor(Cache<Long, Long> actionCache) {
long nUp = actionCache.asMap().values().stream().filter(x -> x > 0.0).count();
long nDown = actionCache.asMap().values().stream().filter(x -> x < 0.0).count();
long n = nUp + nDown;
double x = n == 0
? 1.0
: nUp > nDown
? (1.0 * nUp) / n
: (1.0 * nDown) / n;
return Math.pow(x, 3);
}
private ClutchControllerOutput findDominatingResource(Tuple3<ClutchControllerOutput, ClutchControllerOutput, ClutchControllerOutput> triple) {
if (triple._1.scale >= triple._2.scale && triple._1.scale >= triple._3.scale) {
return triple._1;
} else if (triple._2.scale >= triple._1.scale && triple._2.scale >= triple._3.scale) {
return triple._2;
} else {
return triple._3;
}
}
/**
 * Wires the autoscaling pipeline: one PID controller per resource (CPU, memory,
 * network), combined with a lag/drop "correction" term, producing scale actions
 * gated by a cooldown window. Mutates shared state on this instance:
 * {@code correction}, {@code targetScale}, {@code cooldownTimestamp}, {@code actionCache}.
 */
@Override
public Observable<Object> call(Observable<JobAutoScaler.Event> metrics) {
// Multicast the event stream: it is filtered many times below by metric type.
metrics = metrics
.share();
// One PID controller per resource dimension.
ClutchController cpuController = new ClutchController(CPU, this.stageSchedulingInfo, this.config.cpu.getOrElse(defaultConfig), this.gainDampeningFactor, this.initialSize, this.config.minSize, this.config.maxSize);
ClutchController memController = new ClutchController(Memory, this.stageSchedulingInfo, this.config.memory.getOrElse(defaultConfig), this.gainDampeningFactor, this.initialSize, this.config.minSize, this.config.maxSize);
ClutchController netController = new ClutchController(Network, this.stageSchedulingInfo, this.config.network.getOrElse(defaultConfig), this.gainDampeningFactor, this.initialSize, this.config.minSize, this.config.maxSize);
Observable<ClutchControllerOutput> cpuSignal = metrics.filter(event -> event.getType().equals(CPU))
.compose(cpuController);
Observable<ClutchControllerOutput> memorySignal = metrics.filter(event -> event.getType().equals(Memory))
.compose(memController);
Observable<ClutchControllerOutput> networkSignal = metrics.filter(event -> event.getType().equals(Network))
.compose(netController);
// Raw metric samples zipped so each tuple holds one value per resource.
Observable<Tuple3<Double, Double, Double>> rawMetricsTuples = Observable.zip(
metrics.filter(event -> event.getType().equals(CPU)).map(JobAutoScaler.Event::getValue),
metrics.filter(event -> event.getType().equals(Memory)).map(JobAutoScaler.Event::getValue),
metrics.filter(event -> event.getType().equals(Network)).map(JobAutoScaler.Event::getValue),
Tuple::of);
// Controller outputs: one recommended scale per resource.
Observable<Tuple3<ClutchControllerOutput, ClutchControllerOutput, ClutchControllerOutput>> controlSignals = Observable.zip(
cpuSignal,
memorySignal,
networkSignal,
Tuple::of);
// Kafka lag divided by configured RPS — presumably converts lag into "workers'
// worth" of traffic; TODO confirm config.rps is per-worker throughput.
Observable<Double> kafkaLag = metrics.filter(event -> event.getType().equals(StageScalingPolicy.ScalingReason.KafkaLag))
.map(JobAutoScaler.Event::getValue)
.map(x -> x / this.config.rps);
// Drop percentage converted into an absolute number of workers' worth of drops.
Observable<Double> dataDrop = metrics.filter(event -> event.getType().equals(StageScalingPolicy.ScalingReason.DataDrop))
.map(x -> (x.getValue() / 100.0) * x.getNumWorkers());
// Jobs either read from Kafka and produce lag, or they read from other jobs and produce drops.
Observable<Double> error = Observable.merge(Observable.just(0.0), kafkaLag, dataDrop);
Observable<Integer> currentScale = metrics.map(JobAutoScaler.Event::getNumWorkers);
Observable<Tuple3<String, Double, Integer>> controllerSignal = Observable.zip(rawMetricsTuples, controlSignals, Tuple::of)
.withLatestFrom(currentScale, (tup, scale) -> Tuple.of(tup._1, tup._2, scale))
.withLatestFrom(error, (tup, err) -> Tuple.of(tup._1, tup._2, tup._3, err))
.map(tup -> {
int currentWorkerCount = tup._3;
// Scale towards the resource demanding the largest worker count.
ClutchControllerOutput dominantResource = findDominatingResource(tup._2);
String resourceName = dominantResource.reason.name();
//
// Correction
//
// yhat is the lag/drop error expressed in workers; capped by maxAdjustment and
// zeroed below one worker to avoid jitter.
double yhat = tup._4;
yhat = Math.min(yhat, config.maxAdjustment.getOrElse(config.maxSize * 1.0));
yhat = yhat < 1.0 ? 0.0 : yhat;
// Only accumulate correction once we are outside the cooldown window.
if (System.currentTimeMillis() > this.cooldownTimestamp.get()) {
double x = correction.addAndGet(yhat);
x = Math.min(x, config.maxAdjustment.getOrElse(config.maxSize * 1.0));
correction.set(x);
}
correction.set(correction.get() * 0.99); // Exponentially decay our correction.
correction.set(Double.isNaN(correction.get()) ? 0.0 : correction.get());
Double targetScale = enforceMinMax(Math.ceil(dominantResource.scale) + Math.ceil(correction.get()), this.config.minSize, this.config.maxSize);
String logMessage = String.format(autoscaleLogMessageFormat, scaler.getStage(), targetScale.intValue(), tup._2._1.scale, tup._2._2.scale, tup._2._3.scale, gainDampeningFactor.get(), correction.get(), resourceName);
return Tuple.of(logMessage, targetScale, currentWorkerCount);
});
return controllerSignal
// Respect the cooldown between scaling actions.
.filter(__ -> System.currentTimeMillis() > this.cooldownTimestamp.get())
// Only act when the target differs from the current size by at least one worker.
.filter(tup -> Math.abs(Math.round(tup._2) - tup._3) > 0.99) //
.doOnNext(signal -> log.info(signal._1))
.compose(new ClutchMantisStageActuator(this.scaler))
.map(Math::round)
// Record the action's delta vs. the previous target (feeds gain dampening),
// then advance the target and restart the cooldown clock.
.doOnNext(x -> actionCache.put(System.currentTimeMillis(), x - targetScale.get()))
.doOnNext(targetScale::set)
.doOnNext(__ -> cooldownTimestamp.set(System.currentTimeMillis() + config.cooldownSeconds.getOrElse(0L) * 1000))
.map(x -> (Object) x);
}
private class ClutchController implements Observable.Transformer<JobAutoScaler.Event, ClutchControllerOutput> {
private final ClutchPIDConfig config;
private final StageScalingPolicy.ScalingReason metric;
private final StageSchedulingInfo stageSchedulingInfo;
private final AtomicDouble gainFactor;
private final long initialSize;
private final long min;
private final long max;
private final Integrator integrator;
public ClutchController(StageScalingPolicy.ScalingReason metric, StageSchedulingInfo stageSchedulingInfo, ClutchPIDConfig config, AtomicDouble gainFactor, long initialSize, long min, long max) {
this.metric = metric;
this.config = config;
this.gainFactor = gainFactor;
this.initialSize = initialSize;
this.stageSchedulingInfo = stageSchedulingInfo;
this.min = min;
this.max = max;
this.integrator = new Integrator(this.initialSize, this.min - 1, this.max + 1);
}
public void resetIntegrator(double val) {
this.integrator.setSum(val);
}
@Override
public Observable<ClutchControllerOutput> call(Observable<JobAutoScaler.Event> eventObservable) {
return eventObservable.map(event -> Util.getEffectiveValue(this.stageSchedulingInfo, event.getType(), event.getValue()))
.lift(new ErrorComputer(config.setPoint, true, config.rope._1, config.rope._2))
.lift(PIDController.of(config.kp, 0.0, config.kd, 1.0, this.gainFactor))
.lift(this.integrator)
.map(x -> new ClutchControllerOutput(this.metric, x));
}
}
// NOTE(review): leftover debug scaffolding exploring NaN arithmetic semantics
// (NaN propagation and NaN comparisons always being false). Not used by the
// autoscaler — consider removing.
public static void main(String[] args) {
System.out.println(Double.NaN + 0.0);
System.out.println(Math.ceil(Double.NaN + 0.0));
System.out.println(Double.NaN < 1.0);
System.out.println(Double.NaN > 1.0);
}
}
| 8,467 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/clutch/ClutchControllerOutput.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.clutch;
import io.mantisrx.runtime.descriptor.StageScalingPolicy;
/**
 * Immutable pairing of a scaling reason (the resource whose controller produced the
 * recommendation) with the scale value that controller recommends.
 */
public class ClutchControllerOutput {

    /** Resource dimension whose controller produced this recommendation. */
    public final StageScalingPolicy.ScalingReason reason;
    /** Recommended scale for that resource. */
    public final Double scale;

    public ClutchControllerOutput(StageScalingPolicy.ScalingReason scalingReason, Double recommendedScale) {
        this.reason = scalingReason;
        this.scale = recommendedScale;
    }
}
| 8,468 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/clutch | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/clutch/experimental/MantisClutchConfigurationSelector.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.clutch.experimental;
import com.netflix.control.clutch.Clutch;
import com.netflix.control.clutch.ClutchConfiguration;
import com.yahoo.sketches.quantiles.UpdateDoublesSketch;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.shaded.com.google.common.util.concurrent.AtomicDouble;
import io.vavr.Function1;
import io.vavr.Tuple;
import io.vavr.Tuple2;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Builds a Clutch autoscaler configuration from observed metric sketches, choosing an
 * RPS set point bounded per CPU core and nudging it once roughly a day of data has been
 * collected. Also tracks all-time CPU/network extrema across sketch resets.
 */
public class MantisClutchConfigurationSelector implements Function1<Map<Clutch.Metric, UpdateDoublesSketch>, ClutchConfiguration> {

    private static final Logger logger = LoggerFactory.getLogger(MantisClutchConfigurationSelector.class);

    private static final long ONE_DAY_MILLIS = 1000L * 60 * 60 * 24;
    private static final long TEN_MINUTES_MILLIS = 1000L * 60 * 10;

    private final Integer stageNumber;
    private final StageSchedulingInfo stageSchedulingInfo;

    // All-time observed extrema. The sketches may be reset periodically, so their own
    // min/max are biased by the current set point; these accumulators keep true values.
    private final AtomicDouble trueCpuMax = new AtomicDouble(0.0);
    private final AtomicDouble trueNetworkMax = new AtomicDouble(0.0);
    // Start at MAX_VALUE so the first observation always becomes the minimum. (These
    // previously started at 0.0, which meant they never updated for the non-negative
    // CPU/network metrics, making the "too hot" lower-bound guard a no-op.)
    private final AtomicDouble trueCpuMin = new AtomicDouble(Double.MAX_VALUE);
    private final AtomicDouble trueNetworkMin = new AtomicDouble(Double.MAX_VALUE);

    private final long initializationTime = System.currentTimeMillis();

    public MantisClutchConfigurationSelector(Integer stageNumber, StageSchedulingInfo stageSchedulingInfo) {
        this.stageNumber = stageNumber;
        this.stageSchedulingInfo = stageSchedulingInfo;
    }

    /**
     * Determines a suitable set point within some defined bounds.
     * We use a quantile above 0.5 to encourage the function to grow over time if possible.
     *
     * @param sketches         the map of sketches for which to compute a set point; must contain RPS
     * @param numberOfCpuCores provisioned CPU cores, used to bound the set point per core
     * @return a suitable set point for autoscaling
     */
    private double getSetpoint(Map<Clutch.Metric, UpdateDoublesSketch> sketches, double numberOfCpuCores) {
        double setPoint = sketches.get(Clutch.Metric.RPS).getQuantile(0.75);

        // Bounds of 1000-2500 RPS per core. TODO confirm these per-core constants still hold.
        double minRps = 1000 * numberOfCpuCores;
        double maxRps = 2500 * numberOfCpuCores;

        // Nudge the set point, but only once we have nearly a full day of data.
        if (isSetpointHigh(sketches)
                && System.currentTimeMillis() - initializationTime > ONE_DAY_MILLIS - TEN_MINUTES_MILLIS) {
            setPoint *= 0.9;
        } else if (isSetpointLow(sketches)
                && System.currentTimeMillis() - initializationTime > ONE_DAY_MILLIS - TEN_MINUTES_MILLIS) {
            setPoint *= 1.11;
        }

        if (isUnderprovisioned(sketches)
                && System.currentTimeMillis() - initializationTime > ONE_DAY_MILLIS) {
            logger.info("Job is underprovisioned see previous messages to determine metric.");
        }

        // Sanity checking against mins / maxes.
        if (setPoint < minRps) {
            logger.info("Setpoint {} was less than minimum {}. Setting to {}.", setPoint, minRps, minRps);
            setPoint = minRps;
        }
        if (setPoint > maxRps) {
            logger.info("Setpoint {} was greater than maximum {}. Setting to {}.", setPoint, maxRps, maxRps);
            setPoint = maxRps;
        }

        return setPoint;
    }

    @Override
    public ClutchConfiguration apply(Map<Clutch.Metric, UpdateDoublesSketch> sketches) {
        updateTrueMaxValues(sketches);

        double numberOfCpuCores = stageSchedulingInfo.getMachineDefinition().getCpuCores();

        double setPoint = getSetpoint(sketches, numberOfCpuCores);

        // Region of practical equivalence: errors within (setPoint * 0.3, 0.0) are tolerated.
        Tuple2<Double, Double> rope = Tuple.of(setPoint * 0.3, 0.0);

        // Gain: deltaT is the number of 30s ticks within the cooldown period.
        long deltaT = stageSchedulingInfo.getScalingPolicy().getCoolDownSecs() / 30L;
        double kp = 1.0 / setPoint / deltaT * stageSchedulingInfo.getScalingPolicy().getMin();
        // No integral gain for now — we don't want any accumulated "state". (Earlier
        // experiments used a dampening factor of ~1/3; 0.4 caused slight oscillation.)
        double ki = 0.0;
        double kd = 1.0 / setPoint / deltaT * stageSchedulingInfo.getScalingPolicy().getMin();

        // TODO: Do we want to reset sketches? We need at least one day's values.
        //resetSketches(sketches);

        return com.netflix.control.clutch.ClutchConfiguration.builder()
                .metric(Clutch.Metric.RPS)
                .setPoint(setPoint)
                .kp(kp)
                .ki(ki)
                .kd(kd)
                .minSize(stageSchedulingInfo.getScalingPolicy().getMin())
                .maxSize(stageSchedulingInfo.getScalingPolicy().getMax())
                .rope(rope)
                .cooldownInterval(stageSchedulingInfo.getScalingPolicy().getCoolDownSecs())
                .cooldownUnits(TimeUnit.SECONDS)
                .build();
    }

    /** Resets every metric sketch; currently unused (see TODO in {@link #apply}). */
    private void resetSketches(Map<Clutch.Metric, UpdateDoublesSketch> sketches) {
        sketches.values().forEach(UpdateDoublesSketch::reset);
    }

    /**
     * Implements the rules for determining if the setpoint is too low, which runs the job too cold.
     * We are currently defining too cold as CPU or Network spending half their time below half of true max.
     *
     * @param sketches A map of metrics to sketches. Must contain CPU and NETWORK.
     * @return A boolean indicating if the setpoint is too low and we are thus running the job too cold.
     */
    private boolean isSetpointLow(Map<Clutch.Metric, UpdateDoublesSketch> sketches) {
        double cpuMedian = sketches.get(Clutch.Metric.CPU).getQuantile(0.5);
        double networkMedian = sketches.get(Clutch.Metric.NETWORK).getQuantile(0.5);

        boolean cpuTooLow = cpuMedian < trueCpuMax.get() * 0.5;
        boolean networkTooLow = networkMedian < trueNetworkMax.get() * 0.5;

        if (cpuTooLow) {
            logger.info("CPU running too cold for stage {} with median {} and max {}. Recommending increase in setPoint.", stageNumber, cpuMedian, trueCpuMax.get());
        }
        if (networkTooLow) {
            logger.info("Network running too cold for stage {} with median {} and max {}. Recommending increase in setPoint.", stageNumber, networkMedian, trueNetworkMax.get());
        }

        return cpuTooLow || networkTooLow;
    }

    /**
     * Implements the rules for determining if the setpoint is too high, which in turn runs the job too hot.
     * We are currently defining a setpoint as too high if CPU or Network spend half their time over 80% of
     * true max while also sitting well above (1.2x) the true min.
     *
     * @param sketches A map of metrics to sketches. Must contain CPU and NETWORK.
     * @return A boolean indicating if the setpoint is too high and we are thus running the job too hot.
     */
    private boolean isSetpointHigh(Map<Clutch.Metric, UpdateDoublesSketch> sketches) {
        double cpuMedian = sketches.get(Clutch.Metric.CPU).getQuantile(0.5);
        double networkMedian = sketches.get(Clutch.Metric.NETWORK).getQuantile(0.5);

        // TODO: How do we ensure we're not just always operating in a tight range?
        boolean cpuTooHigh = cpuMedian > trueCpuMax.get() * 0.8
                && cpuMedian > trueCpuMin.get() * 1.2;
        boolean networkTooHigh = networkMedian > trueNetworkMax.get() * 0.8
                && networkMedian > trueNetworkMin.get() * 1.2;

        if (cpuTooHigh) {
            logger.info("CPU running too hot for stage {} with median {} and max {}. Recommending reduction in setPoint.", stageNumber, cpuMedian, trueCpuMax.get());
        }
        if (networkTooHigh) {
            // Previously logged cpuMedian here by mistake.
            logger.info("Network running too hot for stage {} with median {} and max {}. Recommending reduction in setPoint.", stageNumber, networkMedian, trueNetworkMax.get());
        }

        return cpuTooHigh || networkTooHigh;
    }

    /**
     * Determines if a job is underprovisioned on cpu or network.
     * We are currently defining underprovisioned as spending 20% or more of our time above the
     * provisioned resource amount.
     *
     * @param sketches A map of metrics to sketches. Must contain CPU and NETWORK.
     * @return A boolean indicating if the job is underprovisioned.
     */
    private boolean isUnderprovisioned(Map<Clutch.Metric, UpdateDoublesSketch> sketches) {
        double provisionedCpuLimit = stageSchedulingInfo.getMachineDefinition().getCpuCores() * 100.0;
        double provisionedNetworkLimit = stageSchedulingInfo.getMachineDefinition().getNetworkMbps() * 1024.0 * 1024.0;

        // P80 above the limit means >= 20% of samples exceed provisioned capacity.
        double cpu = sketches.get(Clutch.Metric.CPU).getQuantile(0.8);
        double network = sketches.get(Clutch.Metric.NETWORK).getQuantile(0.8);

        boolean cpuUnderProvisioned = cpu > provisionedCpuLimit;
        boolean networkUnderProvisioned = network > provisionedNetworkLimit;

        if (cpuUnderProvisioned) {
            logger.error("CPU is underprovisioned! 80% percentile {}% is above provisioned {}%.", cpu, provisionedCpuLimit);
        }
        if (networkUnderProvisioned) {
            logger.error("Network is underprovisioned! 80% percentile {}% is above provisioned {}%.", network, provisionedNetworkLimit);
        }

        return cpuUnderProvisioned || networkUnderProvisioned;
    }

    /**
     * Performs bookkeeping on true maximum and minimum values.
     * We need to do this because we reset the sketches constantly, so their extrema are
     * impacted by the current setPoint.
     *
     * @param sketches A map of metrics to sketches. Must contain CPU and NETWORK.
     */
    private void updateTrueMaxValues(Map<Clutch.Metric, UpdateDoublesSketch> sketches) {
        double cpuMax = sketches.get(Clutch.Metric.CPU).getMaxValue();
        double networkMax = sketches.get(Clutch.Metric.NETWORK).getMaxValue();
        if (cpuMax > trueCpuMax.get()) {
            trueCpuMax.set(cpuMax);
        }
        if (networkMax > trueNetworkMax.get()) {
            trueNetworkMax.set(networkMax);
        }

        double cpuMin = sketches.get(Clutch.Metric.CPU).getMinValue();
        double networkMin = sketches.get(Clutch.Metric.NETWORK).getMinValue();
        if (cpuMin < trueCpuMin.get()) {
            trueCpuMin.set(cpuMin);
        }
        if (networkMin < trueNetworkMin.get()) {
            trueNetworkMin.set(networkMin);
        }
    }
}
| 8,469 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/clutch | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/clutch/rps/RpsClutchConfigurationSelector.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.clutch.rps;
import com.netflix.control.clutch.Clutch;
import com.netflix.control.clutch.ClutchConfiguration;
import com.yahoo.sketches.quantiles.UpdateDoublesSketch;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.vavr.Function1;
import io.vavr.Tuple2;
import java.util.Map;
import java.util.concurrent.TimeUnit;
/**
 * Derives a Clutch PID configuration from the observed RPS sketch, layering optional
 * user-supplied overrides (custom config) over the stage's scaling policy and finally
 * over {@link ClutchRpsPIDConfig#DEFAULT}. Returns the previous configuration when the
 * new one is within 5% of it, so downstream consumers do not reset the PID loop.
 *
 * Not thread-safe: {@code prevConfig} is unsynchronized mutable state.
 */
public class RpsClutchConfigurationSelector implements Function1<Map<Clutch.Metric, UpdateDoublesSketch>, ClutchConfiguration> {

    private static final double DEFAULT_INTEGRAL_DECAY = 0.1;

    private final Integer stageNumber;
    private final StageSchedulingInfo stageSchedulingInfo;
    private final io.mantisrx.server.worker.jobmaster.clutch.ClutchConfiguration customConfig;

    // Last configuration handed out; reused when the new one is "similar".
    private ClutchConfiguration prevConfig;

    public RpsClutchConfigurationSelector(Integer stageNumber, StageSchedulingInfo stageSchedulingInfo, io.mantisrx.server.worker.jobmaster.clutch.ClutchConfiguration customConfig) {
        this.stageNumber = stageNumber;
        this.stageSchedulingInfo = stageSchedulingInfo;
        this.customConfig = customConfig;
    }

    @Override
    public ClutchConfiguration apply(Map<Clutch.Metric, UpdateDoublesSketch> sketches) {
        double setPoint = getSetpoint(sketches);
        // ROPE bounds are configured as percentages of the set point.
        Tuple2<Double, Double> rope = getRope().map(x -> x / 100.0 * setPoint, y -> y / 100.0 * setPoint);

        // Gain - number of ticks within the cooldown period. This is the minimum number of times PID output
        // will accumulate before an action is taken.
        long deltaT = getCooldownSecs() / 30L;
        // kp and kd share the same magnitude; compute it once.
        double gain = 1.0 / Math.max(setPoint, 1.0)
                / Math.max(getCumulativeIntegralDivisor(getIntegralScaler(), deltaT), 1.0);
        double kp = gain;
        double ki = 0.0;
        double kd = gain;

        // NOTE(review): integralDecay is fed the retained fraction (1 - decay) from
        // getIntegralScaler() — confirm the builder expects that rather than the decay itself.
        ClutchConfiguration config = com.netflix.control.clutch.ClutchConfiguration.builder()
                .metric(Clutch.Metric.RPS)
                .setPoint(setPoint)
                .kp(kp)
                .ki(ki)
                .kd(kd)
                .integralDecay(getIntegralScaler())
                .minSize(getMinSize())
                .maxSize(getMaxSize())
                .rope(rope)
                .cooldownInterval(getCooldownSecs())
                .cooldownUnits(TimeUnit.SECONDS)
                .build();

        // If config is similar to previous, don't return a new config which would trigger a PID reset.
        if (isSimilarToPreviousConfig(config)) {
            return prevConfig;
        }
        prevConfig = config;
        return config;
    }

    /** Picks the configured percentile of observed RPS, guarding against downward drift. */
    private double getSetpoint(Map<Clutch.Metric, UpdateDoublesSketch> sketches) {
        UpdateDoublesSketch rpsSketch = sketches.get(Clutch.Metric.RPS);
        double setPoint = rpsSketch.getQuantile(getSetPointPercentile());

        // Check if set point drifted too low due to distribution skewing lower.
        if (rpsSketch.getQuantile(0.99) * getSetPointPercentile() > setPoint) {
            setPoint = setPoint * 1.1;
        }

        return setPoint;
    }

    /** Set-point percentile as a fraction in (0, 1]; custom config wins over the default. */
    private double getSetPointPercentile() {
        if (customConfig != null && customConfig.getRpsConfig().isDefined()) {
            return customConfig.getRpsConfig().get().getSetPointPercentile() / 100.0;
        }
        return ClutchRpsPIDConfig.DEFAULT.getSetPointPercentile() / 100.0;
    }

    /** ROPE bounds in percent of set point; custom config wins over the default. */
    private Tuple2<Double, Double> getRope() {
        if (customConfig != null && customConfig.getRpsConfig().isDefined()) {
            return customConfig.getRpsConfig().get().getRope();
        }
        return ClutchRpsPIDConfig.DEFAULT.getRope();
    }

    /** Minimum size: custom config, then scaling policy, then the stage's instance count. */
    private int getMinSize() {
        if (customConfig != null && customConfig.getMinSize() > 0) {
            return customConfig.getMinSize();
        }
        if (stageSchedulingInfo.getScalingPolicy() != null && stageSchedulingInfo.getScalingPolicy().getMin() > 0) {
            return stageSchedulingInfo.getScalingPolicy().getMin();
        }
        return stageSchedulingInfo.getNumberOfInstances();
    }

    /** Maximum size: custom config, then scaling policy, then the stage's instance count. */
    private int getMaxSize() {
        if (customConfig != null && customConfig.getMaxSize() > 0) {
            return customConfig.getMaxSize();
        }
        if (stageSchedulingInfo.getScalingPolicy() != null && stageSchedulingInfo.getScalingPolicy().getMax() > 0) {
            return stageSchedulingInfo.getScalingPolicy().getMax();
        }
        return stageSchedulingInfo.getNumberOfInstances();
    }

    /** Cooldown in seconds: custom config, then scaling policy, otherwise 0. */
    private long getCooldownSecs() {
        if (customConfig != null && customConfig.getCooldownSeconds().isDefined()) {
            return customConfig.getCooldownSeconds().get();
        }
        if (stageSchedulingInfo.getScalingPolicy() != null) {
            return stageSchedulingInfo.getScalingPolicy().getCoolDownSecs();
        }
        return 0;
    }

    /** Fraction of the integral retained each tick (1 - decay). */
    private double getIntegralScaler() {
        if (customConfig != null && customConfig.getIntegralDecay().isDefined()) {
            return 1.0 - customConfig.getIntegralDecay().get();
        }
        return 1.0 - DEFAULT_INTEGRAL_DECAY;
    }

    /** True when the new set point is within 5% of the previous configuration's. */
    private boolean isSimilarToPreviousConfig(ClutchConfiguration curConfig) {
        if (prevConfig == null) {
            return false;
        }
        double prevSetPoint = prevConfig.getSetPoint();
        double curSetPoint = curConfig.getSetPoint();
        return curSetPoint >= prevSetPoint * 0.95 && curSetPoint <= prevSetPoint * 1.05;
    }

    /**
     * Geometric accumulation of {@code count} unit contributions, each scaled by
     * {@code integralScaler} per tick: 1 + s + s^2 + ... + s^(count-1).
     */
    private double getCumulativeIntegralDivisor(double integralScaler, long count) {
        double result = 0.0;
        for (int i = 0; i < count; i++) {
            result = result * integralScaler + 1.0;
        }
        return result;
    }
}
| 8,470 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/clutch | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/clutch/rps/RpsScaleComputer.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.clutch.rps;
import com.netflix.control.clutch.ClutchConfiguration;
import com.netflix.control.clutch.IScaleComputer;
public class RpsScaleComputer implements IScaleComputer {
private final ClutchRpsPIDConfig rpsConfig;
public RpsScaleComputer(ClutchRpsPIDConfig rpsConfig) {
if (rpsConfig == null) {
rpsConfig = ClutchRpsPIDConfig.DEFAULT;
}
this.rpsConfig = rpsConfig;
}
public Double apply(ClutchConfiguration config, Long currentScale, Double delta) {
double scaleUpPct = rpsConfig.getScaleUpAbovePct() / 100.0;
double scaleDownPct = rpsConfig.getScaleDownBelowPct() / 100.0;
if (delta > -scaleDownPct && delta < scaleUpPct) {
return (double) currentScale;
}
if (delta >= scaleUpPct) {
delta = delta * rpsConfig.getScaleUpMultiplier();
}
if (delta <= -scaleDownPct) {
delta = delta * rpsConfig.getScaleDownMultiplier();
}
// delta is a percentage, actual increase/decrease is computed as percentage of current scale.
double scale = Math.round(currentScale + currentScale * delta);
scale = Math.min(config.getMaxSize(), Math.max(config.getMinSize(), scale));
return scale;
}
}
| 8,471 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/clutch | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/clutch/rps/ClutchRpsPIDConfig.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.clutch.rps;
import io.mantisrx.server.worker.jobmaster.clutch.ClutchPIDConfig;
import io.vavr.Tuple;
import io.vavr.Tuple2;
import io.vavr.control.Option;
/**
 * PID configuration for RPS-based autoscaling. Extends the base {@link ClutchPIDConfig}
 * with a set-point percentile plus scale up/down thresholds and multipliers. Any option
 * left undefined falls back to the corresponding value in {@link #DEFAULT}.
 */
public class ClutchRpsPIDConfig extends ClutchPIDConfig {

// Source of fallback values for every optional field below.
public static final ClutchRpsPIDConfig DEFAULT = new ClutchRpsPIDConfig(0.0, Tuple.of(30.0, 0.0), 0.0, 0.0,
Option.of(75.0), Option.of(0.0), Option.of(0.0), Option.of(1.0), Option.of(1.0));

/**
 * Percentile of RPS data points to use as set point. 99.0 means P99.
 */
public final double setPointPercentile;

/**
 * Percentage threshold for scaling up. Use to delay scaling up until a threshold is reached, effectively
 * reducing the number of scaling activities. 10.0 means 10%.
 */
public final double scaleUpAbovePct;

/**
 * Percentage threshold for scaling down. Use to delay scaling down until a threshold is reached, effectively
 * reducing the number of scaling activities. 10.0 means 10%.
 */
public final double scaleDownBelowPct;

/**
 * Scale up multiplier. Use to artificially increase/decrease size of scale up. Can be used to reduce number of
 * scaling activities. However, a large number can cause over provisioning and lead to oscillation.
 */
public final double scaleUpMultiplier;

/**
 * Scale down multiplier. Use to artificially increase/decrease size of scale down. Can be used to reduce number of
 * scaling activities. However, a large number can cause under provisioning and lead to oscillation.
 */
public final double scaleDownMultiplier;

// Binds bean/JSON deserialization property names to the constructor arguments below.
@java.beans.ConstructorProperties( {"setPoint", "rope", "kp", "kd", "setPointPercentile", "scaleUpAbovePct", "scaleDownBelowPct", "scaleUpMultiplier", "scaleDownMultiplier"})
public ClutchRpsPIDConfig(double setPoint, Tuple2<Double, Double> rope, double kp, double kd,
Option<Double> setPointPercentile,
Option<Double> scaleUpAbovePct,
Option<Double> scaleDownBelowPct,
Option<Double> scaleUpMultiplier,
Option<Double> scaleDownMultiplier) {
// A null rope falls back to DEFAULT's rope; every Option falls back to DEFAULT's value.
super(setPoint, rope == null ? DEFAULT.getRope() : rope, kp, kd);
this.setPointPercentile = setPointPercentile.getOrElse(() -> DEFAULT.getSetPointPercentile());
this.scaleUpAbovePct = scaleUpAbovePct.getOrElse(() -> DEFAULT.getScaleUpAbovePct());
this.scaleDownBelowPct = scaleDownBelowPct.getOrElse(() -> DEFAULT.getScaleDownBelowPct());
this.scaleUpMultiplier = scaleUpMultiplier.getOrElse(() -> DEFAULT.getScaleUpMultiplier());
this.scaleDownMultiplier = scaleDownMultiplier.getOrElse(() -> DEFAULT.getScaleDownMultiplier());
}

public double getSetPointPercentile() {
return setPointPercentile;
}

public double getScaleUpAbovePct() {
return scaleUpAbovePct;
}

public double getScaleDownBelowPct() {
return scaleDownBelowPct;
}

public double getScaleUpMultiplier() {
return scaleUpMultiplier;
}

public double getScaleDownMultiplier() {
return scaleDownMultiplier;
}

// Lombok-style value equality: superclass fields plus the five fields declared here.
public boolean equals(Object o) {
if (o == this) return true;
if (!(o instanceof ClutchRpsPIDConfig)) return false;
final ClutchRpsPIDConfig other = (ClutchRpsPIDConfig) o;
if (!super.equals(o)) return false;
if (Double.compare(this.getSetPointPercentile(), other.getSetPointPercentile()) != 0) return false;
if (Double.compare(this.getScaleUpAbovePct(), other.getScaleUpAbovePct()) != 0) return false;
if (Double.compare(this.getScaleDownBelowPct(), other.getScaleDownBelowPct()) != 0) return false;
if (Double.compare(this.getScaleUpMultiplier(), other.getScaleUpMultiplier()) != 0) return false;
if (Double.compare(this.getScaleDownMultiplier(), other.getScaleDownMultiplier()) != 0) return false;
return true;
}

// Lombok-style hash: folds each double's bit pattern into the superclass hash.
// Must stay consistent with equals() above.
public int hashCode() {
final int PRIME = 59;
int result = super.hashCode();
final long $setPointPercentile = Double.doubleToLongBits(this.getSetPointPercentile());
result = result * PRIME + (int) ($setPointPercentile >>> 32 ^ $setPointPercentile);
final long $scaleUpAbovePct = Double.doubleToLongBits(this.getScaleUpAbovePct());
result = result * PRIME + (int) ($scaleUpAbovePct >>> 32 ^ $scaleUpAbovePct);
final long $scaleDownBelowPct = Double.doubleToLongBits(this.getScaleDownBelowPct());
result = result * PRIME + (int) ($scaleDownBelowPct >>> 32 ^ $scaleDownBelowPct);
final long $scaleUpMultiplier = Double.doubleToLongBits(this.getScaleUpMultiplier());
result = result * PRIME + (int) ($scaleUpMultiplier >>> 32 ^ $scaleUpMultiplier);
final long $scaleDownMultiplier = Double.doubleToLongBits(this.getScaleDownMultiplier());
result = result * PRIME + (int) ($scaleDownMultiplier >>> 32 ^ $scaleDownMultiplier);
return result;
}

// NOTE(review): prints "ClutchRPSPIDConfig" while the class is named ClutchRpsPIDConfig —
// likely a stale name; confirm no log parsers depend on it before changing.
public String toString() {
return "ClutchRPSPIDConfig(setPoint=" + this.getSetPoint() + ", rope=" + this.getRope() + ", kp=" + this.getKp() + ", kd=" + this.getKd() +
", setPointPercentile=" + this.getSetPointPercentile() +
", scaleUpAbovePct=" + this.getScaleUpAbovePct() + ", scaleDownBelowPct=" + this.getScaleDownBelowPct() +
", scaleUpMultiplier=" + this.getScaleUpMultiplier() + ", scaleDownMultiplier=" + this.getScaleDownMultiplier() + ")";
}
}
| 8,472 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/clutch | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/jobmaster/clutch/rps/RpsMetricComputer.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.jobmaster.clutch.rps;
import com.netflix.control.clutch.Clutch;
import com.netflix.control.clutch.ClutchConfiguration;
import com.netflix.control.clutch.IRpsMetricComputer;
import java.util.Map;
/**
 * Computes the effective request rate fed into the Clutch autoscaler:
 * observed RPS plus backlog (lag), drops reported upstream by source jobs,
 * and this job's own drops (reported as a percentage of RPS and converted
 * to an absolute rate here).
 */
public class RpsMetricComputer implements IRpsMetricComputer {

    /**
     * @param config  the current Clutch configuration (unused by this computer)
     * @param metrics latest metric snapshot keyed by {@link Clutch.Metric}
     * @return the aggregate demand: RPS + lag + source-job drops + own drops
     */
    public Double apply(ClutchConfiguration config, Map<Clutch.Metric, Double> metrics) {
        // Default absent metrics to 0.0; a plain get() would throw an
        // unboxing NullPointerException when a key is missing from the snapshot.
        double rps = metrics.getOrDefault(Clutch.Metric.RPS, 0.0);
        double lag = metrics.getOrDefault(Clutch.Metric.LAG, 0.0);
        double sourceDrops = metrics.getOrDefault(Clutch.Metric.SOURCEJOB_DROP, 0.0);
        // DROPS arrives as a percentage of RPS; convert to an absolute rate.
        double drops = metrics.getOrDefault(Clutch.Metric.DROPS, 0.0) / 100.0 * rps;
        return rps + lag + sourceDrops + drops;
    }
}
| 8,473 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/scheduling/WorkerIndexChange.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.scheduling;
import io.mantisrx.server.core.WorkerHost;
/**
 * Immutable pairing of the previous and new {@link WorkerHost} assignment for a
 * single worker index, emitted whenever the worker number at that index changes.
 */
public class WorkerIndexChange {

    private final int workerIndex;
    private final WorkerHost newState;
    private final WorkerHost oldState;

    /**
     * @param workerIndex the stage-local worker index this change applies to
     * @param newState    the assignment now occupying the index
     * @param oldState    the assignment previously at the index
     */
    public WorkerIndexChange(int workerIndex, WorkerHost newState,
                             WorkerHost oldState) {
        this.workerIndex = workerIndex;
        this.newState = newState;
        this.oldState = oldState;
    }

    public WorkerHost getNewState() {
        return newState;
    }

    public WorkerHost getOldState() {
        return oldState;
    }

    public int getWorkerIndex() {
        return workerIndex;
    }

    @Override
    public String toString() {
        return "WorkerIndexChange [workerIndex=" + workerIndex + ", newState="
                + newState + ", oldState=" + oldState + "]";
    }
}
| 8,474 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/scheduling/JobSchedulingTracker.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.scheduling;
import io.mantisrx.runtime.MantisJobState;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.mantisrx.server.core.WorkerAssignments;
import io.mantisrx.server.core.WorkerHost;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.functions.Func1;
import rx.observables.GroupedObservable;
/**
 * Tracks per-worker-index changes for one stage of a job by diffing successive
 * {@link WorkerHost} assignments from the master's scheduling-info stream.
 */
public class JobSchedulingTracker {

    private static final Logger logger = LoggerFactory.getLogger(JobSchedulingTracker.class);

    private final Observable<JobSchedulingInfo> schedulingChangesForJobId;

    public JobSchedulingTracker(Observable<JobSchedulingInfo> schedulingChangesForJobId) {
        this.schedulingChangesForJobId = schedulingChangesForJobId;
    }

    /**
     * @return index changes for the given stage, filtered to workers whose new
     *         state is {@link MantisJobState#Started}.
     */
    public Observable<WorkerIndexChange> startedWorkersPerIndex(int stageNumber) {
        return workerIndexChanges(stageNumber)
                .filter(change -> change.getNewState().getState() == MantisJobState.Started);
    }

    /**
     * Emits a {@link WorkerIndexChange} whenever the worker number occupying a
     * given index of the stage changes (i.e. a worker was replaced).
     */
    public Observable<WorkerIndexChange> workerIndexChanges(int stageNumber) {
        return workerChangesForStage(stageNumber, schedulingChangesForJobId)
                // flatmap over all numbered workers
                .flatMap(assignments -> {
                    logger.info("Received scheduling update from master: " + assignments);
                    return Observable.from(assignments.getHosts().values());
                })
                // group by worker index so each index is diffed independently
                .groupBy(WorkerHost::getWorkerIndex)
                .flatMap(workerIndexGroup ->
                        // seed sequence with a sentinel (workerNumber -1) to support buffer by 2
                        workerIndexGroup.startWith(new WorkerHost(null, -1, null, null, -1, -1, -1))
                                .buffer(2, 1) // sliding pair of (previous, current)
                                .filter(currentAndPrevious -> {
                                    if (currentAndPrevious.size() < 2) {
                                        // not a pair: the trailing single element was already
                                        // evaluated on the previous iteration, e.g.
                                        // 1,2,3,4,5 = (1,2),(2,3),(3,4),(4,5),(5)
                                        return false;
                                    }
                                    WorkerHost previous = currentAndPrevious.get(0);
                                    WorkerHost current = currentAndPrevious.get(1);
                                    return previous.getWorkerNumber() != current.getWorkerNumber();
                                })
                                .map(pair -> new WorkerIndexChange(workerIndexGroup.getKey(),
                                        pair.get(1), pair.get(0))));
    }

    /** Extracts the worker assignments belonging to {@code stageNumber} from each update. */
    private Observable<WorkerAssignments> workerChangesForStage(final int stageNumber,
                                                                Observable<JobSchedulingInfo> schedulingUpdates) {
        return schedulingUpdates
                // pull out worker assignments from jobSchedulingInfo
                .flatMap(schedulingChange -> {
                    Map<Integer, WorkerAssignments> assignments = schedulingChange.getWorkerAssignments();
                    if (assignments != null && !assignments.isEmpty()) {
                        return Observable.from(assignments.values());
                    } else {
                        return Observable.empty();
                    }
                })
                // keep only assignments for the requested stage
                .filter(assignments -> assignments.getStage() == stageNumber);
    }
}
| 8,475 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/config/ConfigurationFactory.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.config;
import io.mantisrx.runtime.loader.config.WorkerConfiguration;
/**
 * An implementation of this interface returns the worker's
 * {@link io.mantisrx.runtime.loader.config.WorkerConfiguration}. This factory
 * exists because the logic for creating a WorkerConfiguration can change
 * depending on the user or environment.
 * <p>
 * see io.mantisrx.server.worker.config.ConfigurationProvider
 */
public interface ConfigurationFactory {
    WorkerConfiguration getConfig();
}
| 8,476 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/config/ConfigurationProvider.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.config;
import io.mantisrx.runtime.loader.config.WorkerConfiguration;
/**
 * Provides static, global access to the worker's configuration. The method
 * {@link #initialize(ConfigurationFactory)} must be called before this class
 * can be used.
 * <p>
 * see io.mantisrx.server.worker.config.ConfigurationFactory
 */
public class ConfigurationProvider {

    // volatile: initialize() and getConfig() may run on different threads;
    // guarantees the factory assignment is visible without synchronization.
    private static volatile ConfigurationFactory factory;

    /** Registers the factory used by all subsequent {@link #getConfig()} calls. */
    public static void initialize(ConfigurationFactory aFactory) {
        factory = aFactory;
    }

    /**
     * @return the {@link WorkerConfiguration} produced by the registered factory.
     *
     * @throws IllegalStateException if {@link #initialize(ConfigurationFactory)} has not
     *                               been called yet.
     */
    public static WorkerConfiguration getConfig() {
        if (factory == null) {
            throw new IllegalStateException(String.format("%s#initialize() must be called first. ", ConfigurationFactory.class.getName()));
        }
        return factory.getConfig();
    }
}
| 8,477 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/config/StaticPropertiesConfigurationFactory.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.config;
import io.mantisrx.runtime.loader.config.WorkerConfiguration;
import io.mantisrx.runtime.loader.config.WorkerConfigurationUtils;
import java.util.Properties;
/**
 * {@link ConfigurationFactory} backed by a fixed {@link Properties} snapshot:
 * the configuration is parsed once at construction and returned unchanged on
 * every {@link #getConfig()} call.
 */
public class StaticPropertiesConfigurationFactory implements ConfigurationFactory {

    private final WorkerConfiguration config;

    public StaticPropertiesConfigurationFactory(Properties props) {
        config = WorkerConfigurationUtils.frmProperties(props, WorkerConfiguration.class);
    }

    @Override
    public WorkerConfiguration getConfig() {
        return this.config;
    }

    @Override
    public String toString() {
        // Fixed: previous version emitted a stray leading comma ("{, config=").
        return "StaticPropertiesConfigurationFactory{" +
                "config=" + config +
                '}';
    }
}
| 8,478 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/mesos/VirtualMachineTaskStatus.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.mesos;
/**
 * Immutable status update for a VM-launched task: identifies the task, the
 * lifecycle event ({@link TYPE}), and a free-form message.
 */
public class VirtualMachineTaskStatus {

    private final String taskId;
    private final TYPE type;
    private final String message;

    /**
     * @param taskId  id of the task the status refers to
     * @param type    lifecycle event being reported
     * @param message human-readable detail accompanying the event
     */
    public VirtualMachineTaskStatus(String taskId, TYPE type, String message) {
        this.taskId = taskId;
        this.type = type;
        this.message = message;
    }

    public String getTaskId() {
        return taskId;
    }

    public String getMessage() {
        return message;
    }

    public TYPE getType() {
        return type;
    }

    /** Lifecycle events a task can report. */
    public enum TYPE {
        STARTED, COMPLETED, ERROR;
    }
}
| 8,479 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/mesos/MesosExecutorCallbackHandler.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.mesos;
import io.mantisrx.common.JsonSerializer;
import io.mantisrx.server.core.ExecuteStageRequest;
import io.mantisrx.server.core.WrappedExecuteStageRequest;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.mesos.Executor;
import org.apache.mesos.ExecutorDriver;
import org.apache.mesos.Protos;
import org.apache.mesos.Protos.ExecutorInfo;
import org.apache.mesos.Protos.FrameworkInfo;
import org.apache.mesos.Protos.SlaveInfo;
import org.apache.mesos.Protos.TaskID;
import org.apache.mesos.Protos.TaskInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Observer;
import rx.functions.Action0;
import rx.subjects.PublishSubject;
/**
 * Mesos {@link Executor} callback handler: deserializes launched task payloads
 * into {@link WrappedExecuteStageRequest}s and forwards them to the worker via
 * the supplied observer. Kill requests complete the observer and exit the JVM.
 */
public class MesosExecutorCallbackHandler implements Executor {

    private static final Logger logger = LoggerFactory.getLogger(MesosExecutorCallbackHandler.class);

    private final Observer<WrappedExecuteStageRequest> executeStageRequestObserver;
    private final JsonSerializer serializer = new JsonSerializer();

    public MesosExecutorCallbackHandler(Observer<WrappedExecuteStageRequest> executeStageRequestObserver) {
        this.executeStageRequestObserver = executeStageRequestObserver;
    }

    @Override
    public void disconnected(ExecutorDriver arg0) {
        // No-op: nothing to clean up on driver disconnect.
    }

    @Override
    public void error(ExecutorDriver arg0, String msg) {
        logger.error(msg);
    }

    @Override
    public void frameworkMessage(ExecutorDriver arg0, byte[] arg1) {
        // No-op: framework messages are not used by this executor.
    }

    @Override
    public void killTask(ExecutorDriver arg0, TaskID task) {
        logger.info("Executor going to kill task " + task.getValue());
        executeStageRequestObserver.onCompleted();
        waitAndExit();
    }

    /**
     * Allows some time for clean up and the completion report to be sent out
     * before exiting. Until we define a better way to exit than to assume that
     * the time we wait here is sufficient before a hard exit, we live with it.
     */
    private void waitAndExit() {
        Thread t = new Thread() {
            @Override
            public void run() {
                try {
                    sleep(2000);
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt(); // restore interrupt status before exiting
                }
                System.exit(0);
            }
        };
        t.setDaemon(true);
        t.start();
    }

    /**
     * Deserializes the mesos task payload into an execute-stage request.
     *
     * @return the wrapped request, or {@code null} when the payload cannot be parsed
     */
    private WrappedExecuteStageRequest createExecuteStageRequest(TaskInfo task) {
        try {
            byte[] jsonBytes = task.getData().toByteArray();
            logger.info("Received request {}", new String(jsonBytes));
            return new WrappedExecuteStageRequest(
                    PublishSubject.create(),
                    serializer.fromJson(jsonBytes, ExecuteStageRequest.class));
        } catch (IOException e) {
            // Log with the cause instead of printStackTrace; caller handles the null.
            logger.error("Failed to deserialize ExecuteStageRequest for task " + task.getTaskId().getValue(), e);
        }
        return null;
    }

    /** Reports the task as FAILED to mesos and schedules a JVM exit. */
    private void sendLaunchError(ExecutorDriver driver, final TaskInfo task) {
        driver.sendStatusUpdate(Protos.TaskStatus.newBuilder()
                .setTaskId(task.getTaskId())
                .setState(Protos.TaskState.TASK_FAILED).build());
        waitAndExit();
    }

    /**
     * Invokes {@code errorHandler} if the request subject does not emit within
     * {@code waitSeconds} (i.e. the worker never reported start in time).
     */
    private void setupRequestFailureHandler(long waitSeconds, Observable<Boolean> requestObservable,
                                            final Action0 errorHandler) {
        requestObservable
                .buffer(waitSeconds, TimeUnit.SECONDS, 1)
                .take(1)
                .subscribe(new Observer<List<Boolean>>() {
                    @Override
                    public void onCompleted() {
                    }

                    @Override
                    public void onError(Throwable e) {
                        logger.error("onError called for request failure handler");
                        errorHandler.call();
                    }

                    @Override
                    public void onNext(List<Boolean> booleans) {
                        logger.info("onNext called for request failure handler with items: " +
                                ((booleans == null) ? "-1" : booleans.size()));
                        // An empty buffer means nothing was emitted within the window.
                        if ((booleans == null) || booleans.isEmpty())
                            errorHandler.call();
                    }
                });
    }

    @Override
    public void launchTask(final ExecutorDriver driver, final TaskInfo task) {
        WrappedExecuteStageRequest request = createExecuteStageRequest(task);
        if (request == null) {
            // Malformed payload: report the task as failed instead of
            // dereferencing the null request below (previously an NPE).
            sendLaunchError(driver, task);
            return;
        }
        logger.info("Worker for task [" + task.getTaskId().getValue() + "] with startTimeout=" +
                request.getRequest().getTimeoutToReportStart());
        setupRequestFailureHandler(request.getRequest().getTimeoutToReportStart(), request.getRequestSubject(),
                new Action0() {
                    @Override
                    public void call() {
                        sendLaunchError(driver, task);
                    }
                });
        executeStageRequestObserver.onNext(request);
    }

    @Override
    public void registered(ExecutorDriver arg0, ExecutorInfo arg1,
                           FrameworkInfo arg2, SlaveInfo arg3) {
        // No-op: nothing to initialize on registration.
    }

    @Override
    public void reregistered(ExecutorDriver arg0, SlaveInfo arg1) {
        // No-op: nothing to re-initialize on re-registration.
    }

    @Override
    public void shutdown(ExecutorDriver arg0) {
        // No-op: shutdown handling is driven by killTask/waitAndExit.
    }
}
| 8,480 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/mesos/MesosMetricsCollector.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.mesos;
import io.mantisrx.runtime.loader.config.MetricsCollector;
import io.mantisrx.runtime.loader.config.Usage;
import io.mantisrx.runtime.loader.config.WorkerConfiguration;
import io.mantisrx.shaded.com.google.common.base.Strings;
import io.netty.buffer.ByteBuf;
import io.reactivx.mantis.operators.OperatorOnErrorResumeNextViaFunction;
import java.nio.charset.Charset;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import mantis.io.reactivex.netty.RxNetty;
import mantis.io.reactivex.netty.protocol.http.client.HttpClient;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientRequest;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientResponse;
import org.json.JSONArray;
import org.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.functions.Func1;
import rx.functions.Func2;
/**
 * Mesos implementation of MetricsCollector that collects metrics using the statistics endpoint on the mesos agent.
 * <a href="https://mesos.readthedocs.io/en/latest/endpoints/slave/monitor/statistics.json/">mesos statics endpoint link</a>
 */
public class MesosMetricsCollector implements MetricsCollector {

    // Env var set by mesos on the executor; identifies this task in the agent's stats array.
    private static final String MESOS_TASK_EXECUTOR_ID_KEY = "MESOS_EXECUTOR_ID";
    private static final Logger logger = LoggerFactory.getLogger(MesosMetricsCollector.class);
    private static final long GET_TIMEOUT_SECS = 5;
    private static final int MAX_REDIRECTS = 10;

    private final int slavePort;
    private final String taskId;

    // Retries the agent HTTP call up to 3 times with a fixed 2-second delay between attempts.
    private final Func1<Observable<? extends Throwable>, Observable<?>> retryLogic = attempts -> attempts
            .zipWith(Observable.range(1, 3), (Func2<Throwable, Integer, Integer>) (t1, integer) -> integer)
            .flatMap((Func1<Integer, Observable<?>>) integer -> {
                long delay = 2L;
                logger.info(": retrying conx after sleeping for " + delay + " secs");
                return Observable.timer(delay, TimeUnit.SECONDS);
            });

    // Factory used when configuration arrives as raw Properties.
    @SuppressWarnings("unused")
    public static MesosMetricsCollector valueOf(Properties properties) {
        int slavePort = Integer.parseInt(properties.getProperty("mantis.agent.mesos.slave.port", "5051"));
        String taskId = System.getenv(MESOS_TASK_EXECUTOR_ID_KEY);
        return new MesosMetricsCollector(slavePort, taskId);
    }

    // Factory used when configuration arrives as a typed WorkerConfiguration.
    public static MesosMetricsCollector valueOf(WorkerConfiguration workerConfiguration) {
        int slavePort = workerConfiguration.getMesosSlavePort();
        String taskId = System.getenv(MESOS_TASK_EXECUTOR_ID_KEY);
        return new MesosMetricsCollector(slavePort, taskId);
    }

    MesosMetricsCollector(int slavePort, String taskId) {
        logger.info("Creating MesosMetricsCollector to port {} of taskId: {}", slavePort, taskId);
        if (Strings.isNullOrEmpty(taskId)) {
            // only log error to avoid breaking tests.
            logger.error("Invalid task id for MesosMetricsCollector");
        }
        this.slavePort = slavePort;
        this.taskId = taskId;
    }

    // Fetches the agent's statistics JSON; blocks up to GET_TIMEOUT_SECS per attempt
    // and returns "" if the endpoint cannot be reached after retries.
    private String getUsageJson() {
        String usageEndpoint = "monitor/statistics.json";
        final String url = "http://localhost:" + slavePort + "/" + usageEndpoint;
        return RxNetty
                .createHttpRequest(HttpClientRequest.createGet(url), new HttpClient.HttpClientConfig.Builder()
                        .setFollowRedirect(true).followRedirect(MAX_REDIRECTS).build())
                .lift(new OperatorOnErrorResumeNextViaFunction<>(t -> Observable.error(t)))
                .timeout(GET_TIMEOUT_SECS, TimeUnit.SECONDS)
                .retryWhen(retryLogic)
                .flatMap((Func1<HttpClientResponse<ByteBuf>, Observable<ByteBuf>>) r -> r.getContent())
                .map(o -> o.toString(Charset.defaultCharset()))
                .doOnError(throwable -> logger.warn("Can't get resource usage from mesos slave endpoint (" + url + ") - " + throwable.getMessage(), throwable))
                .toBlocking()
                .firstOrDefault("");
    }

    @Override
    public Usage get() {
        return getCurentUsage(taskId, getUsageJson());
    }

    // Parses the agent's statistics array and extracts this task's resource usage.
    // Returns null when the payload is empty, the task id is not present, or the
    // matching entry has no "statistics" object. Missing numeric fields fall back
    // to 0.0 (or to mem_rss_bytes for mem_anon_bytes).
    // NOTE(review): field names assume the mesos v1 agent statistics schema — verify
    // against the agent version actually deployed.
    static Usage getCurentUsage(String taskId, String usageJson) {
        if (usageJson == null || usageJson.isEmpty()) {
            logger.warn("Empty usage on task {}", taskId);
            return null;
        }
        JSONArray array = new JSONArray(usageJson);
        if (array.length() == 0)
            return null;
        JSONObject obj = null;
        // Locate the entry whose executor_id matches this task.
        for (int i = 0; i < array.length(); i++) {
            JSONObject executor = array.getJSONObject(i);
            if (executor != null) {
                String id = executor.optString("executor_id");
                if (id != null && id.equals(taskId)) {
                    obj = executor.getJSONObject("statistics");
                    break;
                }
            }
        }
        if (obj == null)
            return null;
        double cpus_limit = obj.optDouble("cpus_limit");
        if (Double.isNaN(cpus_limit)) {
            cpus_limit = 0.0;
        }
        double cpus_system_time_secs = obj.optDouble("cpus_system_time_secs");
        if (Double.isNaN(cpus_system_time_secs)) {
            logger.warn("Didn't get cpus_system_time_secs from mesos stats");
            cpus_system_time_secs = 0.0;
        }
        double cpus_user_time_secs = obj.optDouble("cpus_user_time_secs");
        if (Double.isNaN(cpus_user_time_secs)) {
            logger.warn("Didn't get cpus_user_time_secs from mesos stats");
            cpus_user_time_secs = 0.0;
        }
        // Also, cpus_throttled_time_secs may be useful to notice when job is throttled, will look into it later
        double mem_rss_bytes = obj.optDouble("mem_rss_bytes");
        if (Double.isNaN(mem_rss_bytes)) {
            logger.warn("Couldn't get mem_rss_bytes from mesos stats");
            mem_rss_bytes = 0.0;
        }
        double mem_anon_bytes = obj.optDouble("mem_anon_bytes");
        if (Double.isNaN(mem_anon_bytes)) {
            // Fall back to RSS when anonymous-memory accounting is unavailable.
            mem_anon_bytes = mem_rss_bytes;
        }
        double mem_limit = obj.optDouble("mem_limit_bytes");
        if (Double.isNaN(mem_limit))
            mem_limit = 0.0;
        double network_read_bytes = obj.optDouble("net_rx_bytes");
        if (Double.isNaN(network_read_bytes))
            network_read_bytes = 0.0;
        double network_write_bytes = obj.optDouble("net_tx_bytes");
        if (Double.isNaN(network_write_bytes))
            network_write_bytes = 0.0;
        return new Usage(cpus_limit, cpus_system_time_secs, cpus_user_time_secs, mem_limit, mem_rss_bytes, mem_anon_bytes,
                network_read_bytes, network_write_bytes);
    }
}
| 8,481 |
0 | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker | Create_ds/mantis/mantis-server/mantis-server-worker/src/main/java/io/mantisrx/server/worker/mesos/VirualMachineWorkerServiceMesosImpl.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.server.worker.mesos;
import io.mantisrx.server.core.BaseService;
import io.mantisrx.server.core.WrappedExecuteStageRequest;
import io.mantisrx.server.worker.VirtualMachineWorkerService;
import io.mantisrx.server.worker.mesos.VirtualMachineTaskStatus.TYPE;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import org.apache.mesos.MesosExecutorDriver;
import org.apache.mesos.Protos;
import org.apache.mesos.Protos.TaskID;
import org.apache.mesos.Protos.TaskState;
import org.apache.mesos.Protos.TaskStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Observer;
import rx.functions.Action1;
/**
 * Mesos-backed {@link VirtualMachineWorkerService}: registers the worker as a
 * mesos executor, runs the driver on a dedicated daemon thread, and relays
 * task-status transitions back to the mesos agent.
 */
public class VirualMachineWorkerServiceMesosImpl extends BaseService implements VirtualMachineWorkerService {

    private static final Logger logger = LoggerFactory.getLogger(VirualMachineWorkerServiceMesosImpl.class);

    private MesosExecutorDriver mesosDriver;
    private ExecutorService executor;
    private Observer<WrappedExecuteStageRequest> executeStageRequestObserver;
    private Observable<VirtualMachineTaskStatus> vmTaskStatusObservable;

    public VirualMachineWorkerServiceMesosImpl(Observer<WrappedExecuteStageRequest> executeStageRequestObserver,
                                               Observable<VirtualMachineTaskStatus> vmTaskStatusObservable) {
        this.executeStageRequestObserver = executeStageRequestObserver;
        this.vmTaskStatusObservable = vmTaskStatusObservable;
        // Single daemon thread dedicated to running the mesos driver loop.
        executor = Executors.newSingleThreadExecutor(runnable -> {
            Thread driverThread = new Thread(runnable, "vm_worker_mesos_executor_thread");
            driverThread.setDaemon(true);
            return driverThread;
        });
    }

    @Override
    public void start() {
        logger.info("Registering Mantis Worker with Mesos executor callbacks");
        mesosDriver = new MesosExecutorDriver(new MesosExecutorCallbackHandler(executeStageRequestObserver));
        // launch driver on background thread
        logger.info("launch driver on background thread");
        executor.execute(() -> {
            try {
                mesosDriver.run();
            } catch (Exception e) {
                logger.error("Failed to register Mantis Worker with Mesos executor callbacks", e);
            }
        });
        // subscribe to vm task updates on current thread
        logger.info("subscribe to vm task updates on current thread");
        vmTaskStatusObservable.subscribe(vmTaskStatus -> {
            TYPE type = vmTaskStatus.getType();
            if (type == TYPE.COMPLETED) {
                Protos.Status status = mesosDriver.sendStatusUpdate(TaskStatus.newBuilder()
                        .setTaskId(TaskID.newBuilder().setValue(vmTaskStatus.getTaskId()).build())
                        .setState(TaskState.TASK_FINISHED).build());
                logger.info("Sent COMPLETED state to mesos, driver status=" + status);
            } else if (type == TYPE.STARTED) {
                Protos.Status status = mesosDriver.sendStatusUpdate(TaskStatus.newBuilder()
                        .setTaskId(TaskID.newBuilder().setValue(vmTaskStatus.getTaskId()).build())
                        .setState(TaskState.TASK_RUNNING).build());
                logger.info("Sent RUNNING state to mesos, driver status=" + status);
            }
        });
    }

    @Override
    public void shutdown() {
        logger.info("Unregistering Mantis Worker with Mesos executor callbacks");
        mesosDriver.stop();
        executor.shutdown();
    }

    @Override
    public void enterActiveMode() {}
}
| 8,482 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/test/java/io/mantisrx/connector/publish | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/test/java/io/mantisrx/connector/publish/core/QueryRegistryTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.core;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import io.mantisrx.publish.proto.MantisServerSubscription;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
class QueryRegistryTest {
@Test
void registerQueryTest() {
try {
QueryRegistry queryRegistry = new QueryRegistry.Builder().build();
fail();
} catch (IllegalArgumentException ignored) {
}
QueryRegistry queryRegistry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build();
String targetApp = QueryRegistry.ANY;
try {
queryRegistry.registerQuery(targetApp, null, "true");
fail();
} catch (Exception ignored) {
}
try {
queryRegistry.registerQuery(targetApp, "subId", null);
fail();
} catch (Exception ignored) {
}
queryRegistry.registerQuery("myApp", "subId", "true");
queryRegistry.registerQuery("myApp2", "subId", "false");
List<MantisServerSubscription> currentSubs = queryRegistry.getCurrentSubscriptionsForApp("myApp");
assertEquals(1, currentSubs.size());
List<MantisServerSubscription> currentSubs2 = queryRegistry.getCurrentSubscriptionsForApp("myApp2");
assertEquals(1, currentSubs2.size());
Map<String, List<MantisServerSubscription>> allSubscriptions = queryRegistry.getAllSubscriptions();
assertEquals(2, allSubscriptions.size());
assertTrue(allSubscriptions.containsKey("myApp"));
assertTrue(allSubscriptions.containsKey("myApp2"));
}
@Test
void registerQueryForAnyLookupSpecificAppTest() {
try {
QueryRegistry queryRegistry = new QueryRegistry.Builder().build();
fail();
} catch (IllegalArgumentException ignored) { }
QueryRegistry queryRegistry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build();
String targetApp = QueryRegistry.ANY;
queryRegistry.registerQuery(targetApp, "subId", "true");
List<MantisServerSubscription> currentSubs = queryRegistry.getCurrentSubscriptionsForApp("myApp");
assertEquals(1, currentSubs.size());
}
@Test
void registerQueryForAppLookupAnyTest() {
try {
QueryRegistry queryRegistry = new QueryRegistry.Builder().build();
fail();
} catch (IllegalArgumentException ignored) { }
QueryRegistry queryRegistry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build();
String targetApp = QueryRegistry.ANY;
queryRegistry.registerQuery("myApp", "subId", "true");
List<MantisServerSubscription> currentSubs = queryRegistry.getCurrentSubscriptionsForApp(targetApp);
assertEquals(0, currentSubs.size());
}
@Test
@Disabled("time-based, non-deterministic")
void deregisterQueryTest() throws InterruptedException {
try {
QueryRegistry queryRegistry = new QueryRegistry.Builder().build();
fail();
} catch (IllegalArgumentException ignored) { }
QueryRegistry queryRegistry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build();
String targetApp = "myapp";
try {
queryRegistry.registerQuery(targetApp, null, "true");
fail();
} catch (Exception ignored) {
}
try {
queryRegistry.registerQuery(targetApp, "subId", null);
fail();
} catch (Exception ignored) {
}
queryRegistry.registerQuery(targetApp, "subId", "true");
List<MantisServerSubscription> currentSubs = queryRegistry.getCurrentSubscriptionsForApp(targetApp);
assertEquals(1, currentSubs.size());
queryRegistry.deregisterQuery(targetApp, "subId", "true");
Thread.sleep(500);
currentSubs = queryRegistry.getCurrentSubscriptionsForApp(QueryRegistry.ANY);
assertEquals(0, currentSubs.size());
}
@Test
@Disabled("time-based, non-deterministic")
void registerIdenticalQueryGetsDedupedTest() {
    QueryRegistry queryRegistry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build();
    String targetApp = "myApp";
    int concurrency = 5;
    // latch releases all workers at once; endLatch marks completion of all of them.
    CountDownLatch latch = new CountDownLatch(1);
    CountDownLatch endLatch = new CountDownLatch(concurrency);
    // All workers register the exact same (subId, query) pair concurrently.
    Runnable task = () -> {
        try {
            latch.await();
            queryRegistry.registerQuery(targetApp, "subId", "true");
            endLatch.countDown();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    };
    ExecutorService executorService = Executors.newFixedThreadPool(concurrency);
    for (int i = 0; i < concurrency; i++) {
        executorService.submit(task);
    }
    latch.countDown();
    try {
        endLatch.await();
        // Identical registrations must collapse into a single subscription whose
        // id carries the configured client-id prefix.
        List<MantisServerSubscription> currentSubs = queryRegistry.getCurrentSubscriptionsForApp(targetApp);
        assertEquals(1, currentSubs.size());
        assertEquals("myPrefix_subId", currentSubs.get(0).getSubscriptionId());
    } catch (InterruptedException e) {
        e.printStackTrace();
        fail();
    }
}
@Test
@Disabled("time-based, non-deterministic")
void registerIdenticalQueryRemovalTest() throws InterruptedException {
    QueryRegistry queryRegistry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build();
    String targetApp = "myApp";
    int concurrency = 5;
    CountDownLatch latch = new CountDownLatch(1);
    CountDownLatch endLatch = new CountDownLatch(concurrency);
    // One fewer deregistration than registrations, so exactly one reference
    // should remain when the removers finish.
    CountDownLatch removeQueryEndLatch = new CountDownLatch(concurrency - 1);
    Runnable addQueryTask = () -> {
        try {
            latch.await();
            queryRegistry.registerQuery(targetApp, "subId", "true");
            endLatch.countDown();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    };
    Runnable removeQueryTask = () -> {
        try {
            latch.await();
            queryRegistry.deregisterQuery(targetApp, "subId", "true");
            removeQueryEndLatch.countDown();
        } catch (InterruptedException ignored) {
        }
    };
    ExecutorService executorService = Executors.newFixedThreadPool(concurrency * 2);
    for (int i = 0; i < concurrency; i++) {
        executorService.submit(addQueryTask);
    }
    for (int i = 0; i < concurrency - 1; i++) {
        executorService.submit(removeQueryTask);
    }
    latch.countDown();
    removeQueryEndLatch.await();
    // Net ref count is +1, so the subscription must still be present.
    List<MantisServerSubscription> currentSubs = queryRegistry.getCurrentSubscriptionsForApp(targetApp);
    assertEquals(1, currentSubs.size());
    assertEquals("myPrefix_subId", currentSubs.get(0).getSubscriptionId());
}
@Test
@Disabled("time-based, non-deterministic")
void registerQueryMultipleAppsRemovalTest() throws InterruptedException {
    QueryRegistry queryRegistry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build();
    String targetApp = "myApp";
    String targetApp2 = "myApp2";
    int concurrency = 5;
    CountDownLatch latch = new CountDownLatch(1);
    CountDownLatch endLatch = new CountDownLatch(concurrency);
    // One fewer deregistration than registrations per app, so each app should
    // end up with exactly one live subscription.
    CountDownLatch removeQueryEndLatch = new CountDownLatch(concurrency - 1);
    Runnable addQueryTask = () -> {
        try {
            latch.await();
            queryRegistry.registerQuery(targetApp, "subId", "true");
            endLatch.countDown();
        } catch (InterruptedException ignored) {
        }
    };
    Runnable addQueryTask2 = () -> {
        try {
            latch.await();
            queryRegistry.registerQuery(targetApp2, "subId", "true");
            endLatch.countDown();
        } catch (InterruptedException ignored) {
        }
    };
    Runnable removeQueryTask = () -> {
        try {
            latch.await();
            queryRegistry.deregisterQuery(targetApp, "subId", "true");
            removeQueryEndLatch.countDown();
        } catch (InterruptedException ignored) {
        }
    };
    Runnable removeQueryTask2 = () -> {
        try {
            latch.await();
            queryRegistry.deregisterQuery(targetApp2, "subId", "true");
            removeQueryEndLatch.countDown();
        } catch (InterruptedException ignored) {
        }
    };
    ExecutorService executorService = Executors.newFixedThreadPool(concurrency * 2);
    for (int i = 0; i < concurrency; i++) {
        executorService.submit(addQueryTask);
        executorService.submit(addQueryTask2);
    }
    for (int i = 0; i < concurrency - 1; i++) {
        executorService.submit(removeQueryTask);
        executorService.submit(removeQueryTask2);
    }
    latch.countDown();
    removeQueryEndLatch.await();
    // Ref counting is tracked independently per target app.
    List<MantisServerSubscription> currentSubs = queryRegistry.getCurrentSubscriptionsForApp(targetApp);
    assertEquals(1, currentSubs.size());
    List<MantisServerSubscription> currentSubs2 = queryRegistry.getCurrentSubscriptionsForApp(targetApp2);
    assertEquals(1, currentSubs2.size());
    assertEquals("myPrefix_subId", currentSubs.get(0).getSubscriptionId());
}
}
| 8,483 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/test/java/io/mantisrx/connector/publish | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/test/java/io/mantisrx/connector/publish/core/EventFilterTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.core;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import com.mantisrx.common.utils.MantisSourceJobConstants;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.junit.jupiter.api.Test;
import rx.functions.Func1;
/**
 * Tests for {@link EventFilter}. The materialized filter does a plain
 * substring match of each term (clientId + "_" + subId) against the
 * serialized event payload.
 */
public class EventFilterTest {

    private final ObjectMapper mapper = new ObjectMapper();

    /** A null client id must be rejected by the constructor. */
    @Test
    public void missingClientIdFails() {
        try {
            new EventFilter(null);
            fail();
        } catch (Exception ignored) {
        }
    }

    /** Payloads listing a matching client pass; payloads listing only other clients do not. */
    @Test
    public void basicFilterTest() throws JsonProcessingException {
        String clientId = "myClientId";
        EventFilter filter = new EventFilter(clientId);
        Map<String, List<String>> params = new HashMap<>();
        List<String> subIdParam = new ArrayList<>();
        subIdParam.add("mySubId");
        params.put(MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME, subIdParam);
        Func1<String, Boolean> materializedFilter = filter.call(params);

        List<String> matchedClients = new ArrayList<>();
        matchedClients.add(clientId + "_" + "mySubId");
        matchedClients.add(clientId + "_" + "BlahSubId");
        Map<String, Object> payLoad = new HashMap<>();
        payLoad.put("ts", System.currentTimeMillis());
        payLoad.put("matchedClients", matchedClients);
        payLoad.put("type", "EVENT");
        String payloadStr = mapper.writeValueAsString(payLoad);
        assertTrue(materializedFilter.call(payloadStr));

        // BUGFIX: these entries were mistakenly added to matchedClients above,
        // leaving matchedClients2 empty and making the assertFalse pass vacuously.
        // Also note the filter matches substrings, so the non-matching ids must
        // not contain "mySubId" as a substring ("mySubId2" would still match).
        List<String> matchedClients2 = new ArrayList<>();
        matchedClients2.add(clientId + "_" + "otherSubId");
        matchedClients2.add(clientId + "_" + "BlahSubId");
        payLoad = new HashMap<>();
        payLoad.put("ts", System.currentTimeMillis());
        payLoad.put("matchedClients", matchedClients2);
        payLoad.put("type", "EVENT");
        payloadStr = mapper.writeValueAsString(payLoad);
        assertFalse(materializedFilter.call(payloadStr));
    }

    /** Empty payloads never match, and a null payload must not throw. */
    @Test
    public void basicEmptyEventFilterTest() throws JsonProcessingException {
        String clientId = "myClientId";
        EventFilter filter = new EventFilter(clientId);
        Map<String, List<String>> params = new HashMap<>();
        List<String> subIdParam = new ArrayList<>();
        subIdParam.add("mySubId");
        params.put(MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME, subIdParam);
        Func1<String, Boolean> materializedFilter = filter.call(params);
        List<String> matchedClients = new ArrayList<>();
        matchedClients.add(clientId + "_" + "mySubId");
        matchedClients.add(clientId + "_" + "BlahSubId");
        Map<String, Object> payLoad = new HashMap<>();
        String payloadStr = mapper.writeValueAsString(payLoad);
        assertFalse(materializedFilter.call(payloadStr));
        try {
            assertFalse(materializedFilter.call(null));
        } catch (Exception e) {
            fail();
        }
    }

    /** Without a subscriptionId (or filter) parameter the filter passes everything. */
    @Test
    public void missingSubIdParamAlwaysPasses() throws JsonProcessingException {
        String clientId = "myClientId";
        EventFilter filter = new EventFilter(clientId);
        Map<String, List<String>> params = new HashMap<>();
        Func1<String, Boolean> materializedFilter = filter.call(params);
        List<String> matchedClients = new ArrayList<>();
        matchedClients.add(clientId + "_" + "mySubId");
        matchedClients.add(clientId + "_" + "BlahSubId");
        Map<String, Object> payLoad = new HashMap<>();
        payLoad.put("ts", System.currentTimeMillis());
        payLoad.put("matchedClients", matchedClients);
        payLoad.put("type", "EVENT");
        String payloadStr = mapper.writeValueAsString(payLoad);
        assertTrue(materializedFilter.call(payloadStr));
    }
}
| 8,484 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/core/QueryRegistry.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.core;
import static io.mantisrx.connector.publish.core.ObjectUtils.checkNotNull;
import io.mantisrx.publish.proto.MantisServerSubscription;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Registry of MQL-style subscriptions keyed by target application name.
 * Queries registered under the sentinel app {@link #ANY} are returned for
 * every app-specific lookup; per-app dedupe and ref-counting are delegated
 * to {@link QueryMap}.
 */
public class QueryRegistry {

    /** Sentinel app name matching all applications. */
    public static final String ANY = "ANY";

    private static final Logger LOGGER = LoggerFactory.getLogger(QueryRegistry.class);

    // Shared immutable-by-convention default for "no additional parameters".
    private final Map<String, String> emptyMap = new HashMap<>(0);

    private final ConcurrentMap<String, QueryMap> appToSubscriptionMap = new ConcurrentHashMap<>();

    // Prepended by QueryMap to every subscription id.
    private final String clientIdPrefix;

    private QueryRegistry(String clientIdPrefix) {
        this.clientIdPrefix = clientIdPrefix;
    }

    /** Registers a query with no additional parameters and no Groovy validation. */
    public void registerQuery(String targetApp, String subId, String query) {
        registerQuery(targetApp, subId, query, this.emptyMap, false);
    }

    /**
     * Registers a query for the given target application.
     *
     * @param targetApp             application the query targets ({@link #ANY} for all)
     * @param subId                 caller's subscription id (prefix is added internally)
     * @param query                 the query string
     * @param additionalParams      optional extra parameters; may be null
     * @param validateQueryAsGroovy whether the query should be validated as Groovy
     * @throws IllegalArgumentException if targetApp, subId or query is null/empty
     */
    public void registerQuery(String targetApp, String subId, String query, Map<String, String> additionalParams, boolean validateQueryAsGroovy) {
        checkNotNull("subscriptionId", subId);
        checkNotNull("query", query);
        checkNotNull("targetAppName", targetApp);
        Map<String, String> addParams = (additionalParams == null) ? emptyMap : additionalParams;
        // BUGFIX: forward the null-normalized addParams (previously the raw
        // additionalParams was passed despite being normalized above), and use
        // the atomic computeIfAbsent instead of putIfAbsent followed by get.
        appToSubscriptionMap
                .computeIfAbsent(targetApp, k -> new QueryMap(clientIdPrefix))
                .registerQuery(subId, query, addParams, validateQueryAsGroovy);
    }

    /**
     * Decrements the ref count of (and possibly removes) the given subscription.
     * Always returns true, even when the app or subscription is unknown.
     */
    public boolean deregisterQuery(String targetApp, String subId, String query) {
        appToSubscriptionMap.computeIfPresent(targetApp, (k, v) -> {
            v.deregisterQuery(subId, query);
            return v;
        });
        return true;
    }

    /**
     * Returns the subscriptions registered for {@code app}, plus — unless
     * {@code app} is {@link #ANY} itself — those registered under ANY.
     */
    public List<MantisServerSubscription> getCurrentSubscriptionsForApp(String app) {
        List<MantisServerSubscription> subsForApp = (appToSubscriptionMap.containsKey(app)) ? appToSubscriptionMap.get(app).getCurrentSubscriptions() : new ArrayList<>();
        if (!app.equals(ANY) && appToSubscriptionMap.containsKey(ANY)) {
            subsForApp.addAll(appToSubscriptionMap.get(ANY).getCurrentSubscriptions());
        }
        return subsForApp;
    }

    /**
     * Returns subscriptions for the app named by the first "app" query
     * parameter, defaulting to {@link #ANY} when absent.
     *
     * @param queryParams key-value pairs of stream-queries.
     */
    public List<MantisServerSubscription> getCurrentSubscriptions(Map<String, List<String>> queryParams) {
        String app = ANY;
        if (queryParams.containsKey("app")) {
            app = queryParams.get("app").get(0);
        }
        return getCurrentSubscriptionsForApp(app);
    }

    /** Returns a snapshot of all subscriptions grouped by target app. */
    public Map<String, List<MantisServerSubscription>> getAllSubscriptions() {
        Map<String, List<MantisServerSubscription>> allSubMap = new HashMap<>();
        appToSubscriptionMap.forEach((s, q) -> {
            allSubMap.put(s, q.getCurrentSubscriptions());
        });
        return allSubMap;
    }

    /** Builder; a non-empty client-id prefix is mandatory. */
    public static class Builder {

        private String prefix = null;

        public Builder() {
        }

        public Builder withClientIdPrefix(String prefix) {
            checkNotNull("prefix", prefix);
            this.prefix = prefix;
            return this;
        }

        /** @throws IllegalArgumentException if no prefix was supplied */
        public QueryRegistry build() {
            checkNotNull("prefix", this.prefix);
            return new QueryRegistry(prefix);
        }
    }
}
| 8,485 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/core/QueryMap.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.core;
import static io.mantisrx.connector.publish.core.ObjectUtils.checkNotNull;
import io.mantisrx.publish.proto.MantisServerSubscription;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Subscriptions for one target application, keyed by subscription id.
 * Repeated identical registrations are de-duplicated via a per-subscription
 * reference count, so a subscription is removed only after a matching number
 * of deregistrations.
 */
public class QueryMap {

    private static final Logger LOGGER = LoggerFactory.getLogger(QueryMap.class);

    // Shared default for "no additional parameters".
    private final Map<String, String> emptyMap = new HashMap<>(0);

    private final ConcurrentHashMap<String, MantisServerSubscriptionWrapper> subscriptionMap =
            new ConcurrentHashMap<>();

    // Prefix prepended to every subscription id (see addMantisPrefix).
    private final String clientIdPrefix;

    QueryMap(String clientIdPrefix) {
        this.clientIdPrefix = clientIdPrefix;
    }

    /**
     * Registers a query without Groovy validation.
     * BUGFIX: the supplied map is now forwarded — previously this overload
     * ignored its argument (which was also confusingly named "emptyMap") and
     * always passed the empty default. Null is still accepted.
     */
    void registerQuery(String subId, String query, Map<String, String> additionalParams) {
        registerQuery(subId, query, additionalParams, false);
    }

    /**
     * Registers (or bumps the ref count of) the subscription for {@code subId}.
     * If a subscription already exists under subId, its original query and
     * params are kept; only the ref count is incremented.
     *
     * @throws IllegalArgumentException if subId or query is null/empty
     */
    void registerQuery(String subId, String query,
                       Map<String, String> additionalParams,
                       boolean validateQueryAsGroovy) {
        checkNotNull("subscriptionId", subId);
        checkNotNull("query", query);
        Map<String, String> addParams = (additionalParams == null) ? emptyMap : additionalParams;
        // computeIfAbsent creates the wrapper atomically; the increment then
        // records one more registration for this subId.
        subscriptionMap.computeIfAbsent(subId, (s) -> new MantisServerSubscriptionWrapper(addMantisPrefix(subId), query, addParams)).incrementAndGetRefCount();
    }

    /**
     * Decrements the ref count for {@code subId}, removing the subscription
     * once the count reaches zero. Always returns true.
     */
    boolean deregisterQuery(String subId, String query) {
        MantisServerSubscriptionWrapper subscription = subscriptionMap.computeIfPresent(subId, (k, v) -> {
            v.decrementRefCount();
            return v;
        });
        if (subscription != null) {
            if (subscription.getRefCount() <= 0) {
                // NOTE(review): the decrement and the remove are not a single
                // atomic step, so a concurrent re-registration landing between
                // them could be lost — confirm whether callers can race here.
                LOGGER.info("Subscription ref count is 0 for subscriptionId " + subId + " removing subscription");
                subscriptionMap.remove(subId);
            } else {
                LOGGER.info("Subscription ref count decremented for subscriptionId " + subId);
            }
        } else {
            LOGGER.info("Subscription " + subId + " not found");
        }
        return true;
    }

    /** Returns a snapshot of the currently registered subscriptions. */
    public List<MantisServerSubscription> getCurrentSubscriptions() {
        return subscriptionMap.values().stream().map(MantisServerSubscriptionWrapper::getSubscription).collect(Collectors.toList());
    }

    private String addMantisPrefix(String subId) {
        return clientIdPrefix + "_" + subId;
    }

    /** Builder; a non-empty client-id prefix is mandatory. */
    public static class Builder {

        String prefix = null;

        Builder() {
        }

        Builder withClientIdPrefix(String prefix) {
            checkNotNull("prefix", prefix);
            this.prefix = prefix;
            return this;
        }

        QueryMap build() {
            checkNotNull("prefix", this.prefix);
            return new QueryMap(prefix);
        }
    }

    /** Pairs a subscription with a ref count used to de-dupe repeated registrations. */
    public static class MantisServerSubscriptionWrapper {

        private final MantisServerSubscription subscription;

        // Used to dedup erroneous (repeated) subscriptions from a client.
        final AtomicInteger refCount = new AtomicInteger();

        MantisServerSubscriptionWrapper(String subId,
                                        String query,
                                        Map<String, String> additionalParams) {
            this.subscription = new MantisServerSubscription(subId, query, additionalParams);
        }

        MantisServerSubscription getSubscription() {
            return this.subscription;
        }

        int incrementAndGetRefCount() {
            return refCount.incrementAndGet();
        }

        void decrementRefCount() {
            refCount.decrementAndGet();
        }

        int getRefCount() {
            return refCount.get();
        }

        @Override
        public String toString() {
            return "MantisServerSubscriptionWrapper{"
                    + " subscription=" + subscription
                    + ", refCount=" + refCount
                    + '}';
        }
    }
}
| 8,486 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/core/ObjectUtils.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.core;
import java.util.Arrays;
import java.util.List;
/**
 * Small argument-validation and string helpers shared by the publish connector.
 */
public class ObjectUtils {

    /**
     * Validates that {@code param} is neither null nor empty.
     *
     * @param paramName name used in the exception message
     * @param param     value to validate
     * @throws IllegalArgumentException if {@code param} is null or empty
     */
    public static void checkNotNull(String paramName, String param) {
        if (param == null || param.isEmpty()) {
            // BUGFIX: the message previously said only "cannot be null" even
            // though empty strings are rejected too.
            throw new IllegalArgumentException(paramName + " cannot be null or empty");
        }
    }

    /**
     * Validates an arbitrary boolean precondition.
     *
     * @throws IllegalArgumentException if {@code condition} is false
     */
    public static void checkArgCondition(String paramName, boolean condition) {
        if (!condition) {
            throw new IllegalArgumentException(paramName + " is invalid");
        }
    }

    /**
     * Splits a comma-separated string into its elements. The input is trimmed
     * as a whole first; whitespace around individual elements is preserved.
     */
    public static List<String> convertCommaSeparatedStringToList(String str) {
        // "," needs no escaping in a regex; identical behavior to "\\,".
        return Arrays.asList(str.trim().split(","));
    }
}
| 8,487 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/core/EventFilter.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.core;
import com.mantisrx.common.utils.MantisSourceJobConstants;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
import rx.functions.Func1;
/**
 * Materializes a per-connection event filter from HTTP query parameters.
 * With a "filter" parameter the comma-separated terms are used directly;
 * with a "subscriptionId" parameter a single term clientId + "_" + subId is
 * used; with neither, every event passes. Matching is a plain substring
 * check of each term against the serialized event.
 */
public class EventFilter implements Func1<Map<String, List<String>>, Func1<String, Boolean>> {

    private static final Logger LOGGER = Logger.getLogger(EventFilter.class);

    private final String clientId;

    /** @throws IllegalArgumentException if clientId is null or empty */
    public EventFilter(String clientId) {
        ObjectUtils.checkNotNull("clientId", clientId);
        this.clientId = clientId;
    }

    @Override
    public Func1<String, Boolean> call(Map<String, List<String>> parameters) {
        if (parameters != null) {
            if (parameters.containsKey(MantisSourceJobConstants.FILTER_PARAM_NAME)) {
                // Explicit filter terms: every term must be present in the event.
                String filterBy = parameters.get(MantisSourceJobConstants.FILTER_PARAM_NAME).get(0);
                List<String> terms = convertCommaSeparatedEventsToList(filterBy);
                LOGGER.info("terms: " + terms);
                return new SourceEventFilter(terms);
            } else if (parameters.containsKey(MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME)) {
                // Subscription id: match the client-id-prefixed id as a single term.
                String subId = parameters.get(MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME).get(0);
                List<String> terms = new ArrayList<>();
                terms.add(clientId + "_" + subId);
                return new SourceEventFilter(terms);
            }
        }
        // Default: pass everything through.
        return t1 -> true;
    }

    private List<String> convertCommaSeparatedEventsToList(String filterBy) {
        List<String> terms = new ArrayList<>();
        if (filterBy != null && !filterBy.isEmpty()) {
            terms = Arrays.asList(filterBy.split("\\s*,\\s*"));
        }
        return terms;
    }

    /** Passes an event only when it contains every term; null/empty events never match. */
    private static class SourceEventFilter implements Func1<String, Boolean> {

        private final List<String> terms;

        SourceEventFilter(List<String> terms) {
            this.terms = terms;
            LOGGER.info("Initiated with terms" + terms);
        }

        @Override
        public Boolean call(String data) {
            boolean match = true;
            if (data != null && !data.isEmpty()) {
                for (String term : terms) {
                    if (!data.contains(term)) {
                        match = false;
                        break;
                    }
                }
            } else {
                match = false;
            }
            return match;
        }
    }
}
| 8,488 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source/http/NettyExceptionHandler.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.source.http;
import static io.netty.handler.codec.http.HttpHeaderNames.CONNECTION;
import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_TYPE;
import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.DefaultHttpResponse;
import io.netty.handler.codec.http.HttpHeaderValues;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpResponse;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpUtil;
import io.netty.handler.codec.http.LastHttpContent;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Terminal pipeline handler: any {@link HttpRequest} that reaches it was not
 * handled upstream, and any uncaught pipeline exception lands here. Both are
 * answered with a JSON error response and, unless keep-alive is requested,
 * the connection is closed.
 */
public class NettyExceptionHandler extends SimpleChannelInboundHandler<HttpRequest> {

    // BUGFIX: logger made static final (was a mutable static field).
    private static final Logger LOGGER = LoggerFactory.getLogger(NettyExceptionHandler.class);

    // Extra headers added to every error response; currently none.
    private final Map<String, String> responseHeaders = new HashMap<>();

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, HttpRequest message) {
        // We can't deal with this message: no one earlier in the pipeline handled it.
        LOGGER.warn("Unknown message received: {}", message);
        sendResponse(
                ctx,
                false,
                message + " Bad request received.",
                HttpResponseStatus.BAD_REQUEST,
                responseHeaders);
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
        LOGGER.warn("Unhandled exception", cause);
        sendResponse(
                ctx,
                false,
                "Internal server error: " + cause.getMessage(),
                HttpResponseStatus.INTERNAL_SERVER_ERROR,
                responseHeaders);
    }

    /**
     * Sends the given response and status code to the given channel.
     *
     * @param channelHandlerContext identifying the open channel
     * @param keepAlive If the connection should be kept alive.
     * @param message which should be sent
     * @param statusCode of the message to send
     * @param headers additional header values
     * @return a future completed when the last content has been flushed
     */
    public static CompletableFuture<Void> sendResponse(
            ChannelHandlerContext channelHandlerContext,
            boolean keepAlive,
            String message,
            HttpResponseStatus statusCode,
            Map<String, String> headers) {
        HttpResponse response = new DefaultHttpResponse(HTTP_1_1, statusCode);
        response.headers().set(CONTENT_TYPE, "application/json");
        for (Map.Entry<String, String> headerEntry : headers.entrySet()) {
            response.headers().set(headerEntry.getKey(), headerEntry.getValue());
        }
        if (keepAlive) {
            response.headers().set(CONNECTION, HttpHeaderValues.KEEP_ALIVE);
        }
        byte[] buf = message.getBytes(StandardCharsets.UTF_8);
        ByteBuf b = Unpooled.copiedBuffer(buf);
        HttpUtil.setContentLength(response, buf.length);
        // Write the initial line and headers, then the body, then flush.
        channelHandlerContext.write(response);
        channelHandlerContext.write(b);
        ChannelFuture lastContentFuture = channelHandlerContext.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT);
        // Close the connection if no keep-alive is needed.
        if (!keepAlive) {
            lastContentFuture.addListener(ChannelFutureListener.CLOSE);
        }
        return toCompletableFuture(lastContentFuture);
    }

    /** Bridges a Netty {@link ChannelFuture} to a {@link CompletableFuture}. */
    private static CompletableFuture<Void> toCompletableFuture(final ChannelFuture channelFuture) {
        final CompletableFuture<Void> completableFuture = new CompletableFuture<>();
        channelFuture.addListener(future -> {
            if (future.isSuccess()) {
                completableFuture.complete(null);
            } else {
                completableFuture.completeExceptionally(future.cause());
            }
        });
        return completableFuture;
    }
}
| 8,489 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source/http/SourceHttpServer.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.source.http;
import io.mantisrx.connector.publish.core.QueryRegistry;
import rx.subjects.Subject;
/**
 * Lifecycle contract for the HTTP server that receives published events.
 * Expected order: {@link #init} once, then {@link #startServer}, then
 * {@link #shutdownServer}.
 */
public interface SourceHttpServer {

    // Metric group name for server metrics. (Interface fields are implicitly
    // public static final; redundant modifiers removed.)
    String METRIC_GROUP = "PushServer";

    /** Lifecycle states a server implementation may move through. */
    enum State {
        NOTINITED,
        INITED,
        RUNNING,
        SHUTDOWN
    }

    /**
     * Prepares the server to listen on {@code port}, wiring received events
     * into {@code eventSubject} and subscription lookups into {@code registry}.
     */
    void init(QueryRegistry registry, Subject<String, String> eventSubject, int port) throws InterruptedException;

    /** Starts serving; call only after {@link #init}. */
    void startServer();

    /** Releases server resources. */
    void shutdownServer();
}
| 8,490 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source/http/NettySourceHttpServer.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.source.http;
import io.mantisrx.connector.publish.core.QueryRegistry;
import io.mantisrx.runtime.Context;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelOption;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.subjects.Subject;
/**
 * Netty-based {@link SourceHttpServer}: binds an HTTP endpoint whose pipeline
 * (see HttpServerInitializer) pushes received events into the supplied subject.
 * The bind/serve loop runs on a dedicated single thread.
 */
public class NettySourceHttpServer implements SourceHttpServer {

    private static final Logger LOGGER = LoggerFactory.getLogger(NettySourceHttpServer.class);

    private final NioEventLoopGroup workerGroup;
    private final NioEventLoopGroup bossGroup;
    private Runnable nettyServerRunnable;
    private volatile boolean isInitialized = false;
    private volatile boolean isStarted = false;

    public NettySourceHttpServer(Context context, int threadCount) {
        // NOTE(review): threadCount sizes the boss (accept) group while the
        // worker group uses Netty's default; conventionally it is the other way
        // round — confirm this is intentional before changing.
        this.bossGroup = new NioEventLoopGroup(threadCount);
        this.workerGroup = new NioEventLoopGroup();
    }

    @Override
    public void init(QueryRegistry queryRegistry, Subject<String, String> eventSubject, int port) {
        if (!isInitialized) {
            nettyServerRunnable = () -> {
                try {
                    ServerBootstrap b = new ServerBootstrap();
                    b.option(ChannelOption.SO_BACKLOG, 1024);
                    b.group(bossGroup, workerGroup)
                            .channel(NioServerSocketChannel.class)
                            .childHandler(new HttpServerInitializer(queryRegistry, eventSubject));
                    // Block this thread until the server channel closes.
                    Channel ch = b.bind(port).sync().channel();
                    ch.closeFuture().sync();
                } catch (Exception e) {
                    // BUGFIX: log the full exception instead of e.getMessage(),
                    // which loses the stack trace and can itself be null.
                    LOGGER.error("Netty source HTTP server terminated abnormally", e);
                } finally {
                    bossGroup.shutdownGracefully();
                    workerGroup.shutdownGracefully();
                }
            };
            isInitialized = true;
        }
    }

    @Override
    public void startServer() {
        // BUGFIX: previously an uninitialized server reported "Server already
        // started"; the two failure modes are now distinguished.
        if (!isInitialized) {
            throw new IllegalStateException("Server not initialized; call init() before startServer()");
        }
        if (isStarted) {
            throw new IllegalStateException("Server already started");
        }
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.submit(nettyServerRunnable);
        // No further tasks will be submitted; let the server task run to completion.
        executor.shutdown();
        Runtime.getRuntime().addShutdownHook(new Thread(this::shutdownServer));
        isStarted = true;
    }

    @Override
    public void shutdownServer() {
        if (isInitialized && isStarted) {
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
        }
    }
}
| 8,491 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source/http/HttpServerInitializer.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.source.http;
import io.mantisrx.connector.publish.core.QueryRegistry;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.http.HttpContentDecompressor;
import io.netty.handler.codec.http.HttpObjectAggregator;
import io.netty.handler.codec.http.HttpServerCodec;
import rx.subjects.Subject;
/**
 * Configures the channel pipeline for each accepted connection: HTTP codec,
 * decompression, full-message aggregation, then the application handler and
 * a terminal exception handler.
 */
public class HttpServerInitializer extends ChannelInitializer<SocketChannel> {

    private static final int DEFAULT_MAX_INITIAL_LENGTH = 4096;
    private static final int DEFAULT_MAX_HEADER_SIZE = 16384;
    private static final int DEFAULT_MAX_CHUNK_SIZE = 32768;
    private static final int DEFAULT_MAX_CONTENT_LENGTH = 1048576;

    private final QueryRegistry registry;
    private final Subject<String, String> eventSubject;

    public HttpServerInitializer(QueryRegistry registry, Subject<String, String> eventSubject) {
        this.registry = registry;
        this.eventSubject = eventSubject;
    }

    @Override
    protected void initChannel(SocketChannel channel) {
        ChannelPipeline pipeline = channel.pipeline();
        // Order matters: decode HTTP first, inflate, aggregate chunks into a
        // full message, then hand off to the source handler.
        pipeline.addLast("http", new HttpServerCodec(DEFAULT_MAX_INITIAL_LENGTH, DEFAULT_MAX_HEADER_SIZE, DEFAULT_MAX_CHUNK_SIZE));
        pipeline.addLast("inflater", new HttpContentDecompressor());
        pipeline.addLast("aggregator", new HttpObjectAggregator(DEFAULT_MAX_CONTENT_LENGTH));
        pipeline.addLast(new HttpSourceServerHandler(registry, eventSubject));
        pipeline.addLast(new NettyExceptionHandler());
    }
}
| 8,492 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source/http/SourceSink.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.source.http;
import io.mantisrx.connector.publish.core.EventFilter;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.PortRequest;
import io.mantisrx.runtime.sink.ServerSentEventsSink;
import io.mantisrx.runtime.sink.Sink;
import io.mantisrx.runtime.sink.predicate.Predicate;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import rx.Observable;
import rx.Subscription;
import rx.functions.Func2;
public class SourceSink implements Sink<String> {

    private final ServerSentEventsSink<String> sink;
    // Assigned lazily in call(); may still be null when close() runs.
    private Subscription subscription;

    /** Request processor that performs no work; usable as a default pre/post hook. */
    static class NoOpProcessor implements Func2<Map<String, List<String>>, Context, Void> {
        @Override
        public Void call(Map<String, List<String>> t1, Context t2) {
            return null;
        }
    }

    /**
     * Creates an SSE sink that filters outgoing events with an {@link EventFilter}
     * keyed by the given client id.
     *
     * @param preProcessor  hook invoked with query params before each request is served
     * @param postProcessor hook invoked with query params after each request completes
     * @param mantisClientId client id used by the "description" predicate filter
     */
    public SourceSink(Func2<Map<String, List<String>>, Context, Void> preProcessor,
                      Func2<Map<String, List<String>>, Context, Void> postProcessor, String mantisClientId) {
        this.sink = new ServerSentEventsSink.Builder<String>()
                .withEncoder(data -> data)
                .withPredicate(new Predicate<>("description", new EventFilter(mantisClientId)))
                .withRequestPreprocessor(preProcessor)
                .withRequestPostprocessor(postProcessor)
                .build();
    }

    @Override
    public void call(Context context, PortRequest portRequest,
                     Observable<String> observable) {
        observable = observable.filter(t1 -> !t1.isEmpty());
        // Keep the upstream hot even when no SSE client is connected.
        subscription = observable.subscribe();
        sink.call(context, portRequest, observable);
    }

    @Override
    public void close() throws IOException {
        try {
            sink.close();
        } finally {
            // Guard against close() before call(): subscription is only set once
            // the sink has been wired to an observable (previously this NPE'd).
            if (subscription != null) {
                subscription.unsubscribe();
            }
        }
    }
}
| 8,493 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source/http/PushHttpSource.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.source.http;
import com.mantisrx.common.utils.MantisSourceJobConstants;
import io.mantisrx.connector.publish.core.QueryRegistry;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.WorkerMap;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.type.IntParameter;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.Source;
import io.reactivx.mantis.operators.DropOperator;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.schedulers.Schedulers;
import rx.subjects.PublishSubject;
import rx.subjects.SerializedSubject;
import rx.subjects.Subject;
/**
 * Mantis {@link Source} that accepts events pushed over HTTP and emits them
 * into the job as a single observable stream.
 */
public class PushHttpSource implements Source<String> {

    private static final Logger LOGGER = LoggerFactory.getLogger(PushHttpSource.class);

    // Serialized so multiple Netty channel threads can emit concurrently.
    private final Subject<String, String> eventSubject = new SerializedSubject<>(PublishSubject.create());
    private final QueryRegistry queryRegistry;
    private final int serverPort;
    // Latest worker topology pushed by the runtime; starts empty.
    private final AtomicReference<WorkerMap> workerMapAtomicReference = new AtomicReference<>(new WorkerMap(new HashMap<>()));

    private static final String NETTY_THREAD_COUNT_PARAM_NAME = "nettyThreadCount";

    private SourceHttpServer server;

    public PushHttpSource(QueryRegistry registry, int serverPort) {
        this.queryRegistry = registry;
        this.serverPort = serverPort;
    }

    /**
     * Emits the pushed-event stream. Drops events under backpressure (DropOperator)
     * and completes silently on error rather than tearing down the stage.
     */
    @Override
    public Observable<Observable<String>> call(Context context, Index index) {
        return Observable.just(eventSubject
                .lift(new DropOperator<>("incoming_" + PushHttpSource.class.getCanonicalName() + "_batch"))
                .onErrorResumeNext((e) -> Observable.empty()));
    }

    /**
     * Starts the embedded Netty HTTP server and subscribes to worker-map updates.
     *
     * @throws RuntimeException if server initialization is interrupted
     */
    @Override
    public void init(Context context, Index index) {
        LOGGER.info("Initializing PushHttpSource");
        int threadCount = (Integer) context.getParameters().get(NETTY_THREAD_COUNT_PARAM_NAME, 4);
        LOGGER.info("PushHttpSource server starting at Port " + serverPort);
        server = new NettySourceHttpServer(context, threadCount);
        try {
            server.init(queryRegistry, eventSubject, serverPort);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers further up the stack can still
            // observe the interruption; wrapping alone would swallow it.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
        server.startServer();
        // NOTE(review): this subscription is never unsubscribed in close();
        // presumably acceptable because the stage outlives the source — confirm.
        context.getWorkerMapObservable().subscribeOn(Schedulers.io()).subscribe((workerMap) -> {
            LOGGER.info("Got WorkerUpdate" + workerMap);
            workerMapAtomicReference.set(workerMap);
        });
        LOGGER.info("PushHttpSource server started");
    }

    /** Declares the job parameters this source understands (thread count, zone/app/ASG filters). */
    @Override
    public List<ParameterDefinition<?>> getParameters() {
        List<ParameterDefinition<?>> parameters = new ArrayList<>();
        parameters.add(new IntParameter()
                .name(NETTY_THREAD_COUNT_PARAM_NAME)
                .validator(Validators.range(1, 8))
                .defaultValue(4)
                .build());
        parameters.add(new StringParameter()
                .name(MantisSourceJobConstants.ZONE_LIST_PARAMETER_NAME)
                .description("list of Zones")
                .validator(Validators.alwaysPass())
                .defaultValue("")
                .build());
        parameters.add(new StringParameter()
                .name(MantisSourceJobConstants.TARGET_APP_PARAMETER_NAME)
                .description("target app")
                .validator(Validators.alwaysPass())
                .defaultValue("")
                .build());
        parameters.add(new StringParameter()
                .name(MantisSourceJobConstants.TARGET_ASG_CSV_PARAM)
                .description("target ASGs CSV regex")
                .validator(Validators.alwaysPass())
                .defaultValue("")
                .build());
        return parameters;
    }

    /** Stops the embedded server; idempotent (subsequent calls are no-ops). */
    @Override
    public void close() throws IOException {
        if (server != null) {
            server.shutdownServer();
            server = null;
        }
    }
}
| 8,494 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source | Create_ds/mantis/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source/http/HttpSourceServerHandler.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.source.http;
import static io.netty.handler.codec.http.HttpResponseStatus.OK;
import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.metrics.spectator.MetricGroupId;
import io.mantisrx.connector.publish.core.QueryRegistry;
import io.mantisrx.publish.proto.MantisServerSubscription;
import io.mantisrx.publish.proto.MantisServerSubscriptionEnvelope;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.FullHttpMessage;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpObject;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpUtil;
import io.netty.util.AsciiString;
import io.netty.util.CharsetUtil;
import java.util.List;
import mantis.io.reactivex.netty.protocol.http.server.UriInfoHolder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.subjects.Subject;
/**
 * Netty inbound handler for the push-HTTP source.
 *
 * <p>GET requests return the current subscription list (JSON) for the query
 * parameters supplied; POST requests push the raw body into {@code eventSubject}
 * as an event. Any other method is counted and otherwise ignored.
 */
public class HttpSourceServerHandler extends SimpleChannelInboundHandler<HttpObject> {
    private static final Logger LOGGER = LoggerFactory.getLogger(HttpSourceServerHandler.class);
    // Fixed "OK" body returned to POSTs.
    private static final byte[] CONTENT = {'O', 'K'};
    private static final AsciiString CONTENT_TYPE = AsciiString.cached("Content-Type");
    private static final AsciiString CONTENT_LENGTH = AsciiString.cached("Content-Length");
    private static final AsciiString CONNECTION = AsciiString.cached("Connection");
    private static final AsciiString KEEP_ALIVE = AsciiString.cached("keep-alive");
    // NOTE(review): package-private and mutable; could be private static final — confirm no test relies on it.
    ObjectMapper mapper = new ObjectMapper();
    private final Counter getRequestCount;
    private final Counter unknownRequestCount;
    private final Counter postRequestCount;
    MetricGroupId metricGroupId;
    private final QueryRegistry registry;
    private final Subject<String, String> eventSubject;
    /**
     * @param queryRegistry source of the current subscriptions answered on GET
     * @param eventSubject  subject that POSTed event payloads are pushed into
     */
    public HttpSourceServerHandler(QueryRegistry queryRegistry, Subject<String, String> eventSubject) {
        registry = queryRegistry;
        this.eventSubject = eventSubject;
        metricGroupId = new MetricGroupId(SourceHttpServer.METRIC_GROUP + "_incoming");
        Metrics m = new Metrics.Builder()
                .id(metricGroupId)
                .addCounter("GetRequestCount")
                .addCounter("PostRequestCount")
                .addCounter("UnknownRequestCount")
                .build();
        // registerAndGet returns the canonical instance if the group was already registered.
        m = MetricsRegistry.getInstance().registerAndGet(m);
        getRequestCount = m.getCounter("GetRequestCount");
        unknownRequestCount = m.getCounter("UnknownRequestCount");
        postRequestCount = m.getCounter("PostRequestCount");
    }
    @Override
    public void channelReadComplete(ChannelHandlerContext ctx) {
        // Responses below are queued with ctx.write(); flush them once per read batch.
        ctx.flush();
    }
    @Override
    protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) {
        if (msg instanceof HttpRequest) {
            HttpRequest req = (HttpRequest) msg;
            boolean keepAlive = HttpUtil.isKeepAlive(req);
            if (req.method().equals(HttpMethod.GET)) {
                getRequestCount.increment();
                UriInfoHolder uriInfoHolder = new UriInfoHolder(req.uri());
                List<MantisServerSubscription> currentSubscriptions =
                        registry.getCurrentSubscriptions(uriInfoHolder.getQueryParameters());
                try {
                    byte[] serializedSubs =
                            mapper.writeValueAsBytes(new MantisServerSubscriptionEnvelope(currentSubscriptions));
                    FullHttpResponse response =
                            new DefaultFullHttpResponse(HTTP_1_1, OK, Unpooled.wrappedBuffer(serializedSubs));
                    response.headers().set(CONTENT_TYPE, "application/json");
                    response.headers().setInt(CONTENT_LENGTH, response.content().readableBytes());
                    if (!keepAlive) {
                        // Close the connection once this response is fully written.
                        ctx.write(response).addListener(ChannelFutureListener.CLOSE);
                    } else {
                        response.headers().set(CONNECTION, KEEP_ALIVE);
                        ctx.write(response);
                    }
                } catch (Exception e) {
                    // NOTE(review): on serialization failure no response is written,
                    // so the client is left waiting until timeout — confirm intended.
                    LOGGER.error("problem reading from channel", e);
                }
            } else {
                if (req.method().equals(HttpMethod.POST)) {
                    postRequestCount.increment();
                    // Safe cast: the pipeline's HttpObjectAggregator produces full messages.
                    FullHttpMessage aggregator = (FullHttpMessage) msg;
                    ByteBuf content = aggregator.content();
                    String data = content.toString(CharsetUtil.UTF_8);
                    if (LOGGER.isDebugEnabled()) {
                        LOGGER.debug("got data " + data);
                    }
                    // Push the raw payload downstream to the source's event stream.
                    eventSubject.onNext(data);
                    FullHttpResponse response =
                            new DefaultFullHttpResponse(HTTP_1_1, OK, Unpooled.wrappedBuffer(CONTENT));
                    response.headers().set(CONTENT_TYPE, "text/plain");
                    response.headers().setInt(CONTENT_LENGTH, response.content().readableBytes());
                    if (!keepAlive) {
                        ctx.write(response).addListener(ChannelFutureListener.CLOSE);
                    } else {
                        response.headers().set(CONNECTION, KEEP_ALIVE);
                        ctx.write(response);
                    }
                } else {
                    // Neither GET nor POST: count it; no response is sent.
                    unknownRequestCount.increment();
                }
            }
        }
    }
}
| 8,495 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink/StageOverrideParameters.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink;
import com.google.common.collect.Lists;
import io.mantisrx.connector.iceberg.sink.committer.IcebergCommitterStage;
import io.mantisrx.connector.iceberg.sink.config.SinkProperties;
import io.mantisrx.connector.iceberg.sink.writer.IcebergWriterStage;
import io.mantisrx.connector.iceberg.sink.writer.config.WriterProperties;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.ParameterUtils;
import io.mantisrx.runtime.parameter.Parameters;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class StageOverrideParameters {

    private StageOverrideParameters() {
        // Utility class: no instances.
    }

    /**
     * Builds a {@link Parameters} context that merges the writer and committer
     * stage parameter definitions with fixed test override values.
     */
    public static Parameters newParameters() {
        Map<String, ParameterDefinition<?>> definitions = new HashMap<>();
        for (ParameterDefinition<?> definition : IcebergWriterStage.parameters()) {
            definitions.put(definition.getName(), definition);
        }
        for (ParameterDefinition<?> definition : IcebergCommitterStage.parameters()) {
            definitions.put(definition.getName(), definition);
        }

        List<Parameter> overrides = Lists.newArrayList(
                new Parameter(SinkProperties.SINK_CATALOG, "catalog"),
                new Parameter(SinkProperties.SINK_DATABASE, "database"),
                new Parameter(SinkProperties.SINK_TABLE, "table"),
                new Parameter(WriterProperties.WRITER_FLUSH_FREQUENCY_MSEC, "500"),
                new Parameter(WriterProperties.WRITER_MAXIMUM_POOL_SIZE, "10"));

        return ParameterUtils.createContextParameters(definitions, overrides);
    }
}
| 8,496 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink/codecs/IcebergCodecsTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.codecs;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.mock;
import io.mantisrx.common.codec.Codec;
import java.util.Collections;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DataFiles;
import org.apache.iceberg.Metrics;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.data.GenericRecord;
import org.apache.iceberg.data.Record;
import org.apache.iceberg.types.Types;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
class IcebergCodecsTest {
private static final Schema SCHEMA =
new Schema(Types.NestedField.required(1, "id", Types.IntegerType.get()));
private Codec<Record> recordCodec;
private Codec<DataFile> dataFileCodec;
@BeforeEach
void setUp() {
this.recordCodec = IcebergCodecs.record(SCHEMA);
this.dataFileCodec = IcebergCodecs.dataFile();
}
@Test
void shouldEncodeAndDecodeRecord() {
Record expected = GenericRecord.create(SCHEMA);
expected.setField("id", 1);
byte[] encoded = recordCodec.encode(expected);
Record actual = recordCodec.decode(encoded);
assertEquals(expected, actual);
}
@Test
void shouldEncodeAndDecodeDataFile() {
PartitionSpec spec = PartitionSpec.unpartitioned();
DataFile expected = DataFiles.builder(spec)
.withPath("/path/filename.parquet")
.withFileSizeInBytes(1)
.withPartition(null)
.withMetrics(mock(Metrics.class))
.withSplitOffsets(Collections.singletonList(1L))
.build();
byte[] encoded = dataFileCodec.encode(expected);
DataFile actual = dataFileCodec.decode(encoded);
assertEquals(expected.path(), actual.path());
assertEquals(expected.fileSizeInBytes(), actual.fileSizeInBytes());
assertEquals(expected.partition(), actual.partition());
assertEquals(expected.splitOffsets(), actual.splitOffsets());
}
} | 8,497 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink/writer/IcebergWriterEndToEndTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer;
import static io.mantisrx.connector.iceberg.sink.writer.config.WriterProperties.WRITER_FILE_FORMAT_DEFAULT;
import static io.mantisrx.connector.iceberg.sink.writer.config.WriterProperties.WRITER_FLUSH_FREQUENCY_BYTES_DEFAULT;
import static io.mantisrx.connector.iceberg.sink.writer.config.WriterProperties.WRITER_MAXIMUM_POOL_SIZE_DEFAULT;
import static io.mantisrx.connector.iceberg.sink.writer.config.WriterProperties.WRITER_ROW_GROUP_SIZE_DEFAULT;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import io.mantisrx.connector.iceberg.sink.writer.IcebergWriterStage.Transformer;
import io.mantisrx.connector.iceberg.sink.writer.config.WriterConfig;
import io.mantisrx.connector.iceberg.sink.writer.factory.DefaultIcebergWriterFactory;
import io.mantisrx.connector.iceberg.sink.writer.factory.IcebergWriterFactory;
import io.mantisrx.connector.iceberg.sink.writer.metrics.WriterMetrics;
import io.mantisrx.connector.iceberg.sink.writer.partitioner.Partitioner;
import io.mantisrx.connector.iceberg.sink.writer.pool.FixedIcebergWriterPool;
import io.mantisrx.connector.iceberg.sink.writer.pool.IcebergWriterPool;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.TestWorkerInfo;
import io.mantisrx.runtime.WorkerInfo;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.data.GenericRecord;
import org.apache.iceberg.data.Record;
import org.apache.iceberg.types.Types;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.extension.RegisterExtension;
import rx.Observable;
import rx.Subscription;
@Slf4j
/**
 * End-to-end test of the Iceberg writer transformer: two interleaved record
 * streams are written through a fixed writer pool into a temporary Hadoop
 * table provided by {@link IcebergTableExtension}.
 */
public class IcebergWriterEndToEndTest {
    // records of the following shape
    // {
    //  "partition": 1,
    //  "id": 2
    // }
    private static final Schema SCHEMA =
        new Schema(
            Types.NestedField.required(1, "partition", Types.IntegerType.get()),
            Types.NestedField.required(2, "id", Types.IntegerType.get()));
    // Identity-partitioned on "partition" so every record lands in one partition.
    private static final PartitionSpec SPEC =
        PartitionSpec.builderFor(SCHEMA).identity("partition").build();
    // Creates a fresh table before each test and deletes it afterwards.
    @RegisterExtension
    static IcebergTableExtension tableExtension =
        IcebergTableExtension.builder()
            .schema(SCHEMA)
            .spec(SPEC)
            .build();
    private static final WorkerInfo WORKER_INFO =
        new TestWorkerInfo("testJobName", "jobId", 1, 1, 1, MantisJobDurationType.Perpetual,
            "host");
    // Routes every record to the constant partition value 1.
    private Partitioner partitioner = record -> {
        GenericRecord partitionRecord = GenericRecord.create(SPEC.schema());
        partitionRecord.setField("partition", 1);
        return partitionRecord;
    };
    private Context stageContext = mock(Context.class);
    @BeforeEach
    public void initStageContext() {
        when(stageContext.getWorkerInfo()).thenReturn(WORKER_INFO);
    }
    // NOTE(review): no @Test annotation here; @Disabled alone means JUnit never
    // discovers this method — presumably intentional while the test is flaky.
    @Disabled("flaky test; probably needs a higher value in sleep!")
    public void testTransformerEndToEnd() throws Exception {
        // Flush-frequency of 100ms so data files roll quickly during the test window.
        final WriterConfig writerConfig = new WriterConfig(
            tableExtension.getCatalog(),
            tableExtension.getDatabase(),
            tableExtension.getTableName(),
            WRITER_ROW_GROUP_SIZE_DEFAULT,
            Long.parseLong(WRITER_FLUSH_FREQUENCY_BYTES_DEFAULT),
            100L,
            WRITER_FILE_FORMAT_DEFAULT,
            WRITER_MAXIMUM_POOL_SIZE_DEFAULT,
            new Configuration()
        );
        final IcebergWriterFactory writerFactory =
            new DefaultIcebergWriterFactory(writerConfig, WORKER_INFO, tableExtension.getTable(),
                tableExtension.getLocationProvider());
        final IcebergWriterPool writerPool = new FixedIcebergWriterPool(writerFactory, writerConfig);
        Transformer transformer =
            IcebergWriterStage.newTransformer(writerConfig, new WriterMetrics(), writerPool, partitioner,
                WORKER_INFO, null);
        final int size = 1000;
        // odd numbers observable: emits 1,3,5,... once per millisecond
        Observable<GenericRecord> oddObservable =
            Observable
                .interval(0, 1, TimeUnit.MILLISECONDS)
                .filter(val -> (val % 2 == 1))
                .takeUntil(val -> val > size)
                .map(val -> {
                    GenericRecord record = GenericRecord.create(SCHEMA);
                    record.setField("partition", 1);
                    record.setField("id", val);
                    return record;
                })
                .publish()
                .autoConnect();
        // even numbers observable: starts after a size/2 ms delay so the two
        // streams interleave rather than run back to back
        Observable<GenericRecord> evenObservable =
            Observable
                .interval(size / 2, 1, TimeUnit.MILLISECONDS)
                .filter(val -> (val % 2 == 0))
                .takeUntil(val -> val + (size / 2) > size)
                .map(val -> {
                    GenericRecord record = GenericRecord.create(SCHEMA);
                    record.setField("partition", 1);
                    record.setField("id", val);
                    return record;
                })
                .publish()
                .autoConnect();
        Observable<Record> recordObservable = Observable.merge(oddObservable, evenObservable);
        // Wrap records (no event-time metadata) and run them through the writer stage.
        Observable<MantisDataFile> dataFileObservable = transformer.call(recordObservable.map(s -> new MantisRecord(s, null)));
        AtomicReference<Throwable> failure = new AtomicReference<>();
        List<MantisDataFile> dataFileList = new ArrayList<>();
        Subscription subscription = dataFileObservable.subscribe(dataFileList::add, failure::set);
        // Give the interval-driven pipeline time to emit and flush data files.
        Thread.sleep(TimeUnit.SECONDS.toMillis(15));
        if (failure.get() != null) {
            throw new Exception(failure.get());
        }
        log.info("Collected {} data files successfully", dataFileList.size());
    }
}
| 8,498 |
0 | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink/writer/IcebergTableExtension.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer;
import io.mantisrx.shaded.com.google.common.io.Files;
import java.io.File;
import lombok.Builder;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.FileUtils;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.hadoop.HadoopTables;
import org.apache.iceberg.io.LocationProvider;
import org.junit.jupiter.api.extension.AfterEachCallback;
import org.junit.jupiter.api.extension.BeforeAllCallback;
import org.junit.jupiter.api.extension.BeforeEachCallback;
import org.junit.jupiter.api.extension.ExtensionContext;
/**
 * Junit Jupiter Extension to create Iceberg Tables for unit testing with a specified schema,
 * properties, etc.... and to also clean the files after unit tests.
 * <p>
 * The way to use the IcebergTableExtension is by adding the following code to your test class. This
 * creates the table before the test is executed.
 * <pre>
 * {@literal @}RegisterExtension
 * static IcebergTableExtension tableExtension =
 *     IcebergTableExtension.builder()
 *         .schema(SCHEMA)
 *         .spec(SPEC)
 *         .build();
 * </pre>
 *
 * <p> The created table can be obtained by the {@link IcebergTableExtension#getTable()} method.
 */
@Slf4j
@Builder
public class IcebergTableExtension implements BeforeAllCallback, BeforeEachCallback,
    AfterEachCallback {

    // Temp directory holding the table files; created lazily, removed after each test.
    private File rootDir;

    @Getter
    @Builder.Default
    private String catalog = "catalog";

    @Getter
    @Builder.Default
    private String database = "database";

    @Getter
    @Builder.Default
    private String tableName = "table";

    @Getter
    private Schema schema;
    // Partition spec used when creating the table; may be unpartitioned.
    private PartitionSpec spec;

    // The table created in beforeEach(); valid only while a test is running.
    @Getter
    private Table table;

    @Override
    public void beforeAll(ExtensionContext context) throws Exception {
        log.info("Before All");
    }

    /** Creates a fresh Hadoop-backed Iceberg table under a temp directory. */
    @Override
    public void beforeEach(ExtensionContext context) throws Exception {
        log.info("Before Each");
        if (rootDir == null) {
            rootDir = Files.createTempDir();
        }
        final File tableDir = new File(rootDir, getTableIdentifier().toString());
        final HadoopTables tables = new HadoopTables();
        table = tables.create(schema, spec, tableDir.getPath());
    }

    /** Deletes the table's backing files so each test starts from a clean slate. */
    @Override
    public void afterEach(ExtensionContext context) throws Exception {
        FileUtils.deleteDirectory(rootDir);
        rootDir = null;
    }

    public LocationProvider getLocationProvider() {
        return table.locationProvider();
    }

    public TableIdentifier getTableIdentifier() {
        return TableIdentifier.of(catalog, database, tableName);
    }
}
| 8,499 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.