index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/mantis/mantis-examples/mantis-examples-synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic | Create_ds/mantis/mantis-examples/mantis-examples-synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic/sink/TaggedEventFilter.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.synthetic.sink;
import static com.mantisrx.common.utils.MantisSourceJobConstants.CLIENT_ID_PARAMETER_NAME;
import static com.mantisrx.common.utils.MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME;
import io.mantisrx.common.MantisProperties;
import io.mantisrx.sourcejob.synthetic.core.TaggedData;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import lombok.extern.slf4j.Slf4j;
import rx.functions.Func1;
/**
* This is a predicate that decides what data to send to the downstream client. The data is tagged with the clientId
* and subscriptionId of the intended recipient.
*/
@Slf4j
public class TaggedEventFilter implements Func1<Map<String, List<String>>, Func1<TaggedData, Boolean>> {

    /**
     * Builds the per-connection predicate. When a subscriptionId parameter is present the
     * predicate only passes events tagged for "&lt;clientId&gt;_&lt;subId&gt;" (or just subId when no
     * clientId was supplied); otherwise everything passes.
     *
     * @param parameters the connection's query parameters; may be null
     * @return predicate deciding which TaggedData to deliver to this client
     */
    @Override
    public Func1<TaggedData, Boolean> call(Map<String, List<String>> parameters) {
        // Default: pass everything through when no subscription info is supplied.
        Func1<TaggedData, Boolean> filter = t1 -> true;
        if (parameters != null && parameters.containsKey(SUBSCRIPTION_ID_PARAM_NAME)) {
            String subId = parameters.get(SUBSCRIPTION_ID_PARAM_NAME).get(0);
            // clientId is optional: the previous code called get(CLIENT_ID_PARAMETER_NAME).get(0)
            // unconditionally and threw an NPE whenever only a subscriptionId was supplied.
            List<String> clientIds = parameters.get(CLIENT_ID_PARAMETER_NAME);
            String clientId = (clientIds == null || clientIds.isEmpty()) ? null : clientIds.get(0);
            List<String> terms = new ArrayList<>();
            if (clientId != null && !clientId.isEmpty()) {
                terms.add(clientId + "_" + subId);
            } else {
                terms.add(subId);
            }
            filter = new SourceEventFilter(terms);
        }
        return filter;
    }

    /** Passes an event only if it was tagged for every requested term. */
    private static class SourceEventFilter implements Func1<TaggedData, Boolean> {

        // Job identity, used only for the creation log line; defaults when env properties are unset.
        private String jobId = "UNKNOWN";
        private String jobName = "UNKNOWN";
        private final List<String> terms;

        SourceEventFilter(List<String> terms) {
            this.terms = terms;
            String jId = MantisProperties.getProperty("JOB_ID");
            if (jId != null && !jId.isEmpty()) {
                jobId = jId;
            }
            String jName = MantisProperties.getProperty("JOB_NAME");
            if (jName != null && !jName.isEmpty()) {
                jobName = jName;
            }
            log.info("Created SourceEventFilter! for subId " + terms.toString() + " in Job : " + jobName + " with Id " + jobId);
        }

        @Override
        public Boolean call(TaggedData data) {
            // Short-circuit on the first term the event was not tagged with.
            for (String term : terms) {
                if (!data.matchesClient(term)) {
                    return false;
                }
            }
            return true;
        }
    }
}
| 7,700 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic | Create_ds/mantis/mantis-examples/mantis-examples-synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic/sink/QueryRequestPreProcessor.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.synthetic.sink;
import static com.mantisrx.common.utils.MantisSourceJobConstants.CRITERION_PARAM_NAME;
import static com.mantisrx.common.utils.MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME;
import io.mantisrx.runtime.Context;
import java.util.List;
import java.util.Map;
import lombok.extern.slf4j.Slf4j;
import rx.functions.Func2;
/**
* This is a callback that is invoked when a new client connects to this sink of this job.
* The callback is used to extract useful query parameters the user may have set on the GET request such as
* the clientId, subscriptionId and the criterion.
* The clientId identifies a group of connections belonging to the same consumer. Data is sent round-robin amongst
* all clients with the same clientId
* The subscriptionId tracks this particular client.
* The criterion is a valid MQL query. It indicates what data this client is interested in.
*/
@Slf4j
public class QueryRequestPreProcessor implements Func2<Map<String, List<String>>, Context, Void> {

    public QueryRequestPreProcessor() { }

    /**
     * Invoked once per new sink connection with that connection's query parameters.
     * Registers the client's MQL query keyed by "&lt;clientId&gt;_&lt;subId&gt;", or just subId when
     * no clientId was supplied. Always returns null per the Func2&lt;..., Void&gt; contract.
     *
     * @param queryParams GET query parameters of the connecting client; may be null
     * @param context     job context (unused here)
     */
    @Override
    public Void call(Map<String, List<String>> queryParams, Context context) {
        log.info("QueryRequestPreProcessor:queryParams: {}", queryParams);
        if (queryParams != null
                && queryParams.containsKey(SUBSCRIPTION_ID_PARAM_NAME)
                && queryParams.containsKey(CRITERION_PARAM_NAME)) {
            final String subId = queryParams.get(SUBSCRIPTION_ID_PARAM_NAME).get(0);
            final String query = queryParams.get(CRITERION_PARAM_NAME).get(0);
            // clientId is optional: the previous code called get("clientId").get(0)
            // unconditionally and threw an NPE whenever a client connected without it.
            final List<String> clientIds = queryParams.get("clientId");
            final String clientId = (clientIds == null || clientIds.isEmpty()) ? null : clientIds.get(0);
            if (subId != null && query != null) {
                try {
                    log.info("Registering query {}", query);
                    if (clientId != null && !clientId.isEmpty()) {
                        registerQuery(clientId + "_" + subId, query);
                    } else {
                        registerQuery(subId, query);
                    }
                } catch (Throwable t) {
                    log.error("Error registering query", t);
                }
            }
        }
        return null;
    }

    // Serialized across threads so concurrent connections register one at a time.
    private static synchronized void registerQuery(String subId, String query) {
        QueryRefCountMap.INSTANCE.addQuery(subId, query);
    }
}
| 7,701 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic | Create_ds/mantis/mantis-examples/mantis-examples-synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic/sink/QueryRefCountMap.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.synthetic.sink;
import io.mantisrx.sourcejob.synthetic.core.MQLQueryManager;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import lombok.extern.slf4j.Slf4j;
/**
* This class keeps track of number of clients that have the exact same query registered for
* deduplication purposes.
* When all references to a query are gone the query is deregistered.
*/
@Slf4j
final class QueryRefCountMap {

    public static final QueryRefCountMap INSTANCE = new QueryRefCountMap();

    // subscriptionId -> number of live connections sharing that exact query.
    private final ConcurrentHashMap<String, AtomicInteger> refCntMap = new ConcurrentHashMap<>();

    private QueryRefCountMap() { }

    /**
     * Adds a reference to the query for subId, registering it with the MQL query manager
     * on first use. compute() makes the check-and-register step atomic: the previous
     * containsKey/get/putIfAbsent sequence could double-register or drop a count when
     * two clients subscribed concurrently with the same subId.
     */
    void addQuery(String subId, String query) {
        log.info("adding query " + subId + " query " + query);
        refCntMap.compute(subId, (id, refCnt) -> {
            if (refCnt == null) {
                MQLQueryManager.getInstance().registerQuery(id, query);
                log.info("new query registering it");
                return new AtomicInteger(1);
            }
            log.info("query exists already incrementing refcnt to " + refCnt.incrementAndGet());
            return refCnt;
        });
    }

    /**
     * Drops one reference to subId's query; when the count reaches zero the query is
     * deregistered and the mapping removed (atomically, via computeIfPresent).
     */
    void removeQuery(String subId) {
        if (!refCntMap.containsKey(subId)) {
            log.warn("No query with subscriptionId " + subId);
            return;
        }
        refCntMap.computeIfPresent(subId, (id, refCnt) -> {
            int currVal = refCnt.decrementAndGet();
            if (currVal == 0) {
                MQLQueryManager.getInstance().deregisterQuery(id);
                log.info("All references to query are gone removing query");
                return null; // returning null removes the mapping
            }
            log.info("References to query still exist. decrementing refcnt to " + currVal);
            return refCnt;
        });
    }

    /**
     * For testing
     *
     * @param subId subscription id to look up
     *
     * @return current reference count, or 0 when the query is not registered
     */
    int getQueryRefCount(String subId) {
        AtomicInteger refCnt = refCntMap.get(subId);
        return refCnt == null ? 0 : refCnt.get();
    }
}
| 7,702 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic | Create_ds/mantis/mantis-examples/mantis-examples-synthetic-sourcejob/src/main/java/io/mantisrx/sourcejob/synthetic/stage/TaggingStage.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.synthetic.stage;
import io.mantisrx.common.JsonSerializer;
import io.mantisrx.common.codec.Codec;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.spectator.MetricGroupId;
import io.mantisrx.mql.jvm.core.Query;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.computation.ScalarComputation;
import io.mantisrx.sourcejob.synthetic.core.MQLQueryManager;
import io.mantisrx.sourcejob.synthetic.core.TaggedData;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicBoolean;
import lombok.extern.slf4j.Slf4j;
import rx.Observable;
/**
* Tags incoming events with ids of queries that evaluate to true against the data.
*
* Each event is first transformed into a Map, next each query from the list of Registered MQL queries
* is applied against the event. The event is tagged with the ids of queries that matched.
*
*/
@Slf4j
public class TaggingStage implements ScalarComputation<String, TaggedData> {
public static final String MANTIS_META_SOURCE_NAME = "mantis.meta.sourceName";
public static final String MANTIS_META_SOURCE_TIMESTAMP = "mantis.meta.timestamp";
public static final String MANTIS_QUERY_COUNTER = "mantis_query_out";
public static final String MQL_COUNTER = "mql_out";
public static final String MQL_FAILURE = "mql_failure";
public static final String MQL_CLASSLOADER_ERROR = "mql_classloader_error";
public static final String SYNTHETIC_REQUEST_SOURCE = "SyntheticRequestSource";
private AtomicBoolean errorLogged = new AtomicBoolean(false);
@Override
public Observable<TaggedData> call(Context context, Observable<String> dataO) {
final JsonSerializer jsonSerializer = new JsonSerializer();
return dataO
.map((event) -> {
try {
return jsonSerializer.toMap(event);
} catch (Exception e) {
log.error(e.getMessage());
return null;
}
})
.filter(Objects::nonNull)
.flatMapIterable(d -> tagData(d, context));
}
@Override
public void init(Context context) {
context.getMetricsRegistry().registerAndGet(new Metrics.Builder()
.name("mql")
.addCounter(MQL_COUNTER)
.addCounter(MQL_FAILURE)
.addCounter(MQL_CLASSLOADER_ERROR)
.addCounter(MANTIS_QUERY_COUNTER).build());
}
private List<TaggedData> tagData(Map<String, Object> d, Context context) {
List<TaggedData> taggedDataList = new ArrayList<>();
Metrics metrics = context.getMetricsRegistry().getMetric(new MetricGroupId("mql"));
Collection<Query> queries = MQLQueryManager.getInstance().getRegisteredQueries();
Iterator<Query> it = queries.iterator();
while (it.hasNext()) {
Query query = it.next();
try {
if (query.matches(d)) {
Map<String, Object> projected = query.project(d);
projected.put(MANTIS_META_SOURCE_NAME, SYNTHETIC_REQUEST_SOURCE);
projected.put(MANTIS_META_SOURCE_TIMESTAMP, System.currentTimeMillis());
TaggedData tg = new TaggedData(projected);
tg.addMatchedClient(query.getSubscriptionId());
taggedDataList.add(tg);
}
} catch (Exception ex) {
if (ex instanceof ClassNotFoundException) {
log.error("Error loading MQL: " + ex.getMessage());
ex.printStackTrace();
metrics.getCounter(MQL_CLASSLOADER_ERROR).increment();
} else {
ex.printStackTrace();
metrics.getCounter(MQL_FAILURE).increment();
log.error("MQL Error: " + ex.getMessage());
log.error("MQL Query: " + query.getRawQuery());
log.error("MQL Datum: " + d);
}
} catch (Error e) {
metrics.getCounter(MQL_FAILURE).increment();
if (!errorLogged.get()) {
log.error("caught Error when processing MQL {} on {}", query.getRawQuery(), d.toString(), e);
errorLogged.set(true);
}
}
}
return taggedDataList;
}
public static ScalarToScalar.Config<String, TaggedData> config() {
return new ScalarToScalar.Config<String, TaggedData>()
.concurrentInput()
.codec(TaggingStage.taggedDataCodec());
}
public static Codec<TaggedData> taggedDataCodec() {
return new Codec<TaggedData>() {
@Override
public TaggedData decode(byte[] bytes) {
return new TaggedData(new HashMap<>());
}
@Override
public byte[] encode(final TaggedData value) {
return new byte[128];
}
};
}
}
| 7,703 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-core/src/main/java/com/netflix/mantis/examples | Create_ds/mantis/mantis-examples/mantis-examples-core/src/main/java/com/netflix/mantis/examples/core/WordCountPair.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.core;
import java.io.Serializable;
import lombok.Data;
/**
 * A simple immutable value holder pairing a word with the number of times it has occurred.
 * Lombok's {@code @Data} generates the getters, {@code equals}/{@code hashCode},
 * {@code toString} and the required-args constructor for the two final fields.
 */
@Data
public class WordCountPair implements Serializable {
    // The token being counted.
    private final String word;
    // Number of occurrences observed for the word.
    private final int count;
}
| 7,704 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-core/src/main/java/com/netflix/mantis/examples | Create_ds/mantis/mantis-examples/mantis-examples-core/src/main/java/com/netflix/mantis/examples/core/ObservableQueue.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.core;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import rx.Observable;
import rx.subjects.PublishSubject;
import rx.subjects.Subject;
/**
* An Observable that acts as a blocking queue. It is backed by a <code>Subject</code>
*
* @param <T>
*/
public class ObservableQueue<T> implements BlockingQueue<T>, Closeable {

    // toSerialized() makes onNext/onCompleted thread-safe, so offer() may be
    // called concurrently from multiple producer threads.
    private final Subject<T, T> subject = PublishSubject.<T>create().toSerialized();

    /**
     * The consumption side of this queue: every element passed to offer()/add()/put()
     * is emitted here. Elements are not buffered — a subscriber only sees elements
     * offered after it subscribes (PublishSubject semantics).
     */
    public Observable<T> observe() {
        return subject;
    }

    @Override
    public boolean add(T t) {
        return offer(t);
    }

    // Emits the element to current subscribers; never rejects, hence always true.
    @Override
    public boolean offer(T t) {
        subject.onNext(t);
        return true;
    }

    // Completes the stream; subscribers receive onCompleted and no further elements.
    @Override
    public void close() throws IOException {
        subject.onCompleted();
    }

    // ---- BlockingQueue retrieval/inspection methods ----
    // Elements are never stored, so the methods below behave as an always-empty
    // queue. Consume via observe() instead.

    @Override
    public T remove() {
        return noSuchElement();
    }

    @Override
    public T poll() {
        return null;
    }

    @Override
    public T element() {
        return noSuchElement();
    }

    // Shared helper for the methods whose contract requires NoSuchElementException when empty.
    private T noSuchElement() {
        throw new NoSuchElementException();
    }

    @Override
    public T peek() {
        return null;
    }

    // Never blocks, because offer() always succeeds immediately.
    @Override
    public void put(T t) throws InterruptedException {
        offer(t);
    }

    // Timeout is ignored; offer() always succeeds immediately.
    @Override
    public boolean offer(T t, long timeout, TimeUnit unit) throws InterruptedException {
        return offer(t);
    }

    @Override
    public T take() throws InterruptedException {
        throw new UnsupportedOperationException("Use observe() instead");
    }

    // Always "times out" (returns null) since nothing is ever stored.
    @Override
    public T poll(long timeout, TimeUnit unit) throws InterruptedException {
        return null;
    }

    @Override
    public int remainingCapacity() {
        return 0;
    }

    @Override
    public boolean remove(Object o) {
        return false;
    }

    @Override
    public boolean containsAll(Collection<?> c) {
        return false;
    }

    // Offers each element in iteration order; returns true even for an empty input.
    @Override
    public boolean addAll(Collection<? extends T> c) {
        c.forEach(this::offer);
        return true;
    }

    @Override
    public boolean removeAll(Collection<?> c) {
        return false;
    }

    @Override
    public boolean retainAll(Collection<?> c) {
        return false;
    }

    // No stored state to clear.
    @Override
    public void clear() {
    }

    @Override
    public int size() {
        return 0;
    }

    @Override
    public boolean isEmpty() {
        return true;
    }

    @Override
    public boolean contains(Object o) {
        return false;
    }

    @Override
    public Iterator<T> iterator() {
        return Collections.emptyIterator();
    }

    @Override
    public Object[] toArray() {
        return new Object[0];
    }

    // NOTE(review): this method-level T shadows the class's T (it mirrors the
    // Collection.toArray(T[]) signature); returns the argument unchanged.
    @Override
    public <T> T[] toArray(T[] a) {
        return a;
    }

    // Nothing to drain; elements flow through observe() only.
    @Override
    public int drainTo(Collection<? super T> c) {
        return 0;
    }

    @Override
    public int drainTo(Collection<? super T> c, int maxElements) {
        return 0;
    }
} | 7,705 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-core/src/main/java/com/netflix/mantis/examples | Create_ds/mantis/mantis-examples/mantis-examples-core/src/main/java/com/netflix/mantis/examples/config/StageConfigs.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.config;
import io.mantisrx.common.codec.Codecs;
import io.mantisrx.runtime.KeyToScalar;
import io.mantisrx.runtime.ScalarToKey;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.codec.JacksonCodecs;
import java.util.Map;
/**
 * Shared stage-configuration factories for the example jobs: each method builds the
 * codec (and, where applicable, key-expiry) configuration for one stage shape.
 */
public class StageConfigs {

    // Static utility holder; prevent instantiation.
    private StageConfigs() { }

    /** Scalar-to-scalar stage exchanging plain strings. */
    public static ScalarToScalar.Config<String, String> scalarToScalarConfig() {
        return new ScalarToScalar.Config<String, String>()
                .codec(Codecs.string());
    }

    /** Keyed-to-scalar stage emitting strings; keys expire after 10 seconds. */
    public static KeyToScalar.Config<String, Map<String, Object>, String> keyToScalarConfig() {
        return new KeyToScalar.Config<String, Map<String, Object>, String>()
                .description("sum events ")
                .keyExpireTimeSeconds(10)
                .codec(Codecs.string());
    }

    /** Scalar-to-keyed stage grouping events (maps) by ip; keys expire after 1 second. */
    public static ScalarToKey.Config<String, String, Map<String, Object>> scalarToKeyConfig() {
        return new ScalarToKey.Config<String, String, Map<String, Object>>()
                .description("Group event data by ip")
                .concurrentInput()
                .keyExpireTimeSeconds(1)
                .codec(JacksonCodecs.mapStringObject());
    }
}
| 7,706 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-twitter-sample/src/main/java/com/netflix/mantis/examples | Create_ds/mantis/mantis-examples/mantis-examples-twitter-sample/src/main/java/com/netflix/mantis/examples/core/WordCountPair.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.core;
import lombok.Data;
/**
 * A simple immutable value holder pairing a word with the number of times it has occurred.
 * Lombok's {@code @Data} generates the getters, {@code equals}/{@code hashCode},
 * {@code toString} and the required-args constructor for the two final fields.
 */
@Data
public class WordCountPair {
    // The token being counted.
    private final String word;
    // Number of occurrences observed for the word.
    private final int count;
}
| 7,707 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-twitter-sample/src/main/java/com/netflix/mantis/examples | Create_ds/mantis/mantis-examples/mantis-examples-twitter-sample/src/main/java/com/netflix/mantis/examples/core/ObservableQueue.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.core;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import rx.Observable;
import rx.subjects.PublishSubject;
import rx.subjects.Subject;
/**
* An Observable that acts as a blocking queue. It is backed by a <code>Subject</code>
*
* @param <T>
*/
public class ObservableQueue<T> implements BlockingQueue<T>, Closeable {

    // toSerialized() makes onNext/onCompleted thread-safe, so offer() may be
    // called concurrently from multiple producer threads.
    private final Subject<T, T> subject = PublishSubject.<T>create().toSerialized();

    /**
     * The consumption side of this queue: every element passed to offer()/add()/put()
     * is emitted here. Elements are not buffered — a subscriber only sees elements
     * offered after it subscribes (PublishSubject semantics).
     */
    public Observable<T> observe() {
        return subject;
    }

    @Override
    public boolean add(T t) {
        return offer(t);
    }

    // Emits the element to current subscribers; never rejects, hence always true.
    @Override
    public boolean offer(T t) {
        subject.onNext(t);
        return true;
    }

    // Completes the stream; subscribers receive onCompleted and no further elements.
    @Override
    public void close() throws IOException {
        subject.onCompleted();
    }

    // ---- BlockingQueue retrieval/inspection methods ----
    // Elements are never stored, so the methods below behave as an always-empty
    // queue. Consume via observe() instead.

    @Override
    public T remove() {
        return noSuchElement();
    }

    @Override
    public T poll() {
        return null;
    }

    @Override
    public T element() {
        return noSuchElement();
    }

    // Shared helper for the methods whose contract requires NoSuchElementException when empty.
    private T noSuchElement() {
        throw new NoSuchElementException();
    }

    @Override
    public T peek() {
        return null;
    }

    // Never blocks, because offer() always succeeds immediately.
    @Override
    public void put(T t) throws InterruptedException {
        offer(t);
    }

    // Timeout is ignored; offer() always succeeds immediately.
    @Override
    public boolean offer(T t, long timeout, TimeUnit unit) throws InterruptedException {
        return offer(t);
    }

    @Override
    public T take() throws InterruptedException {
        throw new UnsupportedOperationException("Use observe() instead");
    }

    // Always "times out" (returns null) since nothing is ever stored.
    @Override
    public T poll(long timeout, TimeUnit unit) throws InterruptedException {
        return null;
    }

    @Override
    public int remainingCapacity() {
        return 0;
    }

    @Override
    public boolean remove(Object o) {
        return false;
    }

    @Override
    public boolean containsAll(Collection<?> c) {
        return false;
    }

    // Offers each element in iteration order; returns true even for an empty input.
    @Override
    public boolean addAll(Collection<? extends T> c) {
        c.forEach(this::offer);
        return true;
    }

    @Override
    public boolean removeAll(Collection<?> c) {
        return false;
    }

    @Override
    public boolean retainAll(Collection<?> c) {
        return false;
    }

    // No stored state to clear.
    @Override
    public void clear() {
    }

    @Override
    public int size() {
        return 0;
    }

    @Override
    public boolean isEmpty() {
        return true;
    }

    @Override
    public boolean contains(Object o) {
        return false;
    }

    @Override
    public Iterator<T> iterator() {
        return Collections.emptyIterator();
    }

    @Override
    public Object[] toArray() {
        return new Object[0];
    }

    // NOTE(review): this method-level T shadows the class's T (it mirrors the
    // Collection.toArray(T[]) signature); returns the argument unchanged.
    @Override
    public <T> T[] toArray(T[] a) {
        return a;
    }

    // Nothing to drain; elements flow through observe() only.
    @Override
    public int drainTo(Collection<? super T> c) {
        return 0;
    }

    @Override
    public int drainTo(Collection<? super T> c, int maxElements) {
        return 0;
    }
} | 7,708 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-twitter-sample/src/main/java/com/netflix/mantis/examples | Create_ds/mantis/mantis-examples/mantis-examples-twitter-sample/src/main/java/com/netflix/mantis/examples/wordcount/TwitterDslJob.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.wordcount;
import com.netflix.mantis.examples.core.WordCountPair;
import com.netflix.mantis.examples.wordcount.sources.TwitterSource;
import io.mantisrx.common.JsonSerializer;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.MantisJobProvider;
import io.mantisrx.runtime.Metadata;
import io.mantisrx.runtime.core.MantisStream;
import io.mantisrx.runtime.core.WindowSpec;
import io.mantisrx.runtime.core.functions.SimpleReduceFunction;
import io.mantisrx.runtime.core.sinks.ObservableSinkImpl;
import io.mantisrx.runtime.core.sources.ObservableSourceImpl;
import io.mantisrx.runtime.executor.LocalJobExecutorNetworked;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.runtime.sink.Sinks;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
import lombok.extern.slf4j.Slf4j;
/**
* This sample demonstrates connecting to a twitter feed and counting the number of occurrences of words within a 10
* sec hopping window.
* Run the main method of this class and then look for a the SSE port in the output
* E.g
* <code> Serving modern HTTP SSE server sink on port: 8650 </code>
* You can curl this port <code> curl localhost:8650</code> to view the output of the job.
*
* To run via gradle
* ../gradlew execute --args='consumerKey consumerSecret token tokensecret'
*/
@Slf4j
public class TwitterDslJob extends MantisJobProvider<String> {

    /**
     * Builds the job DAG: Twitter source -> JSON parse -> drop non-English tweets ->
     * tokenize into words -> per-word counts over a 10 second window -> SSE sink.
     */
    @Override
    public Job<String> getJobInstance() {
        final JsonSerializer jsonSerializer = new JsonSerializer();
        // NOTE(review): a null argument appears to be accepted by MantisStream.create — confirm.
        return MantisStream.create(null)
                .source(new ObservableSourceImpl<>(new TwitterSource()))
                .map(event -> {
                    try {
                        return jsonSerializer.toMap(event);
                    } catch (Exception e) {
                        log.error("Failed to deserialize event {}", event, e);
                        return null;
                    }
                })
                // filter out english tweets; the null guard also drops events that failed to
                // parse above (the previous code dereferenced the null map and threw an NPE)
                .filter((eventMap) -> {
                    if (eventMap != null && eventMap.containsKey("lang") && eventMap.containsKey("text")) {
                        String lang = (String) eventMap.get("lang");
                        return "en".equalsIgnoreCase(lang);
                    }
                    return false;
                }).map((eventMap) -> (String) eventMap.get("text"))
                // tokenize the tweets into words
                .flatMap(this::tokenize)
                .keyBy(WordCountPair::getWord)
                // On a hopping window of 10 seconds
                .window(WindowSpec.timed(Duration.ofSeconds(10)))
                .reduce((SimpleReduceFunction<WordCountPair>) (acc, item) -> {
                    if (acc.getWord() != null && !acc.getWord().isEmpty() && !acc.getWord().equals(item.getWord())) {
                        log.warn("keys dont match: acc ({}) vs item ({})", acc.getWord(), item.getWord());
                    }
                    return new WordCountPair(acc.getWord(), acc.getCount() + item.getCount());
                })
                .map(WordCountPair::toString)
                // Reuse built in sink that eagerly subscribes and delivers data over SSE
                .sink(new ObservableSinkImpl<>(Sinks.eagerSubscribe(Sinks.sse((String data) -> data))))
                .metadata(new Metadata.Builder()
                        .name("TwitterSample")
                        .description("Connects to a Twitter feed")
                        .build())
                .create();
    }

    /**
     * Splits tweet text on whitespace into lowercase words, each paired with a count of 1.
     * replaceAll("\\s*", "") strips any whitespace characters left inside a token.
     */
    private List<WordCountPair> tokenize(String text) {
        StringTokenizer tokenizer = new StringTokenizer(text);
        List<WordCountPair> wordCountPairs = new ArrayList<>();
        while (tokenizer.hasMoreTokens()) {
            String word = tokenizer.nextToken().replaceAll("\\s*", "").toLowerCase();
            wordCountPairs.add(new WordCountPair(word, 1));
        }
        return wordCountPairs;
    }

    /**
     * Entry point. Expects exactly four arguments: consumerKey consumerSecret token tokenSecret.
     */
    public static void main(String[] args) {
        if (args.length != 4) {
            System.out.println("Usage: java com.netflix.mantis.examples.wordcount.TwitterDslJob <consumerKey> <consumerSecret> <token> <tokenSecret>");
            // Exit non-zero: invocation with the wrong arguments is an error
            // (the previous code exited with status 0).
            System.exit(1);
        }
        String consumerKey = args[0].trim();
        String consumerSecret = args[1].trim();
        String token = args[2].trim();
        String tokenSecret = args[3].trim();
        LocalJobExecutorNetworked.execute(new TwitterDslJob().getJobInstance(),
                new Parameter(TwitterSource.CONSUMER_KEY_PARAM, consumerKey),
                new Parameter(TwitterSource.CONSUMER_SECRET_PARAM, consumerSecret),
                new Parameter(TwitterSource.TOKEN_PARAM, token),
                new Parameter(TwitterSource.TOKEN_SECRET_PARAM, tokenSecret)
        );
    }
}
| 7,709 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-twitter-sample/src/main/java/com/netflix/mantis/examples | Create_ds/mantis/mantis-examples/mantis-examples-twitter-sample/src/main/java/com/netflix/mantis/examples/wordcount/TwitterJob.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.wordcount;
import com.netflix.mantis.examples.config.StageConfigs;
import com.netflix.mantis.examples.core.WordCountPair;
import com.netflix.mantis.examples.wordcount.sources.TwitterSource;
import io.mantisrx.common.JsonSerializer;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.MantisJob;
import io.mantisrx.runtime.MantisJobProvider;
import io.mantisrx.runtime.Metadata;
import io.mantisrx.runtime.executor.LocalJobExecutorNetworked;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.runtime.sink.Sinks;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
import rx.Observable;
/**
* This sample demonstrates connecting to a twitter feed and counting the number of occurrences of words within a 10
* sec hopping window.
* Run the main method of this class and then look for a the SSE port in the output
* E.g
* <code> Serving modern HTTP SSE server sink on port: 8650 </code>
* You can curl this port <code> curl localhost:8650</code> to view the output of the job.
*
* To run via gradle
* ../gradlew execute --args='consumerKey consumerSecret token tokensecret'
*/
@Slf4j
public class TwitterJob extends MantisJobProvider<String> {

    /**
     * Builds the job: Twitter source -> single scalar stage (JSON parse, drop non-English
     * tweets, tokenize, per-word counts over a 10 second window) -> SSE sink.
     */
    @Override
    public Job<String> getJobInstance() {
        final JsonSerializer jsonSerializer = new JsonSerializer();
        return MantisJob
                .source(new TwitterSource())
                .stage((context, dataO) -> dataO
                        .map(event -> {
                            try {
                                return jsonSerializer.toMap(event);
                            } catch (Exception e) {
                                log.error("Failed to deserialize event {}", event, e);
                                return null;
                            }
                        })
                        // filter out english tweets; the null guard also drops events that
                        // failed to parse above (the previous code dereferenced the null
                        // map here and threw an NPE)
                        .filter((eventMap) -> {
                            if (eventMap != null && eventMap.containsKey("lang") && eventMap.containsKey("text")) {
                                String lang = (String) eventMap.get("lang");
                                return "en".equalsIgnoreCase(lang);
                            }
                            return false;
                        }).map((eventMap) -> (String) eventMap.get("text"))
                        // tokenize the tweets into words
                        .flatMap((text) -> Observable.from(tokenize(text)))
                        // On a hopping window of 10 seconds
                        .window(10, TimeUnit.SECONDS)
                        .flatMap((wordCountPairObservable) -> wordCountPairObservable
                                // count how many times a word appears
                                .groupBy(WordCountPair::getWord)
                                .flatMap((groupO) -> groupO.reduce(0, (cnt, wordCntPair) -> cnt + 1)
                                        .map((cnt) -> new WordCountPair(groupO.getKey(), cnt))))
                        .map(WordCountPair::toString)
                        .doOnNext((cnt) -> log.info(cnt))
                        , StageConfigs.scalarToScalarConfig())
                // Reuse built in sink that eagerly subscribes and delivers data over SSE
                .sink(Sinks.eagerSubscribe(Sinks.sse((String data) -> data)))
                .metadata(new Metadata.Builder()
                        .name("TwitterSample")
                        .description("Connects to a Twitter feed")
                        .build())
                .create();
    }

    /**
     * Splits tweet text on whitespace into lowercase words, each paired with a count of 1.
     * replaceAll("\\s*", "") strips any whitespace characters left inside a token.
     */
    private List<WordCountPair> tokenize(String text) {
        StringTokenizer tokenizer = new StringTokenizer(text);
        List<WordCountPair> wordCountPairs = new ArrayList<>();
        while (tokenizer.hasMoreTokens()) {
            String word = tokenizer.nextToken().replaceAll("\\s*", "").toLowerCase();
            wordCountPairs.add(new WordCountPair(word, 1));
        }
        return wordCountPairs;
    }

    /**
     * Entry point. Expects exactly four arguments: consumerKey consumerSecret token tokenSecret.
     */
    public static void main(String[] args) {
        if (args.length != 4) {
            System.out.println("Usage: java com.netflix.mantis.examples.wordcount.TwitterJob <consumerKey> <consumerSecret> <token> <tokenSecret>");
            // Exit non-zero: invocation with the wrong arguments is an error
            // (the previous code exited with status 0).
            System.exit(1);
        }
        String consumerKey = args[0].trim();
        String consumerSecret = args[1].trim();
        String token = args[2].trim();
        String tokenSecret = args[3].trim();
        LocalJobExecutorNetworked.execute(new TwitterJob().getJobInstance(),
                new Parameter(TwitterSource.CONSUMER_KEY_PARAM, consumerKey),
                new Parameter(TwitterSource.CONSUMER_SECRET_PARAM, consumerSecret),
                new Parameter(TwitterSource.TOKEN_PARAM, token),
                new Parameter(TwitterSource.TOKEN_SECRET_PARAM, tokenSecret)
        );
    }
}
| 7,710 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-twitter-sample/src/main/java/com/netflix/mantis/examples/wordcount | Create_ds/mantis/mantis-examples/mantis-examples-twitter-sample/src/main/java/com/netflix/mantis/examples/wordcount/sources/TwitterSource.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.wordcount.sources;
import com.netflix.mantis.examples.core.ObservableQueue;
import com.twitter.hbc.ClientBuilder;
import com.twitter.hbc.core.Constants;
import com.twitter.hbc.core.endpoint.StatusesFilterEndpoint;
import com.twitter.hbc.core.processor.StringDelimitedProcessor;
import com.twitter.hbc.httpclient.BasicClient;
import com.twitter.hbc.httpclient.auth.Authentication;
import com.twitter.hbc.httpclient.auth.OAuth1;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.Source;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import rx.Observable;
/**
* A Mantis Source that wraps an underlying Twitter source based on the HorseBirdClient.
*/
public class TwitterSource implements Source<String> {

    public static final String CONSUMER_KEY_PARAM = "consumerKey";
    public static final String CONSUMER_SECRET_PARAM = "consumerSecret";
    public static final String TOKEN_PARAM = "token";
    public static final String TOKEN_SECRET_PARAM = "tokenSecret";
    public static final String TERMS_PARAM = "terms";

    // Bridges the push-based hosebird client into an rx Observable.
    private final ObservableQueue<String> twitterObservable = new ObservableQueue<>();
    private transient BasicClient client;

    /**
     * Returns the stream of raw tweet payloads produced by the hosebird client.
     *
     * @param context provides access to Mantis system information
     * @param index   worker index information
     * @return a single inner observable carrying tweet strings
     */
    @Override
    public Observable<Observable<String>> call(Context context, Index index) {
        return Observable.just(twitterObservable.observe());
    }

    /**
     * Define parameters required by this source.
     *
     * @return the parameter definitions (twitter credentials and tracked terms)
     */
    @Override
    public List<ParameterDefinition<?>> getParameters() {
        List<ParameterDefinition<?>> params = Lists.newArrayList();
        // Consumer key
        params.add(new StringParameter()
                .name(CONSUMER_KEY_PARAM)
                .description("twitter consumer key")
                .validator(Validators.notNullOrEmpty())
                .required()
                .build());
        // Consumer secret
        params.add(new StringParameter()
                .name(CONSUMER_SECRET_PARAM)
                .description("twitter consumer secret")
                .validator(Validators.notNullOrEmpty())
                .required()
                .build());
        // OAuth token
        params.add(new StringParameter()
                .name(TOKEN_PARAM)
                .description("twitter token")
                .validator(Validators.notNullOrEmpty())
                .required()
                .build());
        // OAuth token secret
        params.add(new StringParameter()
                .name(TOKEN_SECRET_PARAM)
                .description("twitter token secret")
                .validator(Validators.notNullOrEmpty())
                .required()
                .build());
        // Comma-separated list of terms to track; optional with a default.
        params.add(new StringParameter()
                .name(TERMS_PARAM)
                .description("terms to follow")
                .validator(Validators.notNullOrEmpty())
                .defaultValue("Netflix,Dark")
                .build());
        return params;
    }

    /**
     * Init method is called only once during initialization. It is the ideal place to perform one time
     * configuration actions.
     *
     * @param context Provides access to Mantis system information like JobId, Job parameters etc
     * @param index   This provides access to the unique workerIndex assigned to this container. It also provides
     *                the total number of workers of this job.
     */
    @Override
    public void init(Context context, Index index) {
        String consumerKey = (String) context.getParameters().get(CONSUMER_KEY_PARAM);
        String consumerSecret = (String) context.getParameters().get(CONSUMER_SECRET_PARAM);
        String token = (String) context.getParameters().get(TOKEN_PARAM);
        String tokenSecret = (String) context.getParameters().get(TOKEN_SECRET_PARAM);
        String terms = (String) context.getParameters().get(TERMS_PARAM);

        Authentication auth = new OAuth1(consumerKey,
                consumerSecret,
                token,
                tokenSecret);

        // Track the configured comma-separated terms on the statuses/filter endpoint.
        StatusesFilterEndpoint endpoint = new StatusesFilterEndpoint();
        String[] termArray = terms.split(",");
        List<String> termsList = Arrays.asList(termArray);
        endpoint.trackTerms(termsList);

        client = new ClientBuilder()
                .name("twitter-source")
                .hosts(Constants.STREAM_HOST)
                .endpoint(endpoint)
                .authentication(auth)
                .processor(new StringDelimitedProcessor(twitterObservable))
                .build();
        client.connect();
    }

    @Override
    public void close() throws IOException {
        // close() can be invoked even if init() never ran (or failed before the
        // client was built); guard against an NPE on shutdown.
        if (client != null) {
            client.stop();
        }
    }
}
| 7,711 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-twitter-sample/src/main/java/com/netflix/mantis/examples | Create_ds/mantis/mantis-examples/mantis-examples-twitter-sample/src/main/java/com/netflix/mantis/examples/config/StageConfigs.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.config;
import io.mantisrx.common.codec.Codecs;
import io.mantisrx.runtime.KeyToScalar;
import io.mantisrx.runtime.ScalarToKey;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.codec.JacksonCodecs;
import java.util.Map;
/**
 * Static factory methods for the stage configurations shared by the example jobs.
 */
public class StageConfigs {

    // Utility holder; prevent instantiation.
    private StageConfigs() {
    }

    /** Config for a scalar-to-scalar stage exchanging plain strings. */
    public static ScalarToScalar.Config<String, String> scalarToScalarConfig() {
        return new ScalarToScalar.Config<String, String>()
                .codec(Codecs.string());
    }

    /** Config for a keyed-to-scalar stage; keys expire after 10 seconds. */
    public static KeyToScalar.Config<String, Map<String, Object>, String> keyToScalarConfig() {
        return new KeyToScalar.Config<String, Map<String, Object>, String>()
                .description("sum events ")
                .keyExpireTimeSeconds(10)
                .codec(Codecs.string());
    }

    /** Config for a scalar-to-keyed stage grouping event maps (by ip, per the description). */
    public static ScalarToKey.Config<String, String, Map<String, Object>> scalarToKeyConfig() {
        return new ScalarToKey.Config<String, String, Map<String, Object>>()
                .description("Group event data by ip")
                .concurrentInput()
                .keyExpireTimeSeconds(1)
                .codec(JacksonCodecs.mapStringObject());
    }
}
| 7,712 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-groupby-sample/src/main/java/com/netflix/mantis | Create_ds/mantis/mantis-examples/mantis-examples-groupby-sample/src/main/java/com/netflix/mantis/samples/RequestAggregationJob.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.samples;
import com.netflix.mantis.samples.source.RandomRequestSource;
import com.netflix.mantis.samples.stage.AggregationStage;
import com.netflix.mantis.samples.stage.CollectStage;
import com.netflix.mantis.samples.stage.GroupByStage;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.MantisJob;
import io.mantisrx.runtime.MantisJobProvider;
import io.mantisrx.runtime.Metadata;
import io.mantisrx.runtime.executor.LocalJobExecutorNetworked;
import io.mantisrx.runtime.sink.Sinks;
import lombok.extern.slf4j.Slf4j;
/**
* This sample demonstrates the use of a multi-stage job in Mantis. Multi-stage jobs are useful when a single
* container is incapable of processing the entire stream of events.
* Each stage represents one of these types of
* computations Scalar->Scalar, Scalar->Group, Group->Scalar, Group->Group.
*
* At deploy time the user can configure the number workers for each stage and the resource requirements for each worker.
* This sample has 3 stages
* 1. {@link GroupByStage} Receives the raw events, groups them by their category and sends it to the workers of stage 2 in such a way
* that all events for a particular group will land on the exact same worker of stage 2.
* 2. {@link AggregationStage} Receives events tagged by their group from the previous stage. Windows over them and
* sums up the counts of each group it has seen.
* 3. {@link CollectStage} Recieves the aggregates generated by the previous stage, collects them over a window and
* generates a consolidated report which is sent to the default Server Sent Event (SSE) sink.
*
* Run this sample by executing the main method of this class. Then look for the SSE port where the output of this job
* will be available for streaming. E.g Serving modern HTTP SSE server sink on port: 8299
* via command line do ../gradlew execute
*/
@Slf4j
public class RequestAggregationJob extends MantisJobProvider<String> {

    /**
     * Builds the three stage pipeline: random request source -> group by path
     * -> per-group aggregation -> report collection, emitted via the sysout sink.
     */
    @Override
    public Job<String> getJobInstance() {
        Metadata metadata = new Metadata.Builder()
                .name("GroupByPath")
                .description("Connects to a random data generator source"
                        + " and counts the number of requests for each uri within a window")
                .build();
        return MantisJob
                // Stream request events from the random data generator source.
                .source(new RandomRequestSource())
                // Stage 1: tag each event with its group (uri path).
                .stage(new GroupByStage(), GroupByStage.config())
                // Stage 2: aggregate counts per group over a window.
                .stage(new AggregationStage(), AggregationStage.config())
                // Stage 3: collect per-group aggregates into one report.
                .stage(new CollectStage(), CollectStage.config())
                .sink(Sinks.sysout())
                .metadata(metadata)
                .create();
    }

    /** Runs the job locally using the networked local executor. */
    public static void main(String[] args) {
        LocalJobExecutorNetworked.execute(new RequestAggregationJob().getJobInstance());
    }
}
| 7,713 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-groupby-sample/src/main/java/com/netflix/mantis | Create_ds/mantis/mantis-examples/mantis-examples-groupby-sample/src/main/java/com/netflix/mantis/samples/RequestAggregationDslJob.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.samples;
import com.netflix.mantis.samples.proto.AggregationReport;
import com.netflix.mantis.samples.proto.RequestAggregation;
import com.netflix.mantis.samples.proto.RequestEvent;
import com.netflix.mantis.samples.source.RandomRequestSource;
import com.netflix.mantis.samples.stage.AggregationStage;
import com.netflix.mantis.samples.stage.CollectStage;
import com.netflix.mantis.samples.stage.GroupByStage;
import io.mantisrx.runtime.Config;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.MantisJobProvider;
import io.mantisrx.runtime.Metadata;
import io.mantisrx.runtime.core.MantisStream;
import io.mantisrx.runtime.core.WindowSpec;
import io.mantisrx.runtime.core.functions.ReduceFunction;
import io.mantisrx.runtime.core.sinks.ObservableSinkImpl;
import io.mantisrx.runtime.core.sources.ObservableSourceImpl;
import io.mantisrx.runtime.executor.LocalJobExecutorNetworked;
import io.mantisrx.runtime.sink.Sinks;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.time.Duration;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import lombok.extern.slf4j.Slf4j;
/**
* This sample demonstrates the use of a multi-stage job in Mantis. Multi-stage jobs are useful when a single
* container is incapable of processing the entire stream of events.
* Each stage represents one of these types of
* computations Scalar->Scalar, Scalar->Group, Group->Scalar, Group->Group.
*
* At deploy time the user can configure the number workers for each stage and the resource requirements for each worker.
* This sample has 3 stages
* 1. {@link GroupByStage} Receives the raw events, groups them by their category and sends it to the workers of stage 2 in such a way
* that all events for a particular group will land on the exact same worker of stage 2.
* 2. {@link AggregationStage} Receives events tagged by their group from the previous stage. Windows over them and
* sums up the counts of each group it has seen.
* 3. {@link CollectStage} Recieves the aggregates generated by the previous stage, collects them over a window and
* generates a consolidated report which is sent to the default Server Sent Event (SSE) sink.
*
* Run this sample by executing the main method of this class. Then look for the SSE port where the output of this job
* will be available for streaming. E.g Serving modern HTTP SSE server sink on port: 8299
* via command line do ../gradlew execute
*/
@Slf4j
public class RequestAggregationDslJob extends MantisJobProvider<String> {

    // Shared, thread-safe Jackson mapper used to render the final report as JSON.
    private static final ObjectMapper mapper = new ObjectMapper();

    /**
     * Builds the same group-by/aggregate/collect pipeline as
     * {@code RequestAggregationJob}, but expressed with the MantisStream DSL.
     */
    @Override
    public Job<String> getJobInstance() {
        // Hard-coded grouping dimension; only "path" is used in this sample.
        String groupByParam = "path";
        Config<String> jobConfig = MantisStream.create(null)
                .source(new ObservableSourceImpl<>(new RandomRequestSource()))
                // Group events by uri path (or ip address, if groupByParam changed).
                .keyBy(x -> {
                    if ("path".equalsIgnoreCase(groupByParam)) {
                        return x.getRequestPath();
                    } else {
                        return x.getIpAddress();
                    }
                })
                .window(WindowSpec.timed(Duration.ofSeconds(5)))
                // Fold each group's events within the window into one aggregation.
                .reduce(new ReduceFunction<RequestEvent, RequestAggregation>() {
                    @Override
                    public RequestAggregation initialValue() {
                        // Empty accumulator: path is null, count is 0.
                        return RequestAggregation.builder().build();
                    }

                    @Override
                    public RequestAggregation reduce(RequestAggregation acc, RequestEvent requestEvent) {
                        // TODO(hmittal): Need access to key-by key
                        // NOTE(review): "count" accumulates getLatency(); the sample
                        // source emits latency == 1 so this equals a request count —
                        // confirm intent if the source ever emits other latencies.
                        return RequestAggregation.builder()
                                .path(requestEvent.getRequestPath())
                                .count(acc.getCount() + requestEvent.getLatency())
                                .build();
                    }
                })
                .materialize()
                // Collapse everything onto a single key to collect a global report.
                .keyBy(x -> "")
                .window(WindowSpec.timed(Duration.ofSeconds(5)))
                .reduce(new ReduceFunction<RequestAggregation, AggregationReport>() {
                    @Override
                    public AggregationReport initialValue() {
                        return new AggregationReport(new ConcurrentHashMap<>());
                    }

                    @Override
                    public AggregationReport reduce(AggregationReport acc, RequestAggregation item) {
                        // Skip empty accumulators (null path) produced by initialValue().
                        if (item != null && item.getPath() != null) {
                            acc.getPathToCountMap().put(item.getPath(), item.getCount());
                        }
                        return acc;
                    }
                })
                // Serialize the report to JSON; failures are logged and dropped below.
                .map(report -> {
                    try {
                        return mapper.writeValueAsString(report);
                    } catch (JsonProcessingException e) {
                        log.error(e.getMessage());
                        return null;
                    }
                })
                .filter(Objects::nonNull)
                // Reuse built in sink that eagerly subscribes and delivers data over SSE
                .sink(new ObservableSinkImpl<>(Sinks.sysout()));
        return jobConfig
                .metadata(new Metadata.Builder()
                        .name("GroupByPath")
                        .description("Connects to a random data generator source"
                                + " and counts the number of requests for each uri within a window")
                        .build())
                .create();
    }

    public static void main(String[] args) {
        // To run locally we use the LocalJobExecutor
        LocalJobExecutorNetworked.execute(new RequestAggregationDslJob().getJobInstance());
    }
}
| 7,714 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-groupby-sample/src/main/java/com/netflix/mantis/samples | Create_ds/mantis/mantis-examples/mantis-examples-groupby-sample/src/main/java/com/netflix/mantis/samples/proto/RequestEvent.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.samples.proto;
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import io.mantisrx.common.codec.Codec;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectReader;
import java.io.IOException;
import java.io.Serializable;
import lombok.Builder;
import lombok.Data;
/**
* A simple POJO that holds data about a request event.
*/
@Data
@Builder
public class RequestEvent implements Serializable {

    private static final ObjectMapper mapper = new ObjectMapper();
    private static final ObjectReader requestEventReader = mapper.readerFor(RequestEvent.class);

    private final String requestPath;
    private final String ipAddress;
    private final int latency;

    /**
     * Codec describing how a {@link RequestEvent} is serialized when crossing
     * worker boundaries: Jackson JSON bytes in both directions.
     *
     * @return the codec instance
     */
    public static Codec<RequestEvent> requestEventCodec() {
        return new Codec<RequestEvent>() {
            @Override
            public byte[] encode(final RequestEvent value) {
                try {
                    return mapper.writeValueAsBytes(value);
                } catch (Exception ex) {
                    throw new RuntimeException(ex);
                }
            }

            @Override
            public RequestEvent decode(byte[] bytes) {
                try {
                    return requestEventReader.readValue(bytes);
                } catch (IOException ex) {
                    throw new RuntimeException(ex);
                }
            }
        };
    }
}
| 7,715 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-groupby-sample/src/main/java/com/netflix/mantis/samples | Create_ds/mantis/mantis-examples/mantis-examples-groupby-sample/src/main/java/com/netflix/mantis/samples/proto/RequestAggregation.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.samples.proto;
import io.mantisrx.common.codec.Codec;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectReader;
import java.io.IOException;
import java.io.Serializable;
import lombok.Builder;
import lombok.Data;
/**
* A simple POJO that holds the count of how many times a particular request path was invoked.
*/
@Data
@Builder
public class RequestAggregation implements Serializable {

    private static final ObjectMapper mapper = new ObjectMapper();
    private static final ObjectReader requestAggregationReader = mapper.readerFor(RequestAggregation.class);

    private String path;
    private int count;

    /**
     * Codec describing how a {@link RequestAggregation} travels between stages:
     * Jackson JSON bytes in both directions.
     *
     * @return the codec instance
     */
    public static Codec<RequestAggregation> requestAggregationCodec() {
        return new Codec<RequestAggregation>() {
            @Override
            public byte[] encode(final RequestAggregation value) {
                try {
                    return mapper.writeValueAsBytes(value);
                } catch (Exception ex) {
                    throw new RuntimeException(ex);
                }
            }

            @Override
            public RequestAggregation decode(byte[] bytes) {
                try {
                    return requestAggregationReader.readValue(bytes);
                } catch (IOException ex) {
                    throw new RuntimeException(ex);
                }
            }
        };
    }
}
| 7,716 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-groupby-sample/src/main/java/com/netflix/mantis/samples | Create_ds/mantis/mantis-examples/mantis-examples-groupby-sample/src/main/java/com/netflix/mantis/samples/proto/AggregationReport.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.samples.proto;
import java.io.Serializable;
import java.util.Map;
import lombok.Data;
/**
* A simple POJO which holds the result of aggregating counts per request path.
*/
@Data
public class AggregationReport implements Serializable {

    // Per-path request counts for one collection window.
    // NOTE(review): the map is exposed directly through the Lombok getter and is
    // mutated in place by callers (e.g. the reduce step in RequestAggregationDslJob
    // calls getPathToCountMap().put(...)), so it must remain mutable here.
    private final Map<String, Integer> pathToCountMap;
}
| 7,717 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-groupby-sample/src/main/java/com/netflix/mantis/samples | Create_ds/mantis/mantis-examples/mantis-examples-groupby-sample/src/main/java/com/netflix/mantis/samples/source/RandomRequestSource.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.samples.source;
import com.netflix.mantis.samples.proto.RequestEvent;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.Source;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
import net.andreinc.mockneat.MockNeat;
import rx.Observable;
/**
* Generates random set of RequestEvents at a preconfigured interval.
*/
@Slf4j
public class RandomRequestSource implements Source<RequestEvent> {

    private MockNeat mockDataGenerator;

    /**
     * Emits one synthetic {@link RequestEvent} every 250 milliseconds.
     */
    @Override
    public Observable<Observable<RequestEvent>> call(Context context, Index index) {
        Observable<RequestEvent> events = Observable
                .interval(250, TimeUnit.MILLISECONDS)
                .map(this::nextEvent)
                .doOnNext(event -> log.debug("Generated Event {}", event));
        return Observable.just(events);
    }

    // Builds one synthetic request event: random ipv4 plus a weighted random path.
    private RequestEvent nextEvent(Long tick) {
        String ipAddress = mockDataGenerator.ipv4s().get();
        String requestPath = mockDataGenerator.probabilites(String.class)
                .add(0.1, "/login")
                .add(0.2, "/genre/horror")
                .add(0.5, "/genre/comedy")
                .add(0.2, "/mylist")
                .get();
        return RequestEvent.builder()
                .ipAddress(ipAddress)
                .requestPath(requestPath)
                .latency(1)
                .build();
    }

    @Override
    public void init(Context context, Index index) {
        mockDataGenerator = MockNeat.threadLocal();
    }

    @Override
    public void close() throws IOException {
        // Nothing to release; the generator holds no external resources.
    }
}
| 7,718 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-groupby-sample/src/main/java/com/netflix/mantis/samples | Create_ds/mantis/mantis-examples/mantis-examples-groupby-sample/src/main/java/com/netflix/mantis/samples/stage/AggregationStage.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.samples.stage;
import com.netflix.mantis.samples.proto.RequestAggregation;
import com.netflix.mantis.samples.proto.RequestEvent;
import io.mantisrx.common.MantisGroup;
import io.mantisrx.common.codec.Codec;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.GroupToScalar;
import io.mantisrx.runtime.computation.GroupToScalarComputation;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.type.IntParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
import rx.Observable;
import rx.observables.GroupedObservable;
/**
* This is the 2nd stage of this three stage job. It receives events from {@link GroupByStage}
* This stage converts Grouped Events to Scalar events {@link GroupToScalarComputation} Typically used in the
* reduce portion of a map reduce computation.
*
* This stage receives an <code>Observable<MantisGroup<String,RequestEvent>></code>. This represents a stream of
* request events tagged by the URI Path they belong to.
* This stage then groups the events by the path and and counts the number of invocations of each path over a window.
*/
@Slf4j
public class AggregationStage implements GroupToScalarComputation<String, RequestEvent, RequestAggregation> {

    public static final String AGGREGATION_DURATION_MSEC_PARAM = "AggregationDurationMsec";

    // Window length in milliseconds; set once from job parameters in init().
    int aggregationDurationMsec;

    /**
     * Folds all events of one group (uri path) within the current window into a
     * single {@link RequestAggregation}.
     *
     * NOTE(review): the "count" accumulates getLatency() per event; the sample
     * source emits latency == 1, so this equals a request count — confirm intent
     * if the source ever emits other latency values.
     */
    private Observable<? extends RequestAggregation> aggregate(GroupedObservable<String, MantisGroup<String, RequestEvent>> go) {
        return go.reduce(RequestAggregation.builder().build(), (accumulator, value) -> {
            // NOTE(review): per-event INFO logging may be very noisy at volume.
            log.info("aggregating " + go.getKey() + " on Thread " + Thread.currentThread().getName());
            accumulator.setCount(accumulator.getCount() + value.getValue().getLatency());
            accumulator.setPath(go.getKey());
            return accumulator;
        })
                // .map((count) -> RequestAggregation.builder().count(count).path(go.getKey()).build())
                .doOnNext((aggregate) -> {
                    log.debug("Generated aggregate {}", aggregate);
                });
    }

    /**
     * The aggregate method is invoked by the Mantis runtime while executing the job.
     *
     * @param context Provides metadata information related to the current job.
     * @param mantisGroupO This is an Observable of {@link MantisGroup} events. Each event is a pair of the Key -> uri Path and
     *                     the {@link RequestEvent} event itself.
     * @return one aggregation per group per window
     */
    @Override
    public Observable<RequestAggregation> call(Context context, Observable<MantisGroup<String, RequestEvent>> mantisGroupO) {
        return mantisGroupO
                .doOnNext((mg) -> {
                    log.info("Received " + mg.getKeyValue() + " on Thread " + Thread.currentThread().getName());
                })
                // Chop the stream into fixed windows, then aggregate per group inside each.
                .window(aggregationDurationMsec, TimeUnit.MILLISECONDS)
                .flatMap((omg) -> omg.groupBy(MantisGroup::getKeyValue)
                        .flatMap(// .map((count) -> RequestAggregation.builder().count(count).path(go.getKey()).build())
                                this::aggregate
                        ));
    }

    /**
     * Invoked only once during job startup. A good place to add one time initialization actions.
     *
     * @param context provides access to job parameters
     */
    @Override
    public void init(Context context) {
        // Falls back to 1000 ms when the parameter is unset.
        aggregationDurationMsec = (int) context.getParameters().get(AGGREGATION_DURATION_MSEC_PARAM, 1000);
    }

    /**
     * Provides the Mantis runtime configuration information about the type of computation done by this stage.
     * E.g in this case it specifies this is a GroupToScalar computation and also provides a {@link Codec} on how to
     * serialize the {@link RequestAggregation} events before sending it to the {@link CollectStage}
     *
     * @return the stage configuration
     */
    public static GroupToScalar.Config<String, RequestEvent, RequestAggregation> config() {
        return new GroupToScalar.Config<String, RequestEvent, RequestAggregation>()
                .description("sum events for a path")
                .codec(RequestAggregation.requestAggregationCodec())
                .concurrentInput()
                .withParameters(getParameters());
    }

    /**
     * Here we declare stage specific parameters.
     *
     * @return the parameter definitions for this stage
     */
    public static List<ParameterDefinition<?>> getParameters() {
        List<ParameterDefinition<?>> params = new ArrayList<>();
        // Aggregation duration: valid range 100-10000 ms, default 5000 ms.
        params.add(new IntParameter()
                .name(AGGREGATION_DURATION_MSEC_PARAM)
                .description("window size for aggregation")
                .validator(Validators.range(100, 10000))
                .defaultValue(5000)
                .build());
        return params;
    }
}
| 7,719 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-groupby-sample/src/main/java/com/netflix/mantis/samples | Create_ds/mantis/mantis-examples/mantis-examples-groupby-sample/src/main/java/com/netflix/mantis/samples/stage/CollectStage.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.samples.stage;
import com.netflix.mantis.samples.proto.AggregationReport;
import com.netflix.mantis.samples.proto.RequestAggregation;
import io.mantisrx.common.codec.Codecs;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.computation.ScalarComputation;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
import rx.Observable;
/**
* This is the final stage of this 3 stage job. It receives events from {@link AggregationStage}
* The role of this stage is to collect aggregates generated by the previous stage for all the groups within
* a window and generate a unified report of them.
*/
@Slf4j
public class CollectStage implements ScalarComputation<RequestAggregation,String> {
private static final ObjectMapper mapper = new ObjectMapper();
@Override
public Observable<String> call(Context context, Observable<RequestAggregation> requestAggregationO) {
return requestAggregationO
.window(5, TimeUnit.SECONDS)
.flatMap((requestAggO) -> requestAggO
.reduce(new RequestAggregationAccumulator(),(acc, requestAgg) -> acc.addAggregation(requestAgg))
.map(RequestAggregationAccumulator::generateReport)
.doOnNext((report) -> {
log.debug("Generated Collection report {}", report);
})
)
.map((report) -> {
try {
return mapper.writeValueAsString(report);
} catch (JsonProcessingException e) {
log.error(e.getMessage());
return null;
}
}).filter(Objects::nonNull);
}
@Override
public void init(Context context) {
}
public static ScalarToScalar.Config<RequestAggregation,String> config(){
return new ScalarToScalar.Config<RequestAggregation,String>()
.codec(Codecs.string());
}
/**
* The accumulator class as the name suggests accumulates all aggregates seen during a window and
* generates a consolidated report at the end.
*/
static class RequestAggregationAccumulator {
private final Map<String, Integer> pathToCountMap = new HashMap<>();
public RequestAggregationAccumulator() {}
public RequestAggregationAccumulator addAggregation(RequestAggregation agg) {
pathToCountMap.put(agg.getPath(), agg.getCount());
return this;
}
public AggregationReport generateReport() {
log.info("Generated report from=> {}", pathToCountMap);
return new AggregationReport(pathToCountMap);
}
}
}
| 7,720 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-groupby-sample/src/main/java/com/netflix/mantis/samples | Create_ds/mantis/mantis-examples/mantis-examples-groupby-sample/src/main/java/com/netflix/mantis/samples/stage/GroupByStage.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.samples.stage;
import com.netflix.mantis.samples.proto.RequestEvent;
import io.mantisrx.common.MantisGroup;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.ScalarToGroup;
import io.mantisrx.runtime.computation.ToGroupComputation;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import java.util.ArrayList;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import rx.Observable;
/**
* This is the first stage of this 3 stage job. It is at the head of the computation DAG
* This stage converts Scalar Events to Grouped Events {@link ToGroupComputation}. The grouped events are then
* send to the next stage of the Mantis Job in a way such that all events belonging to a particular group will
* land on the same worker of the next stage.
*
* It receives a stream of {@link RequestEvent} and groups them by either the path or the IP address
* based on the parameters passed by the user.
*/
@Slf4j
public class GroupByStage implements ToGroupComputation<RequestEvent, String, RequestEvent> {

    private static final String GROUPBY_FIELD_PARAM = "groupByField";

    // Set in init() from the "groupByField" job parameter:
    // true => group by request path, false => group by client IP address.
    private boolean groupByPath = true;

    /**
     * Tags each incoming event with its group key so that all events sharing a key
     * are routed to the same worker of the next stage.
     *
     * @param context        the Mantis job context
     * @param requestEventO  the stream of raw request events
     * @return a stream of (key, event) pairs
     */
    @Override
    public Observable<MantisGroup<String, RequestEvent>> call(Context context, Observable<RequestEvent> requestEventO) {
        return requestEventO
                .map((requestEvent) -> {
                    if(groupByPath) {
                        return new MantisGroup<>(requestEvent.getRequestPath(), requestEvent);
                    } else {
                        return new MantisGroup<>(requestEvent.getIpAddress(), requestEvent);
                    }
                });
    }

    /** Reads the groupByField job parameter (default "path") to pick the grouping key. */
    @Override
    public void init(Context context) {
        String groupByField = (String)context.getParameters().get(GROUPBY_FIELD_PARAM,"path");
        // equalsIgnoreCase already yields a boolean; the "? true : false" ternary was redundant.
        groupByPath = groupByField.equalsIgnoreCase("path");
    }

    /**
     * Here we declare stage specific parameters.
     *
     * @return the parameter definitions exposed by this stage
     */
    public static List<ParameterDefinition<?>> getParameters() {
        List<ParameterDefinition<?>> params = new ArrayList<>();
        // Group by field
        params.add(new StringParameter()
                .name(GROUPBY_FIELD_PARAM)
                .description("The key to group events by")
                .validator(Validators.notNullOrEmpty())
                .defaultValue("path")
                .build());
        return params;
    }

    /** Stage config: scalar-to-group, with concurrent input enabled. */
    public static ScalarToGroup.Config<RequestEvent, String, RequestEvent> config(){
        return new ScalarToGroup.Config<RequestEvent, String, RequestEvent>()
                .description("Group event data by path/ip")
                .concurrentInput() // signifies events can be processed in parallel
                .withParameters(getParameters())
                .codec(RequestEvent.requestEventCodec());
    }
}
| 7,721 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-mantis-publish-sample/src/main/java/com/netflix/mantis/examples | Create_ds/mantis/mantis-examples/mantis-examples-mantis-publish-sample/src/main/java/com/netflix/mantis/examples/mantispublishsample/Application.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.mantispublishsample;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.netflix.archaius.guice.ArchaiusModule;
import com.netflix.spectator.nflx.SpectatorModule;
import io.mantisrx.publish.api.EventPublisher;
import io.mantisrx.publish.netty.guice.MantisRealtimeEventsPublishModule;
/**
* A simple example that uses Guice to inject the {@link EventPublisher} part of the mantis-publish library
* to send events to Mantis.
*
* The mantis-publish library provides on-demand source side filtering via MQL. When a user publishes
* events via this library the events may not be actually shipped to Mantis. A downstream consumer needs
* to first register a query and the query needs to match events published by the user.
*/
public class Application {

    public static void main(String [] args) {
        // Wire together the sample bindings, Archaius configuration, the Mantis
        // Realtime Events publish module and Spectator metrics, then start publishing.
        final Injector injector = Guice.createInjector(
                new BasicModule(),
                new ArchaiusModule(),
                new MantisRealtimeEventsPublishModule(),
                new SpectatorModule());
        injector.getInstance(IDataPublisher.class).generateAndSendEventsToMantis();
    }
}
| 7,722 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-mantis-publish-sample/src/main/java/com/netflix/mantis/examples | Create_ds/mantis/mantis-examples/mantis-examples-mantis-publish-sample/src/main/java/com/netflix/mantis/examples/mantispublishsample/BasicModule.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.mantispublishsample;
import com.google.inject.AbstractModule;
public class BasicModule extends AbstractModule {

    /**
     * Declares the Guice bindings for this sample: the publisher and the data
     * generator interfaces are bound to their sample implementations.
     */
    @Override
    protected void configure() {
        bind(IDataPublisher.class).to(SampleDataPublisher.class);
        bind(IDataGenerator.class).to(DataGenerator.class);
    }
}
| 7,723 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-mantis-publish-sample/src/main/java/com/netflix/mantis/examples | Create_ds/mantis/mantis-examples/mantis-examples-mantis-publish-sample/src/main/java/com/netflix/mantis/examples/mantispublishsample/SampleDataPublisher.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.mantispublishsample;
import com.google.inject.Inject;
import io.mantisrx.publish.api.Event;
import io.mantisrx.publish.api.EventPublisher;
import lombok.extern.slf4j.Slf4j;
import rx.Observable;
/**
* A simple example that uses Guice to inject the {@link EventPublisher} part of the mantis-publish library
* to send events to Mantis.
*
* The mantis-publish library provides on-demand source side filtering via MQL. When a user publishes
* events via this library the events may not be actually shipped to Mantis. A downstream consumer needs
* to first register a query and the query needs to match events published by the user.
*
*/
@Slf4j
public class SampleDataPublisher implements IDataPublisher{

    @Inject
    EventPublisher publisher;

    // Inject by interface so the binding declared in BasicModule
    // (IDataGenerator -> DataGenerator) is honored rather than bypassed
    // by injecting the concrete class directly.
    @Inject
    IDataGenerator dataGenerator;

    /**
     * Generates random events at a fixed rate and publishes them to the mantis-publish library.
     * Here the events are published to the defaultStream. publish() is asynchronous; each
     * resulting CompletionStage is bridged to an Observable and the send status is logged.
     */
    @Override
    public void generateAndSendEventsToMantis() {
        dataGenerator
                .generateEvents()
                .map((requestEvent) -> new Event(requestEvent.toMap()))
                .flatMap((event) -> Observable.from(publisher.publish(event)
                        .toCompletableFuture()))
                .toBlocking()
                .subscribe((status) -> {
                    log.info("Mantis publish JavaApp send event status => {}", status);
                });
    }
}
| 7,724 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-mantis-publish-sample/src/main/java/com/netflix/mantis/examples | Create_ds/mantis/mantis-examples/mantis-examples-mantis-publish-sample/src/main/java/com/netflix/mantis/examples/mantispublishsample/IDataPublisher.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.mantispublishsample;
public interface IDataPublisher {

    /**
     * Generates events and publishes them to Mantis via the mantis-publish library.
     */
    void generateAndSendEventsToMantis();
}
| 7,725 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-mantis-publish-sample/src/main/java/com/netflix/mantis/examples | Create_ds/mantis/mantis-examples/mantis-examples-mantis-publish-sample/src/main/java/com/netflix/mantis/examples/mantispublishsample/IDataGenerator.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.mantispublishsample;
import com.netflix.mantis.examples.mantispublishsample.proto.RequestEvent;
import rx.Observable;
/**
* A data generator that generates a stream of {@link RequestEvent} at a fixed interval.
*/
public interface IDataGenerator {

    /**
     * @return an observable stream of randomly generated {@link RequestEvent}s
     */
    Observable<RequestEvent> generateEvents();
}
| 7,726 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-mantis-publish-sample/src/main/java/com/netflix/mantis/examples | Create_ds/mantis/mantis-examples/mantis-examples-mantis-publish-sample/src/main/java/com/netflix/mantis/examples/mantispublishsample/DataGenerator.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.mantispublishsample;
import com.netflix.mantis.examples.mantispublishsample.proto.RequestEvent;
import java.util.concurrent.TimeUnit;
import net.andreinc.mockneat.MockNeat;
import rx.Observable;
/**
* Uses MockNeat to generate a random stream of events. Each event represents a hypothetical request
* made by an end user to this service.
*/
public class DataGenerator implements IDataGenerator {

    // Emit one synthetic event every second.
    private final int rateMs = 1000;
    private final MockNeat mockDataGenerator = MockNeat.threadLocal();

    /**
     * @return an infinite stream that emits one randomly generated {@link RequestEvent}
     *         every {@code rateMs} milliseconds
     */
    @Override
    public Observable<RequestEvent> generateEvents() {
        return Observable
                .interval(rateMs, TimeUnit.MILLISECONDS)
                .map((tick) -> generateEvent());
    }

    /** Builds a single random request event using weighted choices for each field. */
    private RequestEvent generateEvent() {
        String path = mockDataGenerator.probabilites(String.class)
                .add(0.1, "/login")
                .add(0.2, "/genre/horror")
                .add(0.5, "/genre/comedy")
                .add(0.2, "/mylist")
                .get();

        String deviceType = mockDataGenerator.probabilites(String.class)
                .add(0.1, "ps4")
                .add(0.1, "xbox")
                .add(0.2, "browser")
                .add(0.3, "ios")
                .add(0.3, "android")
                .get();

        String userId = mockDataGenerator.strings().size(10).get();

        // 30% server errors, 70% success. The original listed 500 twice
        // (0.1 + 0.2); merged here since the value was identical — possibly one
        // of the two entries was meant to be a different status code (TODO confirm).
        int status = mockDataGenerator.probabilites(Integer.class)
                .add(0.3, 500)
                .add(0.7, 200)
                .get();

        String country = mockDataGenerator.countries().names().get();

        return RequestEvent.builder()
                .status(status)
                .uri(path)
                .country(country)
                .userId(userId)
                .deviceType(deviceType)
                .build();
    }
}
| 7,727 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-mantis-publish-sample/src/main/java/com/netflix/mantis/examples/mantispublishsample | Create_ds/mantis/mantis-examples/mantis-examples-mantis-publish-sample/src/main/java/com/netflix/mantis/examples/mantispublishsample/proto/RequestEvent.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.mantispublishsample.proto;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectReader;
import java.util.HashMap;
import java.util.Map;
import lombok.Builder;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
/**
* Represents a Request Event a service may receive.
*/
@Data
@Builder
public class RequestEvent {
private static final ObjectMapper mapper = new ObjectMapper();
private static final ObjectReader requestEventReader = mapper.readerFor(RequestEvent.class);
private final String userId;
private final String uri;
private final int status;
private final String country;
private final String deviceType;
public Map<String, Object> toMap() {
Map<String, Object> data = new HashMap<>();
data.put("userId", userId);
data.put("uri", uri);
data.put("status", status);
data.put("country", country);
data.put("deviceType", deviceType);
return data;
}
public String toJsonString() {
try {
return mapper.writeValueAsString(this);
} catch (JsonProcessingException e) {
e.printStackTrace();
return null;
}
}
}
| 7,728 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-wordcount/src/main/java/com/netflix/mantis/examples | Create_ds/mantis/mantis-examples/mantis-examples-wordcount/src/main/java/com/netflix/mantis/examples/wordcount/WordCountJob.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.wordcount;
import com.netflix.mantis.examples.config.StageConfigs;
import com.netflix.mantis.examples.core.WordCountPair;
import com.netflix.mantis.examples.wordcount.sources.IlliadSource;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.MantisJob;
import io.mantisrx.runtime.MantisJobProvider;
import io.mantisrx.runtime.Metadata;
import io.mantisrx.runtime.executor.LocalJobExecutorNetworked;
import io.mantisrx.runtime.sink.Sinks;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
import rx.Observable;
/**
* This sample demonstrates ingesting data from a text file and counting the number of occurrences of words within a 10
* sec hopping window.
 * Run the main method of this class and then look for the SSE port in the output
* E.g
* <code> Serving modern HTTP SSE server sink on port: 8650 </code>
* You can curl this port <code> curl localhost:8650</code> to view the output of the job.
*
 * To run via gradle:
 * ./gradlew :mantis-examples-wordcount:execute
*/
@Slf4j
public class WordCountJob extends MantisJobProvider<String> {

    @Override
    public Job<String> getJobInstance() {
        return MantisJob
                // Stream lines of text from the bundled copy of the Illiad.
                .source(new IlliadSource())
                // Count word occurrences within a 10 second hopping window.
                // (The previous "echoes the tweet" comment was a copy-paste leftover.)
                .stage((context, dataO) -> dataO
                        // Tokenize each line into (word, 1) pairs.
                        .flatMap((text) -> Observable.from(tokenize(text)))
                        // On a hopping window of 10 seconds
                        .window(10, TimeUnit.SECONDS)
                        .flatMap((wordCountPairObservable) -> wordCountPairObservable
                                // count how many times a word appears
                                .groupBy(WordCountPair::getWord)
                                .flatMap((groupO) -> groupO.reduce(0, (cnt, wordCntPair) -> cnt + 1)
                                        .map((cnt) -> new WordCountPair(groupO.getKey(), cnt))))
                        .map(WordCountPair::toString)
                        , StageConfigs.scalarToScalarConfig())
                // Reuse built in sink that eagerly subscribes and delivers data over SSE
                .sink(Sinks.eagerSubscribe(Sinks.sse((String data) -> data)))
                .metadata(new Metadata.Builder()
                        .name("WordCount")
                        .description("Reads Homer's The Illiad faster than we can.")
                        .build())
                .create();
    }

    /**
     * Splits a line of text into lowercase words, each paired with a count of 1.
     * NOTE(review): replaceAll("\\s*", "") appears to be a no-op here since
     * StringTokenizer already splits on whitespace — left as-is to preserve behavior.
     *
     * @param text a line of input text
     * @return one WordCountPair per token, each with count 1
     */
    private List<WordCountPair> tokenize(String text) {
        StringTokenizer tokenizer = new StringTokenizer(text);
        List<WordCountPair> wordCountPairs = new ArrayList<>();
        while(tokenizer.hasMoreTokens()) {
            String word = tokenizer.nextToken().replaceAll("\\s*", "").toLowerCase();
            wordCountPairs.add(new WordCountPair(word,1));
        }
        return wordCountPairs;
    }

    public static void main(String[] args) {
        // Runs the job locally; look for "Serving modern HTTP SSE server sink on port: XXXX".
        LocalJobExecutorNetworked.execute(new WordCountJob().getJobInstance());
    }
}
| 7,729 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-wordcount/src/main/java/com/netflix/mantis/examples | Create_ds/mantis/mantis-examples/mantis-examples-wordcount/src/main/java/com/netflix/mantis/examples/wordcount/WordCountDslJob.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.wordcount;
import com.netflix.mantis.examples.core.WordCountPair;
import com.netflix.mantis.examples.wordcount.sources.IlliadSource;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.MantisJobProvider;
import io.mantisrx.runtime.Metadata;
import io.mantisrx.runtime.core.MantisStream;
import io.mantisrx.runtime.core.WindowSpec;
import io.mantisrx.runtime.core.functions.SimpleReduceFunction;
import io.mantisrx.runtime.core.sinks.ObservableSinkImpl;
import io.mantisrx.runtime.core.sources.ObservableSourceImpl;
import io.mantisrx.runtime.executor.LocalJobExecutorNetworked;
import io.mantisrx.runtime.sink.Sinks;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
import lombok.extern.slf4j.Slf4j;
/**
* This sample demonstrates ingesting data from a text file and counting the number of occurrences of words within a 10
* sec hopping window.
 * Run the main method of this class and then look for the SSE port in the output
* E.g
* <code> Serving modern HTTP SSE server sink on port: 8650 </code>
* You can curl this port <code> curl localhost:8650</code> to view the output of the job.
*
 * To run via gradle:
 * ./gradlew :mantis-examples-wordcount:execute
*/
@Slf4j
public class WordCountDslJob extends MantisJobProvider<String> {

    @Override
    public Job<String> getJobInstance() {
        return MantisStream.create(null)
                // Stream lines of text from the bundled copy of the Illiad.
                .source(new ObservableSourceImpl<>(new IlliadSource()))
                // Tokenize each line into (word, 1) pairs.
                .flatMap(this::tokenize)
                .map(x -> {
                    // this guards against TooLongFrameException for some reason, need to investigate!
                    try {
                        Thread.sleep(0, 10000);
                    } catch (InterruptedException ignored) {
                        // NOTE(review): swallowing the interrupt loses the thread's interrupted
                        // status; consider Thread.currentThread().interrupt() here.
                    }
                    return x;
                })
                // Key the stream by word so the reduce below runs per word.
                .keyBy(WordCountPair::getWord)
                .window(WindowSpec.timed(Duration.ofSeconds(10)))
                // Sum the counts for each word within the 10 second window.
                .reduce((SimpleReduceFunction<WordCountPair>) (acc, item) -> {
                    if (acc.getWord() != null && !acc.getWord().isEmpty() && !acc.getWord().equals(item.getWord())) {
                        log.warn("keys dont match: acc ({}) vs item ({})", acc.getWord(), item.getWord());
                    }
                    return new WordCountPair(acc.getWord(), acc.getCount() + item.getCount());
                })
                .map(WordCountPair::toString)
                // Reuse built in sink that eagerly subscribes and delivers data over SSE
                .sink(new ObservableSinkImpl<>(Sinks.eagerSubscribe(Sinks.sse((String data) -> data))))
                .metadata(new Metadata.Builder()
                        .name("WordCount")
                        .description("Reads Homer's The Illiad faster than we can.")
                        .build())
                .create();
    }

    /**
     * Splits a line of text into lowercase words, each paired with a count of 1.
     *
     * @param text a line of input text
     * @return one WordCountPair per token, each with count 1
     */
    private List<WordCountPair> tokenize(String text) {
        StringTokenizer tokenizer = new StringTokenizer(text);
        List<WordCountPair> wordCountPairs = new ArrayList<>();
        while(tokenizer.hasMoreTokens()) {
            String word = tokenizer.nextToken().replaceAll("\\s*", "").toLowerCase();
            wordCountPairs.add(new WordCountPair(word,1));
        }
        return wordCountPairs;
    }

    public static void main(String[] args) {
        // Runs the job locally via the networked local executor.
        LocalJobExecutorNetworked.execute(new WordCountDslJob().getJobInstance());
    }
}
| 7,730 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-wordcount/src/main/java/com/netflix/mantis/examples/wordcount | Create_ds/mantis/mantis-examples/mantis-examples-wordcount/src/main/java/com/netflix/mantis/examples/wordcount/sources/IlliadSource.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.examples.wordcount.sources;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.Source;
import java.io.IOException;
import java.net.URISyntaxException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.concurrent.TimeUnit;
import java.util.stream.Stream;
import lombok.extern.log4j.Log4j;
import rx.Observable;
/**
* Ignore the contents of this file for the tutorial. The purpose is just to generate a stream of interesting data
* on which we can word count.
*/
@Log4j
public class IlliadSource implements Source<String> {

    /**
     * Every 10 seconds re-emits the full text of illiad.txt (a classpath resource),
     * one line per event. Errors reading the resource are logged and yield an empty
     * inner Observable for that tick.
     */
    @Override
    public Observable<Observable<String>> call(Context context, Index index) {
        return Observable.interval(10, TimeUnit.SECONDS)
                .map(__ -> {
                    try {
                        Path path = Paths.get(getClass().getClassLoader()
                                .getResource("illiad.txt").toURI());
                        // Files.lines() returns a Stream that must be closed to release
                        // the underlying file handle; the previous lazy-iterator approach
                        // never closed it. Reading eagerly avoids the leak — the resource
                        // file is small, so the extra memory is negligible.
                        return Observable.from(Files.readAllLines(path));
                    } catch (IOException ex) {
                        log.error("IOException while reading illiad.txt from resources", ex);
                    } catch (URISyntaxException ex) {
                        log.error("URISyntaxException while loading illiad.txt from resources.", ex);
                    }
                    return Observable.<String>empty();
                });
    }

    @Override
    public void close() throws IOException {
    }
}
| 7,731 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-jobconnector-sample/src/main/java/com/netflix/mantis | Create_ds/mantis/mantis-examples/mantis-examples-jobconnector-sample/src/main/java/com/netflix/mantis/samples/JobConnectorJob.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.samples;
import com.netflix.mantis.samples.stage.EchoStage;
import io.mantisrx.connector.job.core.MantisSourceJobConnector;
import io.mantisrx.connector.job.source.JobSource;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.MantisJob;
import io.mantisrx.runtime.MantisJobProvider;
import io.mantisrx.runtime.Metadata;
import io.mantisrx.runtime.executor.LocalJobExecutorNetworked;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.runtime.sink.Sinks;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import lombok.extern.slf4j.Slf4j;
/**
* This sample demonstrates how to connect to the output of another job using the {@link JobSource}
* If the target job is a source job then you can request a filtered stream of events from the source job
* by passing an MQL query.
* In this example we connect to the latest running instance of SyntheticSourceJob using the query
* select country from stream where status==500 and simply echo the output.
* <p>
* Run this sample by executing the main method of this class. Then look for the SSE port where the output of this job
* will be available for streaming. E.g Serving modern HTTP SSE server sink on port: 8299
* via command line do ../gradlew execute
* <p>
* Note: this sample may not work in your IDE as the Mantis runtime needs to discover the location of the
* SyntheticSourceJob.
*/
@Slf4j
public class JobConnectorJob extends MantisJobProvider<String> {

    @Override
    public Job<String> getJobInstance() {
        return MantisJob
                // Stream events from the job named in the job parameters.
                .source(new JobSource())
                // Simply echo the received data.
                .stage(new EchoStage(), EchoStage.config())
                // Built-in sink: eagerly subscribes and serves the output over SSE.
                .sink(Sinks.eagerSubscribe(Sinks.sse((String data) -> data)))
                .metadata(new Metadata.Builder()
                        .name("ConnectToJob")
                        .description("Connects to the output of another job"
                                + " and simply echoes the data")
                        .build())
                .create();
    }

    public static void main(String[] args) throws JsonProcessingException {
        // Describe the upstream source job and the MQL query to subscribe with.
        JobSource.TargetInfo sourceJobTarget = new JobSource.TargetInfoBuilder()
                .withClientId("abc")
                .withSourceJobName("SyntheticSourceJob")
                .withQuery("select country from stream where status==500")
                .build();

        List<JobSource.TargetInfo> targets = new ArrayList<>();
        targets.add(sourceJobTarget);

        Map<String, Object> targetPayload = new HashMap<>();
        targetPayload.put("targets", targets);

        // Encode the target list as JSON and hand it to the job as a parameter.
        String targetJson = new ObjectMapper().writeValueAsString(targetPayload);

        LocalJobExecutorNetworked.execute(new JobConnectorJob().getJobInstance(),
                new Parameter(MantisSourceJobConnector.MANTIS_SOURCEJOB_TARGET_KEY, targetJson));
    }
}
| 7,732 |
0 | Create_ds/mantis/mantis-examples/mantis-examples-jobconnector-sample/src/main/java/com/netflix/mantis/samples | Create_ds/mantis/mantis-examples/mantis-examples-jobconnector-sample/src/main/java/com/netflix/mantis/samples/stage/EchoStage.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.mantis.samples.stage;
import io.mantisrx.common.MantisServerSentEvent;
import io.mantisrx.common.codec.Codecs;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.computation.ScalarComputation;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;
import rx.Observable;
/**
* A simple stage that extracts data from the incoming {@link MantisServerSentEvent} and echoes it.
*/
@Slf4j
public class EchoStage implements ScalarComputation<MantisServerSentEvent, String> {

    // NOTE: the unused "private static final ObjectMapper mapper" field was removed;
    // this stage performs no JSON (de)serialization.

    /**
     * Extracts the payload string from each incoming {@link MantisServerSentEvent},
     * logs it, and passes it through unchanged.
     *
     * @param context the Mantis job context
     * @param eventsO the stream of server-sent events from the upstream job
     * @return the stream of event payload strings
     */
    @Override
    public Observable<String> call(Context context, Observable<MantisServerSentEvent> eventsO) {
        return eventsO
                .map(MantisServerSentEvent::getEventAsString)
                .map((event) -> {
                    log.info("Received: {}", event);
                    return event;
                });
    }

    @Override
    public void init(Context context) {
    }

    /** Stage config: scalar-to-scalar with a plain String codec. */
    public static ScalarToScalar.Config<MantisServerSentEvent, String> config() {
        return new ScalarToScalar.Config<MantisServerSentEvent, String>()
                .codec(Codecs.string());
    }
}
| 7,733 |
0 | Create_ds/mantis/mantis-discovery-proto/src/test/java/io/mantisrx/discovery | Create_ds/mantis/mantis-discovery-proto/src/test/java/io/mantisrx/discovery/proto/AppJobClustersMapTest.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.discovery.proto;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.junit.Test;
public class AppJobClustersMapTest {

    /**
     * Round-trips an AppJobClustersMap through Jackson and verifies stream -> job
     * cluster resolution for both the default app mapping and a custom app mapping.
     */
    @Test
    public void testSerDe() throws IOException {
        Map<String, Object> mappings = new HashMap<>();
        Map<String, Object> defaultAppMappings = new HashMap<>();
        defaultAppMappings.put(StreamJobClusterMap.DEFAULT_STREAM_KEY, "SharedPushEventSource");
        defaultAppMappings.put("testEventStream", "TestPushEventSource");
        mappings.put(AppJobClustersMap.DEFAULT_APP_KEY, defaultAppMappings);

        Map<String, Object> customAppMappings = new HashMap<>();
        customAppMappings.put("customEventStream", "CustomPushEventSource");
        customAppMappings.put(StreamJobClusterMap.DEFAULT_STREAM_KEY, "CustomDefaultPushEventSource");
        mappings.put("custom", customAppMappings);

        AppJobClustersMap mapV1 = new AppJobClustersMap(AppJobClustersMap.VERSION_1, System.currentTimeMillis(), mappings);

        // Default app: explicit stream wins, unknown streams fall back to __default__.
        assertEquals("SharedPushEventSource", mapV1.getStreamJobClusterMap(AppJobClustersMap.DEFAULT_APP_KEY).getJobCluster(StreamJobClusterMap.DEFAULT_STREAM_KEY));
        assertEquals("SharedPushEventSource", mapV1.getStreamJobClusterMap(AppJobClustersMap.DEFAULT_APP_KEY).getJobCluster("AnyRandomStream"));
        assertEquals("TestPushEventSource", mapV1.getStreamJobClusterMap(AppJobClustersMap.DEFAULT_APP_KEY).getJobCluster("testEventStream"));

        StreamJobClusterMap customStreamJCMap = mapV1.getStreamJobClusterMap("custom");
        // assertEquals reports the actual size on failure, unlike assertTrue(size() == 2).
        assertEquals(2, customStreamJCMap.getStreamJobClusterMap().size());
        assertTrue(customStreamJCMap.getStreamJobClusterMap().containsKey(StreamJobClusterMap.DEFAULT_STREAM_KEY));
        assertFalse(customStreamJCMap.getStreamJobClusterMap().containsKey("testEventStream"));
        assertTrue(customStreamJCMap.getStreamJobClusterMap().containsKey("customEventStream"));
        assertEquals("CustomDefaultPushEventSource", customStreamJCMap.getJobCluster(StreamJobClusterMap.DEFAULT_STREAM_KEY));
        assertEquals("CustomDefaultPushEventSource", customStreamJCMap.getJobCluster("AnyRandomStreamName"));
        assertEquals("CustomDefaultPushEventSource", customStreamJCMap.getJobCluster("testEventStream"));
        assertEquals("CustomPushEventSource", customStreamJCMap.getJobCluster("customEventStream"));

        ObjectMapper mapper = new ObjectMapper();
        String mappingStr = mapper.writeValueAsString(mapV1);
        AppJobClustersMap appJobClustersMap = mapper.readValue(mappingStr, AppJobClustersMap.class);
        assertEquals(mapV1, appJobClustersMap);
    }

    /**
     * Verifies deserialization from a raw JSON string, including tolerance of the
     * wire format and fallback to the app's __default__ stream mapping.
     */
    @Test
    public void testDeSerFromString() throws IOException {
        final ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        final String mapping = "{\"version\":\"1\",\"timestamp\":2,\"mappings\":{\"testApp\":{\"__default__\":\"SharedMrePublishEventSource\"}}}";
        AppJobClustersMap appJobClustersMap = mapper.readValue(mapping, AppJobClustersMap.class);
        assertEquals(2, appJobClustersMap.getTimestamp());
        assertEquals("testApp", appJobClustersMap.getStreamJobClusterMap("testApp").getAppName());
        assertEquals("SharedMrePublishEventSource", appJobClustersMap.getStreamJobClusterMap("testApp").getJobCluster("testStream"));
    }
}
| 7,734 |
0 | Create_ds/mantis/mantis-discovery-proto/src/main/java/io/mantisrx/discovery | Create_ds/mantis/mantis-discovery-proto/src/main/java/io/mantisrx/discovery/proto/MantisWorker.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.discovery.proto;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;
import java.util.Objects;
/**
 * Immutable host/port pair identifying a single Mantis worker endpoint.
 */
public class MantisWorker {

    private final String host;
    private final int port;

    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public MantisWorker(@JsonProperty("host") String host,
                        @JsonProperty("port") int port) {
        this.host = host;
        this.port = port;
    }

    public String getHost() {
        return host;
    }

    public int getPort() {
        return port;
    }

    @Override
    public boolean equals(final Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        final MantisWorker that = (MantisWorker) o;
        return port == that.port
            && Objects.equals(host, that.host);
    }

    @Override
    public int hashCode() {
        return Objects.hash(host, port);
    }

    @Override
    public String toString() {
        return "MantisWorker{" + "host='" + host + '\''
            + ", port=" + port
            + '}';
    }

    /**
     * Returns an {@link InetSocketAddress} representation of this worker.
     * The address is created unresolved, so no DNS lookup is performed here.
     */
    public InetSocketAddress toInetSocketAddress() {
        return InetSocketAddress.createUnresolved(host, port);
    }

    /**
     * Estimate the size (in Bytes) of this object using the size of its fields.
     * Encodes the host with an explicit UTF-8 charset so the estimate does not
     * vary with the JVM's platform default charset.
     */
    public int size() {
        return host.getBytes(StandardCharsets.UTF_8).length // host
            + Integer.BYTES; // port
    }
}
| 7,735 |
0 | Create_ds/mantis/mantis-discovery-proto/src/main/java/io/mantisrx/discovery | Create_ds/mantis/mantis-discovery-proto/src/main/java/io/mantisrx/discovery/proto/JobDiscoveryInfo.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.discovery.proto;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;
/**
 * Discovery snapshot for a single Mantis job: the job cluster, job id, and the
 * per-stage worker locations.
 */
public class JobDiscoveryInfo {

    // Stage 1 is the ingest (source) stage by Mantis convention.
    @JsonIgnore
    private static final int INGEST_STAGE = 1;

    private final String jobCluster;
    private final String jobId;
    private final Map<Integer, StageWorkers> stageWorkersMap;

    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public JobDiscoveryInfo(@JsonProperty("jobCluster") final String jobCluster,
                            @JsonProperty("jobId") final String jobId,
                            @JsonProperty("stageWorkersMap") final Map<Integer, StageWorkers> stageWorkersMap) {
        this.jobCluster = jobCluster;
        this.jobId = jobId;
        this.stageWorkersMap = stageWorkersMap;
    }

    public String getJobCluster() {
        return jobCluster;
    }

    public String getJobId() {
        return jobId;
    }

    public Map<Integer, StageWorkers> getStageWorkersMap() {
        return stageWorkersMap;
    }

    /**
     * Workers of the ingest stage; an empty {@link StageWorkers} is returned when
     * stage 1 has no entry.
     */
    @JsonIgnore
    public StageWorkers getIngestStageWorkers() {
        final StageWorkers emptyFallback = new StageWorkers(jobCluster, jobId, INGEST_STAGE, Collections.emptyList());
        return stageWorkersMap.getOrDefault(INGEST_STAGE, emptyFallback);
    }

    // NOTE(review): equality is based on jobId and stageWorkersMap only; jobCluster
    // is excluded — presumably a jobId uniquely implies its cluster. Confirm.
    @Override
    public boolean equals(final Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        final JobDiscoveryInfo other = (JobDiscoveryInfo) o;
        return Objects.equals(jobId, other.jobId)
                && Objects.equals(stageWorkersMap, other.stageWorkersMap);
    }

    @Override
    public int hashCode() {
        return Objects.hash(jobId, stageWorkersMap);
    }

    @Override
    public String toString() {
        return new StringBuilder("JobDiscoveryInfo{")
                .append(" jobCluster='").append(jobCluster).append('\'')
                .append(", jobId='").append(jobId).append('\'')
                .append(", stageWorkersMap=").append(stageWorkersMap)
                .append('}')
                .toString();
    }
}
| 7,736 |
0 | Create_ds/mantis/mantis-discovery-proto/src/main/java/io/mantisrx/discovery | Create_ds/mantis/mantis-discovery-proto/src/main/java/io/mantisrx/discovery/proto/StageWorkers.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.discovery.proto;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
import java.util.Objects;
/**
 * The set of workers serving one stage of a Mantis job.
 */
public class StageWorkers {

    private final String jobCluster;
    private final String jobId;
    private final int stageNum;
    private final List<MantisWorker> workers;

    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public StageWorkers(@JsonProperty("jobCluster") String jobCluster,
                        @JsonProperty("jobId") String jobId,
                        @JsonProperty("stageNum") int stageNum,
                        @JsonProperty("workers") List<MantisWorker> workers) {
        this.jobCluster = jobCluster;
        this.jobId = jobId;
        this.stageNum = stageNum;
        this.workers = workers;
    }

    public String getJobCluster() {
        return jobCluster;
    }

    public String getJobId() {
        return jobId;
    }

    public int getStageNum() {
        return stageNum;
    }

    public List<MantisWorker> getWorkers() {
        return workers;
    }

    // NOTE(review): equality excludes jobCluster — presumably jobId already
    // identifies the cluster. Confirm before relying on this.
    @Override
    public boolean equals(final Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        final StageWorkers other = (StageWorkers) o;
        return stageNum == other.stageNum
                && Objects.equals(jobId, other.jobId)
                && Objects.equals(workers, other.workers);
    }

    @Override
    public int hashCode() {
        return Objects.hash(jobId, stageNum, workers);
    }

    @Override
    public String toString() {
        return new StringBuilder("StageWorkers{")
                .append("jobCluster='").append(jobCluster).append('\'')
                .append(", jobId='").append(jobId).append('\'')
                .append(", stageNum=").append(stageNum)
                .append(", workers=").append(workers)
                .append('}')
                .toString();
    }
}
| 7,737 |
0 | Create_ds/mantis/mantis-discovery-proto/src/main/java/io/mantisrx/discovery | Create_ds/mantis/mantis-discovery-proto/src/main/java/io/mantisrx/discovery/proto/StreamJobClusterMap.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.discovery.proto;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;
/**
 * Stream name -> Mantis job cluster mapping for a single application.
 */
public class StreamJobClusterMap {

    // Fallback key consulted when a stream has no explicit mapping.
    public static final String DEFAULT_STREAM_KEY = "__default__";

    private String appName;
    private Map<String, String> mappings;

    // NOTE(review): this @JsonCreator constructor has no @JsonProperty parameter
    // annotations; deserialization presumably goes through the no-arg constructor
    // plus field reflection — confirm against the Jackson configuration.
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public StreamJobClusterMap(final String appName,
                               final Map<String, String> mappings) {
        this.appName = appName;
        this.mappings = mappings;
    }

    // default constructor for Json deserialize
    public StreamJobClusterMap() {
    }

    public String getAppName() {
        return appName;
    }

    /**
     * Resolves the job cluster for a stream, falling back to the
     * {@link #DEFAULT_STREAM_KEY} entry when the stream has no explicit mapping.
     */
    @JsonIgnore
    public String getJobCluster(String streamName) {
        return mappings.getOrDefault(streamName, mappings.get(DEFAULT_STREAM_KEY));
    }

    /** @return an unmodifiable view of the stream -> job cluster mapping */
    public Map<String, String> getStreamJobClusterMap() {
        return Collections.unmodifiableMap(mappings);
    }

    @Override
    public boolean equals(final Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        final StreamJobClusterMap other = (StreamJobClusterMap) o;
        return Objects.equals(appName, other.appName)
                && Objects.equals(mappings, other.mappings);
    }

    @Override
    public int hashCode() {
        return Objects.hash(appName, mappings);
    }

    @Override
    public String toString() {
        return "StreamJobClusterMap{"
                + "appName='" + appName + '\''
                + ", mappings=" + mappings
                + '}';
    }
}
| 7,738 |
0 | Create_ds/mantis/mantis-discovery-proto/src/main/java/io/mantisrx/discovery | Create_ds/mantis/mantis-discovery-proto/src/main/java/io/mantisrx/discovery/proto/AppJobClustersMap.java | /*
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.discovery.proto;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
 * Versioned mapping of application name -> (stream name -> Mantis job cluster),
 * used to discover which source job cluster serves a given app/stream pair.
 */
public class AppJobClustersMap {

    public static final String VERSION_1 = "1";
    public static final String DEFAULT_APP_KEY = "__default__";

    /**
     * time stamp associated with the mapping when the mapping was created/updated
     */
    private long timestamp;
    private final Map<String, Map<String, String>> mappings = new HashMap<>();
    private String version = VERSION_1;

    /**
     * Creates a mapping snapshot.
     *
     * @param version  mapping format version; only {@link #VERSION_1} is supported
     * @param ts       timestamp at which the mapping was created/updated
     * @param mappings app name -> (stream name -> job cluster); values are typed
     *                 {@code Object} for Jackson but must be {@code Map<String, String>}
     * @throws NullPointerException     if {@code version} or {@code mappings} is null
     * @throws IllegalArgumentException if {@code version} is not supported
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    @SuppressWarnings("unchecked")
    public AppJobClustersMap(@JsonProperty("version") String version,
                             @JsonProperty("timestamp") long ts,
                             @JsonProperty("mappings") Map<String, Object> mappings) {
        checkNotNull(mappings, "mappings");
        checkNotNull(version, "version");
        this.timestamp = ts;
        if (!version.equals(VERSION_1)) {
            throw new IllegalArgumentException("version " + version + " is not supported");
        }
        this.version = version;
        // Map.forEach is simpler than entrySet().stream().forEach() with identical
        // effect; the unchecked cast mirrors the serialized wire format.
        mappings.forEach((app, streamMap) -> this.mappings.put(app, (Map<String, String>) streamMap));
    }

    // default constructor for Json deserialize
    public AppJobClustersMap() {
    }

    /**
     * Ensures the object reference is not null.
     */
    @JsonIgnore
    public static <T> T checkNotNull(T obj, String name) {
        if (obj == null) {
            String msg = String.format("parameter '%s' cannot be null", name);
            throw new NullPointerException(msg);
        }
        return obj;
    }

    public String getVersion() {
        return version;
    }

    /** @return an unmodifiable view of the app -> stream mappings */
    public Map<String, Map<String, String>> getMappings() {
        return Collections.unmodifiableMap(mappings);
    }

    public long getTimestamp() {
        return timestamp;
    }

    @JsonIgnore
    private Map<String, String> defaultStreamJobClusterMap() {
        return mappings.getOrDefault(DEFAULT_APP_KEY, Collections.emptyMap());
    }

    /**
     * Returns the stream -> job cluster mapping for {@code appName}, falling back
     * to the {@link #DEFAULT_APP_KEY} mapping when the app has no explicit entry.
     */
    @JsonIgnore
    public StreamJobClusterMap getStreamJobClusterMap(final String appName) {
        Map<String, String> mappings = this.mappings.getOrDefault(appName, defaultStreamJobClusterMap());
        return new StreamJobClusterMap(appName, mappings);
    }

    /**
     * Returns a copy of this mapping restricted to the given apps, each resolved
     * with default-app fallback. A null {@code apps} list yields an empty mapping.
     */
    @JsonIgnore
    public AppJobClustersMap getFilteredAppJobClustersMap(final List<String> apps) {
        AppJobClustersMap.Builder builder = new AppJobClustersMap.Builder()
                .withVersion(version)
                .withTimestamp(timestamp);
        if (apps != null) {
            for (final String appName : apps) {
                builder = builder.withAppJobCluster(appName, getStreamJobClusterMap(appName));
            }
        }
        return builder.build();
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        AppJobClustersMap that = (AppJobClustersMap) o;
        return timestamp == that.timestamp &&
                version.equals(that.version) &&
                mappings.equals(that.mappings);
    }

    @Override
    public int hashCode() {
        return Objects.hash(version, timestamp, mappings);
    }

    @Override
    public String toString() {
        return "AppJobClustersMap{" +
                "version='" + version + '\'' +
                ", timestamp=" + timestamp +
                ", mappings=" + mappings +
                '}';
    }

    /** Mutable builder used to assemble filtered copies of the mapping. */
    static class Builder {

        private final Map<String, Object> mappings = new HashMap<>();
        private String version;
        private long ts = -1;

        Builder withVersion(String version) {
            this.version = version;
            return this;
        }

        Builder withTimestamp(long ts) {
            this.ts = ts;
            return this;
        }

        Builder withAppJobCluster(String appName, StreamJobClusterMap streamJobCluster) {
            mappings.put(appName, streamJobCluster.getStreamJobClusterMap());
            return this;
        }

        /**
         * @throws IllegalArgumentException if version or timestamp was never set
         */
        public AppJobClustersMap build() {
            if (version == null) {
                throw new IllegalArgumentException("version cannot be null when creating AppJobClustersMap");
            }
            if (ts == -1) {
                throw new IllegalArgumentException("timestamp not specified when creating AppJobClustersMap");
            }
            return new AppJobClustersMap(version, ts, mappings);
        }
    }
}
| 7,739 |
0 | Create_ds/mantis/mantis-source-jobs/mantis-source-job-publish/src/main/java/io/mantisrx/sourcejobs | Create_ds/mantis/mantis-source-jobs/mantis-source-job-publish/src/main/java/io/mantisrx/sourcejobs/publish/PushRequestEventSourceJob.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejobs.publish;
import com.mantisrx.common.utils.MantisSourceJobConstants;
import io.mantisrx.connector.publish.core.QueryRegistry;
import io.mantisrx.connector.publish.source.http.PushHttpSource;
import io.mantisrx.connector.publish.source.http.SourceSink;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.MantisJob;
import io.mantisrx.runtime.MantisJobProvider;
import io.mantisrx.runtime.Metadata;
import io.mantisrx.runtime.executor.LocalJobExecutorNetworked;
import io.mantisrx.runtime.parameter.type.IntParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import io.mantisrx.sourcejobs.publish.core.RequestPostProcessor;
import io.mantisrx.sourcejobs.publish.core.RequestPreProcessor;
import io.mantisrx.sourcejobs.publish.core.Utils;
import io.mantisrx.sourcejobs.publish.stages.EchoStage;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class PushRequestEventSourceJob extends MantisJobProvider<String> {

    private static final Logger LOGGER = LoggerFactory.getLogger(PushRequestEventSourceJob.class);
    private static final String MANTIS_CLIENT_ID = "MantisPushRequestEvents";
    private static final int DEFAULT_PORT = 9090;

    /**
     * Builds the source job: an HTTP push source feeding an echo stage whose
     * output is served to downstream consumers over SSE by the source sink.
     */
    @Override
    public Job<String> getJobInstance() {
        String jobId = Utils.getEnvVariable("JOB_ID", "PushRequestEventSourceJobLocal-1");
        String mantisClientId = MANTIS_CLIENT_ID + "_" + jobId;

        QueryRegistry queryRegistry = new QueryRegistry.Builder()
                .withClientIdPrefix(mantisClientId)
                .build();

        String customPortName = "MANTIS_WORKER_CUSTOM_PORT";
        String consolePort = Utils.getEnvVariable(customPortName, String.valueOf(DEFAULT_PORT));
        int port = DEFAULT_PORT;
        if (consolePort != null && !consolePort.isEmpty()) {
            try {
                port = Integer.parseInt(consolePort);
            } catch (NumberFormatException nfe) {
                // A malformed custom-port value must not kill job startup; fall
                // back to the default port and log the bad input.
                LOGGER.warn("Invalid {} value '{}', falling back to port {}",
                        customPortName, consolePort, DEFAULT_PORT);
            }
        }
        return
            MantisJob
                .source(new PushHttpSource(queryRegistry, port))
                .stage(new EchoStage(), EchoStage.config())
                .sink(new SourceSink(
                        new RequestPreProcessor(queryRegistry),
                        new RequestPostProcessor(queryRegistry),
                        mantisClientId))
                .parameterDefinition(new IntParameter()
                        .name(MantisSourceJobConstants.ECHO_STAGE_BUFFER_MILLIS)
                        .description("millis to buffer events before processing")
                        .validator(Validators.range(100, 1000))
                        .defaultValue(250)
                        .build())
                .metadata(new Metadata.Builder()
                        .name("PushRequestEventSourceJob")
                        .description("Fetches request events from any source in a distributed manner. "
                                + "The output is served via HTTP server using SSE protocol.")
                        .build())
                .create();
    }

    public static void main(String[] args) {
        LocalJobExecutorNetworked.execute(new PushRequestEventSourceJob().getJobInstance());
    }
}
| 7,740 |
0 | Create_ds/mantis/mantis-source-jobs/mantis-source-job-publish/src/main/java/io/mantisrx/sourcejobs/publish | Create_ds/mantis/mantis-source-jobs/mantis-source-job-publish/src/main/java/io/mantisrx/sourcejobs/publish/core/RequestPostProcessor.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejobs.publish.core;
import com.mantisrx.common.utils.MantisSourceJobConstants;
import io.mantisrx.connector.publish.core.QueryRegistry;
import io.mantisrx.runtime.Context;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
import rx.functions.Func2;
/**
 * Sink connection-close hook: deregisters the subscription identified by the
 * request's query parameters from the {@link QueryRegistry}.
 */
public class RequestPostProcessor implements Func2<Map<String, List<String>>, Context, Void> {

    private static final Logger LOGGER = Logger.getLogger(RequestPostProcessor.class);

    private final QueryRegistry queryRegistry;

    public RequestPostProcessor(QueryRegistry queryRegistry) {
        this.queryRegistry = queryRegistry;
    }

    @Override
    public Void call(Map<String, List<String>> queryParams, Context context) {
        LOGGER.info("RequestPostProcessor:queryParams: " + queryParams);
        if (queryParams == null) {
            return null;
        }
        boolean hasSubscriptionId = queryParams.containsKey(MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME);
        boolean hasCriterion = queryParams.containsKey(MantisSourceJobConstants.CRITERION_PARAM_NAME);
        if (hasSubscriptionId && hasCriterion) {
            final String subscriptionId = queryParams.get(MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME).get(0);
            final String criterion = queryParams.get(MantisSourceJobConstants.CRITERION_PARAM_NAME).get(0);
            // Fall back to the wildcard app when no explicit target app was given.
            String targetApp = QueryRegistry.ANY;
            if (queryParams.containsKey(MantisSourceJobConstants.TARGET_APP_NAME_KEY)) {
                targetApp = queryParams.get(MantisSourceJobConstants.TARGET_APP_NAME_KEY).get(0);
            }
            if (subscriptionId != null && criterion != null) {
                queryRegistry.deregisterQuery(targetApp, subscriptionId, criterion);
            }
        }
        return null;
    }
}
| 7,741 |
0 | Create_ds/mantis/mantis-source-jobs/mantis-source-job-publish/src/main/java/io/mantisrx/sourcejobs/publish | Create_ds/mantis/mantis-source-jobs/mantis-source-job-publish/src/main/java/io/mantisrx/sourcejobs/publish/core/Utils.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejobs.publish.core;
import io.mantisrx.common.MantisProperties;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
 * Miscellaneous helpers for reading environment configuration and parsing
 * comma-separated parameter values.
 */
public class Utils {

    private Utils() {
        // Static utility class; not meant to be instantiated.
    }

    /**
     * Returns the value of the given Mantis property/environment variable, or
     * {@code defaultValue} when it is unset or empty.
     */
    public static String getEnvVariable(String envVariableName, String defaultValue) {
        String v = MantisProperties.getProperty(envVariableName);
        if (v != null && !v.isEmpty()) {
            return v;
        }
        return defaultValue;
    }

    /**
     * Splits a comma-separated string (whitespace around commas is ignored) into
     * a mutable list of terms. Returns an empty mutable list for null/empty input.
     */
    public static List<String> convertCommaSeparatedStringToList(String filterBy) {
        List<String> terms = new ArrayList<>();
        if (filterBy != null && !filterBy.isEmpty()) {
            // Copy into the ArrayList so both branches return a mutable list;
            // Arrays.asList alone is a fixed-size view.
            terms.addAll(Arrays.asList(filterBy.split("\\s*,\\s*")));
        }
        return terms;
    }
}
| 7,742 |
0 | Create_ds/mantis/mantis-source-jobs/mantis-source-job-publish/src/main/java/io/mantisrx/sourcejobs/publish | Create_ds/mantis/mantis-source-jobs/mantis-source-job-publish/src/main/java/io/mantisrx/sourcejobs/publish/core/RequestPreProcessor.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejobs.publish.core;
import com.mantisrx.common.utils.MantisSourceJobConstants;
import io.mantisrx.connector.publish.core.QueryRegistry;
import io.mantisrx.runtime.Context;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.functions.Func2;
/**
 * Sink connection-open hook: registers the subscription identified by the
 * request's query parameters with the {@link QueryRegistry}.
 */
public class RequestPreProcessor implements Func2<Map<String, List<String>>, Context, Void> {

    private static final Logger LOGGER = LoggerFactory.getLogger(RequestPreProcessor.class);

    private final QueryRegistry queryRegistry;
    private final Map<String, String> emptyMap = new HashMap<String, String>();

    public RequestPreProcessor(QueryRegistry queryRegistry) {
        this.queryRegistry = queryRegistry;
    }

    @Override
    public Void call(Map<String, List<String>> queryParams, Context context) {
        LOGGER.info("RequestPreProcessor:queryParams: " + queryParams);
        if (queryParams == null) {
            return null;
        }
        boolean hasSubscriptionId = queryParams.containsKey(MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME);
        boolean hasCriterion = queryParams.containsKey(MantisSourceJobConstants.CRITERION_PARAM_NAME);
        if (hasSubscriptionId && hasCriterion) {
            final String subscriptionId = queryParams.get(MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME).get(0);
            final String criterion = queryParams.get(MantisSourceJobConstants.CRITERION_PARAM_NAME).get(0);
            // Fall back to the wildcard app when no explicit target app was given.
            String targetApp = QueryRegistry.ANY;
            if (queryParams.containsKey(MantisSourceJobConstants.TARGET_APP_NAME_KEY)) {
                targetApp = queryParams.get(MantisSourceJobConstants.TARGET_APP_NAME_KEY).get(0);
            }
            if (subscriptionId != null && criterion != null) {
                queryRegistry.registerQuery(targetApp, subscriptionId, criterion, emptyMap, false);
            }
        }
        return null;
    }
}
| 7,743 |
0 | Create_ds/mantis/mantis-source-jobs/mantis-source-job-publish/src/main/java/io/mantisrx/sourcejobs/publish | Create_ds/mantis/mantis-source-jobs/mantis-source-job-publish/src/main/java/io/mantisrx/sourcejobs/publish/stages/EchoStage.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejobs.publish.stages;
import com.mantisrx.common.utils.MantisSourceJobConstants;
import io.mantisrx.common.codec.Codecs;
import io.mantisrx.publish.netty.proto.MantisEventEnvelope;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.computation.ScalarComputation;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.type.IntParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.log4j.Logger;
import rx.Observable;
public class EchoStage implements ScalarComputation<String, String> {
private static final Logger LOGGER = Logger.getLogger(EchoStage.class);
private String clusterName;
private int bufferDuration = 100;
private String sourceNamePrefix;
private ObjectReader mantisEventEnvelopeReader;
private final ObjectMapper mapper = new ObjectMapper();
@Override
public void init(Context context) {
clusterName = context.getWorkerInfo().getJobClusterName();
bufferDuration = (int) context.getParameters().get(MantisSourceJobConstants.ECHO_STAGE_BUFFER_MILLIS);
sourceNamePrefix = "{" + MantisSourceJobConstants.MANTIS_META_SOURCE_NAME + ":" + "\"" + clusterName + "\",";
mantisEventEnvelopeReader = mapper.readerFor(MantisEventEnvelope.class);
}
private String insertSourceJobName(String event) {
StringBuilder sb = new StringBuilder(sourceNamePrefix);
int indexofbrace = event.indexOf('{');
if (indexofbrace != -1) {
event = sb.append(event.substring(indexofbrace + 1)).toString();
}
return event;
}
public Observable<String> call(Context context,
Observable<String> events) {
return events
.buffer(bufferDuration, TimeUnit.MILLISECONDS)
.flatMapIterable(i -> i)
.filter((event) -> !event.isEmpty())
.flatMap((envelopeStr) -> {
try {
MantisEventEnvelope envelope = mantisEventEnvelopeReader.readValue(envelopeStr);
return Observable.from(envelope.getEventList())
.map((event) -> event.getData());
} catch (IOException e) {
LOGGER.error(e.getMessage());
// Could not parse just send it along.
return Observable.just(envelopeStr);
}
})
.map(this::insertSourceJobName)
.onErrorResumeNext((t1) -> {
LOGGER.error("Exception occurred in : " + clusterName + " error is " + t1.getMessage());
return Observable.empty();
});
}
public static List<ParameterDefinition<?>> getParameters() {
List<ParameterDefinition<?>> params = new ArrayList<>();
// buffer duration
params.add(new IntParameter()
.name(MantisSourceJobConstants.ECHO_STAGE_BUFFER_MILLIS)
.description("buffer time in millis")
.validator(Validators.range(100, 10000))
.defaultValue(250)
.build());
return params;
}
public static ScalarToScalar.Config<String, String> config() {
return new ScalarToScalar.Config<String, String>()
.codec(Codecs.string())
.concurrentInput()
.withParameters(getParameters());
}
} | 7,744 |
0 | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka/CustomizedAutoAckTaggingStage.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.kafka;
import static io.mantisrx.common.SystemParameters.STAGE_CONCURRENCY;
import io.mantisrx.connector.kafka.KafkaAckable;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.type.IntParameter;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import io.mantisrx.shaded.com.google.common.collect.Lists;
import io.mantisrx.sourcejob.kafka.core.TaggedData;
import io.mantisrx.sourcejob.kafka.core.utils.JsonUtility;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicLong;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.functions.Func1;
/**
 * Tagging stage that augments each parsed Kafka event with source-job metadata
 * (job name and ingest timestamp) and can optionally flatten selected JSON-string
 * fields into dotted top-level keys. Flattening and the timestamp field name are
 * configured through JOB_PARAM_* environment variables read in init().
 */
public class CustomizedAutoAckTaggingStage extends AutoAckTaggingStage {

    private static Logger logger = LoggerFactory.getLogger(CustomizedAutoAckTaggingStage.class);

    private String jobName;
    // Event field holding the producer-side timestamp; used to track ingest lag.
    private String timestampField = "_ts_";
    // High-water mark of the largest event timestamp seen so far.
    private AtomicLong latestTimeStamp = new AtomicLong();
    private boolean isFlattenFields = false;
    private List<String> fieldsToFlatten = new ArrayList<>();
    // Hook applied to each raw event before tagging; currently the identity function.
    private Func1<Map<String, Object>, Map<String, Object>> preMapperFunc = t -> t;

    @Override
    public void init(Context context) {
        super.init(context);
        jobName = context.getWorkerInfo().getJobName();
        // NOTE(review): configuration is read from environment variables here even though
        // matching job parameters are declared in getParameters(); assumes the runtime
        // exports job params as JOB_PARAM_* env vars -- confirm.
        String timeStampFieldParam = System.getenv("JOB_PARAM_timeStampField");
        if (timeStampFieldParam != null && !timeStampFieldParam.isEmpty()) {
            this.timestampField = timeStampFieldParam;
        }
        // "NONE" (the parameter default) disables flattening entirely.
        String flattenFieldsStr = System.getenv("JOB_PARAM_fieldsToFlatten");
        if (flattenFieldsStr != null && !flattenFieldsStr.isEmpty() && !flattenFieldsStr.equals("NONE")) {
            String[] fields = flattenFieldsStr.split(",");
            if (fields.length > 0) {
                isFlattenFields = true;
                for (String field : fields) {
                    fieldsToFlatten.add(field.trim());
                }
                logger.info("Field flattening enabled for fields {}", fieldsToFlatten);
            }
        }
    }

    // Flattens every configured field in place on the given event map.
    private void flattenFields(Map<String, Object> rawData) {
        for (String field : fieldsToFlatten) {
            flattenField(rawData, field);
        }
    }

    /**
     * Parses the JSON string stored under fieldName and copies its top-level entries
     * back into rawData under "<fieldName>.<key>" keys. Parse/cast failures are
     * logged and ignored so a bad field never drops the event.
     */
    private void flattenField(Map<String, Object> rawData, String fieldName) {
        // Assumes the field value is a JSON string -- a ClassCastException lands in the catch below.
        String dataJson = (String) rawData.get(fieldName);
        try {
            Map<String, Object> geoDataMap = JsonUtility.jsonToMap(dataJson);
            Iterator<Entry<String, Object>> it = geoDataMap.entrySet().iterator();
            while (it.hasNext()) {
                Entry<String, Object> e = it.next();
                String key = e.getKey();
                Object val = e.getValue();
                if (key != null && val != null) {
                    rawData.put(fieldName + "." + key, val);
                }
            }
        } catch (Exception e) {
            logger.warn("Error flattening field " + fieldName + " error -> " + e.getMessage());
        }
    }

    /**
     * Stamps the event with source metadata and (optionally) flattens configured fields.
     * Also advances the latest-event-timestamp high-water mark used for lag tracking.
     *
     * @param rawData the parsed event; must not be null
     * @return a copy of the event with mantis meta fields added
     */
    @Override
    protected Map<String, Object> applyPreMapping(final Context context, final Map<String, Object> rawData) {
        if (rawData == null) {
            throw new RuntimeException("rawData is null");
        }
        long now = System.currentTimeMillis();
        if (rawData.containsKey(timestampField)) {
            // Assumes the timestamp field is a Long -- TODO confirm upstream parser guarantees this.
            long ts = (Long) rawData.get(timestampField);
            long latestTsYet = latestTimeStamp.get();
            if (ts > latestTsYet) {
                // Best-effort CAS; losing the race just means another thread saw a newer ts.
                latestTimeStamp.compareAndSet(latestTsYet, ts);
            }
            // TODO DynamicGauge.set("manits.source.timelag", (now - latestTimeStamp.get()));
        }
        try {
            // Result intentionally unused: preMapperFunc is currently the identity hook.
            preMapperFunc.call(rawData);
        } catch (Exception e) {
            // TODO DynamicCounter.increment("mantis.source.premapping.failed", "mantisJobName", jobName);
            logger.warn("Exception applying premapping function " + e.getMessage());
        }
        // Copy before mutating so the caller's map is left untouched.
        final Map<String, Object> modifiedData = new HashMap<>(rawData);
        modifiedData.put(MANTIS_META_SOURCE_NAME, jobName);
        modifiedData.put(MANTIS_META_SOURCE_TIMESTAMP, now);
        if (isFlattenFields) {
            flattenFields(modifiedData);
        }
        return modifiedData;
    }

    /** Stage config: concurrent input, tagged-data codec, and optional concurrency override via env var. */
    public static ScalarToScalar.Config<KafkaAckable, TaggedData> config() {
        ScalarToScalar.Config<KafkaAckable, TaggedData> config = new ScalarToScalar.Config<KafkaAckable, TaggedData>()
            .concurrentInput()
            .codec(AutoAckTaggingStage.taggedDataCodec())
            .withParameters(getParameters());
        String jobParamPrefix = "JOB_PARAM_";
        String stageConcurrencyParam = jobParamPrefix + STAGE_CONCURRENCY;
        String concurrency = System.getenv(stageConcurrencyParam);
        if (concurrency != null && !concurrency.isEmpty()) {
            logger.info("Job param: " + stageConcurrencyParam + " value: " + concurrency);
            try {
                config = config.concurrentInput(Integer.parseInt(concurrency));
            } catch (NumberFormatException ignored) {
                // Malformed override: keep the default single-worker concurrency.
            }
        }
        return config;
    }

    /** Declares the stage's job parameters: fields to flatten, timestamp field, and stage concurrency. */
    static List<ParameterDefinition<?>> getParameters() {
        List<ParameterDefinition<?>> params = Lists.newArrayList();
        // queryable source parameters
        params.add(new StringParameter()
            .name("fieldsToFlatten")
            .description("comma separated list of fields to flatten")
            .validator(Validators.notNullOrEmpty())
            .defaultValue("NONE")
            .build());
        params.add(new StringParameter()
            .name("timeStampField")
            .description("the timestamp field in the event. used to calculate lag")
            .validator(Validators.notNullOrEmpty())
            .defaultValue("_ts_")
            .build());
        params.add(new IntParameter()
            .name(STAGE_CONCURRENCY)
            .description("Parameter to control number of computation workers to use for stage processing")
            .defaultValue(1)
            .validator(Validators.range(1, 8))
            .build());
        return params;
    }
}
| 7,745 |
0 | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka/AutoAckTaggingStage.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.kafka;
import static io.mantisrx.connector.kafka.source.MantisKafkaSourceConfig.DEFAULT_PARSE_MSG_IN_SOURCE;
import io.mantisrx.connector.kafka.KafkaAckable;
import io.mantisrx.connector.kafka.KafkaSourceParameters;
import io.mantisrx.connector.kafka.source.serde.ParseException;
import io.mantisrx.connector.kafka.source.serde.Parser;
import io.mantisrx.connector.kafka.source.serde.ParserType;
import io.mantisrx.runtime.Context;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.Map;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Tagging stage that acknowledges every Kafka message after attempting to parse it.
 * Messages are acked unconditionally (in a finally block) so the consumer keeps
 * making progress even when a payload cannot be parsed.
 */
public class AutoAckTaggingStage extends AbstractAckableTaggingStage {

    // FIX: the class previously declared two equivalent logger fields (LOG and logger);
    // consolidated to one.
    private static final Logger logger = LoggerFactory.getLogger(AutoAckTaggingStage.class);

    public AutoAckTaggingStage() {
    }

    /**
     * Default impl to ack the received data and return the parsed kafka data.
     *
     * @param ackable the Kafka message wrapper; always acked regardless of parse outcome
     * @return the parsed event map, or an empty map when parsing fails or no parsed event is present
     */
    @Override
    protected Map<String, Object> processAndAck(final Context context, KafkaAckable ackable) {
        try {
            Boolean messageParsedInSource = (Boolean) context.getParameters().get(KafkaSourceParameters.PARSE_MSG_IN_SOURCE, DEFAULT_PARSE_MSG_IN_SOURCE);
            String messageParserType = (String) context.getParameters().get(KafkaSourceParameters.PARSER_TYPE, ParserType.SIMPLE_JSON.getPropName());
            if (messageParsedInSource) {
                // The source already parsed the payload; fall back to an empty map if absent.
                final Optional<Map<String, Object>> parsedEventO = ackable.getKafkaData().getParsedEvent();
                return parsedEventO.orElse(Collections.emptyMap());
            } else {
                final Parser parser = ParserType.parser(messageParserType).getParser();
                if (parser.canParse(ackable.getKafkaData().getRawBytes())) {
                    return parser.parseMessage(ackable.getKafkaData().getRawBytes());
                } else {
                    // FIX: byte[].toString() only prints the array identity ([B@...);
                    // decode the bytes so the log line shows the actual payload.
                    logger.warn("cannot parse message {}", new String(ackable.getKafkaData().getRawBytes(), StandardCharsets.UTF_8));
                    throw new ParseException("cannot parse message");
                }
            }
        } catch (Throwable t) {
            if (t instanceof ParseException) {
                logger.warn("failed to parse message", t);
            } else {
                logger.error("caught unexpected exception", t);
            }
        } finally {
            // Always ack so the consumer can make progress even on bad messages.
            ackable.ack();
        }
        return Collections.emptyMap();
    }

    /** No-op pre-processing hook; subclasses may override. */
    @Override
    protected Map<String, Object> preProcess(Map<String, Object> rawData) {
        return rawData;
    }
}
| 7,746 |
0 | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka/AbstractAckableTaggingStage.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.kafka;
import io.mantisrx.common.codec.Codec;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.connector.kafka.KafkaAckable;
import io.mantisrx.mql.jvm.core.Query;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.computation.ScalarComputation;
import io.mantisrx.sourcejob.kafka.core.TaggedData;
import io.mantisrx.sourcejob.kafka.sink.MQLQueryManager;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
public abstract class AbstractAckableTaggingStage implements ScalarComputation<KafkaAckable, TaggedData> {
public static final String MANTIS_META_IS_COMPLETE_DATA = "mantis.meta.isCompleteData";
public static final String MANTIS_META_SOURCE_NAME = "mantis.meta.sourceName";
public static final String MANTIS_META_SOURCE_TIMESTAMP = "mantis.meta.timestamp";
public static final String MANTIS_QUERY_COUNTER = "mantis_query_out";
public static final String MQL_COUNTER = "mql_out";
public static final String MQL_FAILURE = "mql_failure";
public static final String MQL_CLASSLOADER_ERROR = "mql_classloader_error";
private static final Logger logger = LoggerFactory.getLogger(AbstractAckableTaggingStage.class);
private static final String MANTIS_META = "mantis.meta";
protected AtomicBoolean trackIsComplete = new AtomicBoolean(false);
private AtomicBoolean errorLogged = new AtomicBoolean(false);
public Observable<TaggedData> call(Context context, Observable<KafkaAckable> data) {
context.getMetricsRegistry().registerAndGet(new Metrics.Builder()
.name("mql")
.addCounter(MQL_COUNTER)
.addCounter(MQL_FAILURE)
.addCounter(MQL_CLASSLOADER_ERROR)
.addCounter(MANTIS_QUERY_COUNTER).build());
return data
.map(ackable -> {
Map<String, Object> rawData = processAndAck(context, ackable);
return preProcess(rawData);
})
.filter((d) -> !d.isEmpty())
.map(mapData -> applyPreMapping(context, mapData))
.filter((d) -> !d.isEmpty())
.flatMapIterable(d -> tagData(d, context));
}
protected abstract Map<String, Object> processAndAck(final Context context, KafkaAckable ackable);
protected abstract Map<String, Object> preProcess(final Map<String, Object> rawData);
protected Map<String, Object> applyPreMapping(final Context context, final Map<String, Object> rawData) {
return rawData;
}
private boolean isMetaEvent(Map<String, Object> d) {
return d.containsKey(MANTIS_META_IS_COMPLETE_DATA) || d.containsKey(MANTIS_META);
}
@SuppressWarnings("unchecked")
protected List<TaggedData> tagData(Map<String, Object> d, Context context) {
List<TaggedData> taggedDataList = new ArrayList<>();
boolean metaEvent = isMetaEvent(d);
Metrics metrics = context.getMetricsRegistry().getMetric("mql");
Collection<Query> queries = MQLQueryManager.getInstance().getRegisteredQueries();
Iterator<Query> it = queries.iterator();
while (it.hasNext()) {
Query query = it.next();
try {
if (metaEvent) {
TaggedData tg = new TaggedData(d);
tg.addMatchedClient(query.getSubscriptionId());
taggedDataList.add(tg);
} else if (query.matches(d)) {
Map<String, Object> projected = query.project(d);
projected.put(MANTIS_META_SOURCE_NAME, d.get(MANTIS_META_SOURCE_NAME));
projected.put(MANTIS_META_SOURCE_TIMESTAMP, d.get(MANTIS_META_SOURCE_TIMESTAMP));
TaggedData tg = new TaggedData(projected);
tg.addMatchedClient(query.getSubscriptionId());
taggedDataList.add(tg);
}
} catch (Exception ex) {
if (ex instanceof ClassNotFoundException) {
logger.error("Error loading MQL: " + ex.getMessage());
ex.printStackTrace();
metrics.getCounter(MQL_CLASSLOADER_ERROR).increment();
} else {
ex.printStackTrace();
metrics.getCounter(MQL_FAILURE).increment();
logger.error("MQL Error: " + ex.getMessage());
logger.error("MQL Query: " + query.getRawQuery());
logger.error("MQL Datum: " + d);
}
} catch (Error e) {
metrics.getCounter(MQL_FAILURE).increment();
if (!errorLogged.get()) {
logger.error("caught Error when processing MQL {} on {}", query.getRawQuery(), d.toString(), e);
errorLogged.set(true);
}
}
}
return taggedDataList;
}
public static Codec<TaggedData> taggedDataCodec() {
return new Codec<TaggedData>() {
@Override
public TaggedData decode(byte[] bytes) {
return new TaggedData(new HashMap<>());
}
@Override
public byte[] encode(final TaggedData value) {
return new byte[128];
}
};
}
}
| 7,747 |
0 | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka/QueryableKafkaSourceJob.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.kafka;
import com.netflix.spectator.api.DefaultRegistry;
import io.mantisrx.connector.kafka.KafkaSourceParameters;
import io.mantisrx.connector.kafka.source.KafkaSource;
import io.mantisrx.connector.kafka.source.serde.ParserType;
import io.mantisrx.runtime.Job;
import io.mantisrx.runtime.MantisJob;
import io.mantisrx.runtime.MantisJobProvider;
import io.mantisrx.runtime.executor.LocalJobExecutorNetworked;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.sourcejob.kafka.core.TaggedData;
import io.mantisrx.sourcejob.kafka.sink.QueryRequestPostProcessor;
import io.mantisrx.sourcejob.kafka.sink.QueryRequestPreProcessor;
import io.mantisrx.sourcejob.kafka.sink.TaggedDataSourceSink;
import org.apache.kafka.clients.consumer.ConsumerConfig;
/**
* Generic queryable source job to connect to configured kafka topic
*/
public class QueryableKafkaSourceJob extends MantisJobProvider<TaggedData> {
protected AutoAckTaggingStage getAckableTaggingStage() {
return new CustomizedAutoAckTaggingStage();
}
@Override
public Job<TaggedData> getJobInstance() {
KafkaSource kafkaSource = new KafkaSource(new DefaultRegistry());
return
MantisJob // kafkaSource to connect to kafka and stream events from the configured topic
.source(kafkaSource)
.stage(getAckableTaggingStage(), CustomizedAutoAckTaggingStage.config())
.sink(new TaggedDataSourceSink(new QueryRequestPreProcessor(), new QueryRequestPostProcessor()))
// required parameters
.create();
}
public static void main(String[] args) {
LocalJobExecutorNetworked.execute(new QueryableKafkaSourceJob().getJobInstance(),
new Parameter(KafkaSourceParameters.TOPIC, "nf_errors_log"),
new Parameter(KafkaSourceParameters.NUM_KAFKA_CONSUMER_PER_WORKER, "1"),
new Parameter(KafkaSourceParameters.PARSER_TYPE, ParserType.SIMPLE_JSON.getPropName()),
new Parameter(KafkaSourceParameters.PARSE_MSG_IN_SOURCE, "true"),
new Parameter(KafkaSourceParameters.PREFIX + ConsumerConfig.GROUP_ID_CONFIG, "QueryableKafkaSourceLocal"),
new Parameter(KafkaSourceParameters.PREFIX + ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "100.66.49.176:7102"),
new Parameter(KafkaSourceParameters.PREFIX + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest"));
}
}
| 7,748 |
0 | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka/core/TaggedData.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.kafka.core;
import io.mantisrx.runtime.codec.JsonType;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
/**
 * Pairs an event payload with the set of subscription/client ids whose queries
 * matched the event.
 */
// FIX: @JsonIgnoreProperties moved from the constructor to the class declaration,
// where Jackson honors it for unknown-property handling during deserialization.
@JsonIgnoreProperties(ignoreUnknown = true)
public class TaggedData implements JsonType {

    private final Set<String> matchedClients = new HashSet<>();
    private Map<String, Object> payLoad;

    /**
     * @param data the event payload, bound from the "data" JSON property
     */
    @JsonCreator
    public TaggedData(@JsonProperty("data") Map<String, Object> data) {
        this.payLoad = data;
    }

    /** @return the mutable set of ids whose queries matched this event */
    public Set<String> getMatchedClients() {
        return matchedClients;
    }

    /** @return true if the given id's query matched this event */
    public boolean matchesClient(String clientId) {
        return matchedClients.contains(clientId);
    }

    /** Records that the given id's query matched this event. */
    public void addMatchedClient(String clientId) {
        matchedClients.add(clientId);
    }

    public Map<String, Object> getPayload() {
        return this.payLoad;
    }

    public void setPayload(Map<String, Object> newPayload) {
        this.payLoad = newPayload;
    }
}
| 7,749 |
0 | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka/core | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka/core/utils/SourceJobConstants.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.kafka.core.utils;
/**
 * Query-parameter names shared by the source job's sink pre/post processors.
 */
public final class SourceJobConstants {

    public static final String SUBSCRIPTION_ID_PARAM_NAME = "subscriptionId";
    public static final String CRITERION_PARAM_NAME = "criterion";
    public static final String FILTER_PARAM_NAME = "filter";
    public static final String CLIENT_ID_PARAMETER_NAME = "clientId";

    // Constants holder; never instantiated.
    private SourceJobConstants() {
    }
}
| 7,750 |
0 | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka/core | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka/core/utils/JsonUtility.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.kafka.core.utils;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectReader;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectWriter;
import java.io.IOException;
import java.util.Map;
public class JsonUtility {
private static final JsonUtility INSTANCE = new JsonUtility();
private JsonUtility() {}
public static Map<String, Object> jsonToMap(String jsonString) {
return INSTANCE._jsonToMap(jsonString);
}
public static String mapToJson(Map<String, ? extends Object> map) {
return INSTANCE._mapToJson(map);
}
private final ObjectMapper objectMapper = new ObjectMapper();
private final ObjectReader objectReader = objectMapper.readerFor(Map.class);
private Map<String, Object> _jsonToMap(String jsonString) {
try {
return objectReader.readValue(jsonString);
} catch (IOException e) {
throw new RuntimeException("Unable to parse JSON", e);
}
}
private final ObjectWriter objectWriter = objectMapper.writerFor(Map.class);
private String _mapToJson(Map<String, ? extends Object> map) {
try {
return objectWriter.writeValueAsString(map);
} catch (IOException e) {
throw new RuntimeException("Unable to write JSON", e);
}
}
} | 7,751 |
0 | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka/sink/QueryRequestPostProcessor.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.kafka.sink;
import static io.mantisrx.sourcejob.kafka.core.utils.SourceJobConstants.CRITERION_PARAM_NAME;
import static io.mantisrx.sourcejob.kafka.core.utils.SourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME;
import io.mantisrx.runtime.Context;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
import rx.functions.Func2;
/**
 * Sink post-processor invoked with the connection's query parameters when a client
 * disconnects; deregisters the client's MQL query registration.
 */
public class QueryRequestPostProcessor implements Func2<Map<String, List<String>>, Context, Void> {

    private static Logger logger = Logger.getLogger(QueryRequestPostProcessor.class);

    public QueryRequestPostProcessor() { }

    @Override
    public Void call(Map<String, List<String>> queryParams, Context context) {
        logger.info("RequestPostProcessor:queryParams: " + queryParams);
        if (queryParams != null) {
            if (queryParams.containsKey(SUBSCRIPTION_ID_PARAM_NAME) && queryParams.containsKey(CRITERION_PARAM_NAME)) {
                final String subId = queryParams.get(SUBSCRIPTION_ID_PARAM_NAME).get(0);
                final String query = queryParams.get(CRITERION_PARAM_NAME).get(0);
                // FIX: "clientId" is optional; the previous unconditional
                // queryParams.get("clientId").get(0) threw a NullPointerException
                // whenever the parameter was absent.
                final List<String> clientIdValues = queryParams.get("clientId");
                final String clientId = (clientIdValues == null || clientIdValues.isEmpty())
                    ? null : clientIdValues.get(0);
                if (subId != null && query != null) {
                    try {
                        if (clientId != null && !clientId.isEmpty()) {
                            // Queries are keyed "<clientId>_<subId>" when a client id is supplied.
                            deregisterQuery(clientId + "_" + subId);
                        } else {
                            deregisterQuery(subId);
                        }
                        // TODO - DynamicGauge.set("activeQueries", BasicTagList.of("mantisJobId", context.getJobId(),
                        // "mantisJobName",context.getWorkerInfo().getJobName()), (double) MQLQueryManager.getInstance().getRegisteredQueries().size());
                    } catch (Throwable t) {
                        logger.error("Error propagating unsubscription notification", t);
                    }
                }
            }
        }
        return null;
    }

    /** Removes the query registration for the given subscription key. */
    private void deregisterQuery(String subId) {
        QueryRefCountMap.INSTANCE.removeQuery(subId);
    }
}
| 7,752 |
0 | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka/sink/MQL.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.kafka.sink;
import io.mantisrx.mql.jvm.core.Query;
import io.mantisrx.mql.shaded.clojure.java.api.Clojure;
import io.mantisrx.mql.shaded.clojure.lang.IFn;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.functions.Func1;
/**
* The MQL class provides a Java/Scala friendly static interface to MQL functionality which is written in Clojure.
* This class provides a few pieces of functionality;
* - It wraps the Clojure interop so that the user interacts with typed methods via the static interface.
* - It provides methods for accessing individual bits of query functionality, allowing interesting uses
* such as aggregator-mql which uses these components to implement the query in a horizontally scalable / distributed
* fashion on Mantis.
* - It functions as an Rx Transformer of MantisServerSentEvent to MQLResult allowing a user to inline all MQL
* functionality quickly as such: `myObservable.compose(MQL.parse(myQuery));`
*/
public class MQL {
//
// Clojure Interop
//
private static IFn require = Clojure.var("io.mantisrx.mql.shaded.clojure.core", "require");
static {
require.invoke(Clojure.read("io.mantisrx.mql.jvm.interfaces.core"));
require.invoke(Clojure.read("io.mantisrx.mql.jvm.interfaces.server"));
}
private static IFn cljMakeQuery = Clojure.var("io.mantisrx.mql.jvm.interfaces.server", "make-query");
private static IFn cljSuperset = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "queries->superset-projection");
private static IFn parser = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "parser");
private static IFn parses = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "parses?");
private static IFn getParseError = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "get-parse-error");
private static IFn queryToGroupByFn = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "query->groupby");
private static IFn queryToHavingPred = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "query->having-pred");
private static IFn queryToOrderBy = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "query->orderby");
private static IFn queryToLimit = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "query->limit");
private static IFn queryToExtrapolationFn = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "query->extrapolator");
private static IFn queryToAggregateFn = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "agg-query->projection");
private static IFn queryToWindow = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "query->window");
private static Logger logger = LoggerFactory.getLogger(MQL.class);
private static ConcurrentHashMap<HashSet<Query>, IFn> superSetProjectorCache = new ConcurrentHashMap<>();
private final String query;
private final boolean threadingEnabled;
private final Optional<String> sourceJobName;
/** No-op trigger: calling it forces class loading, whose static initializer 'require's the MQL Clojure namespaces. */
public static void init() {
    logger.info("Initializing MQL runtime.");
}
//
// Constructors and Static Factory Methods
//
/**
 * Builds an MQL operator from a (possibly legacy-format) query string.
 *
 * @param query the raw MQL query; rewritten via transformLegacyQuery before storage
 * @param threadingEnabled whether threaded evaluation is enabled for this operator
 * @throws IllegalArgumentException if the query is null or fails to parse
 */
public MQL(String query, boolean threadingEnabled) {
    if (query == null) {
        throw new IllegalArgumentException("MQL cannot be used as an operator with a null query.");
    }
    this.query = transformLegacyQuery(query);
    // NOTE(review): the parse check validates the ORIGINAL query string, not the
    // transformed one stored in this.query -- confirm this is intentional.
    if (!parses(query)) {
        throw new IllegalArgumentException(getParseError(query));
    }
    this.threadingEnabled = threadingEnabled;
    this.sourceJobName = Optional.empty();
}

/**
 * Builds an MQL operator bound to a specific source job name (threading disabled).
 *
 * @param query the raw MQL query; rewritten via transformLegacyQuery before storage
 * @param sourceJobName the source job this query targets; may be null
 * @throws IllegalArgumentException if the query is null or fails to parse
 */
public MQL(String query, String sourceJobName) {
    if (query == null) {
        throw new IllegalArgumentException("MQL cannot be used as an operator with a null query.");
    }
    this.query = transformLegacyQuery(query);
    // NOTE(review): same as above -- validates the original rather than the transformed query.
    if (!parses(query)) {
        throw new IllegalArgumentException(getParseError(query));
    }
    this.threadingEnabled = false;
    this.sourceJobName = Optional.ofNullable(sourceJobName);
}
/** Parses {@code query} into an MQL operator with threading disabled. */
public static MQL parse(String query) {
    return new MQL(query, false);
}

/** Parses {@code query} into an MQL operator, optionally enabling threaded evaluation. */
public static MQL parse(String query, boolean threadingEnabled) {
    return new MQL(query, threadingEnabled);
}

/** Parses {@code query} into an MQL operator bound to the given source job name. */
public static MQL parse(String query, String sourceName) {
    return new MQL(query, sourceName);
}
//
// Source Job Integration
//
/**
* Constructs an object implementing the Query interface.
* This includes functions;
* matches (Map<String, Object>>) -> Boolean
* Returns true iff the data contained within the map parameter satisfies the query's WHERE clause.
* project (Map<String, Object>>) -> Map<String, Object>>
* Returns the provided map in accordance with the SELECT clause of the query.
* sample (Map<String, Object>>) -> Boolean
* Returns true if the data should be sampled, this function is a tautology if no SAMPLE clause is provided.
*
* @param subscriptionId The ID representing the subscription.
* @param query The (valid) MQL query to parse.
*
* @return An object implementing the Query interface.
*/
public static Query makeQuery(String subscriptionId, String query) {
    /*
    if (!parses(query)) {
        String error = getParseError(query);
        logger.error("Failed to parse query [" + query + "]\nError: " + error + ".");
        throw new IllegalArgumentException(error);
    }
    */
    // NOTE(review): the up-front validation above is disabled, so an invalid query
    // surfaces as whatever error the Clojure make-query fn raises. The query is
    // trimmed before being handed to Clojure.
    return (Query) cljMakeQuery.invoke(subscriptionId, query.trim());
}
/**
 * Compiles a single Clojure projection fn covering the union of fields needed by
 * all of the given queries.
 */
private static IFn computeSuperSetProjector(HashSet<Query> queries) {
    // FIX: pass the typed list directly instead of wrapping it in a raw-type
    // ArrayList copy (the extra raw copy added nothing and triggered an
    // unchecked warning, which is why @SuppressWarnings was needed before).
    ArrayList<String> qs = new ArrayList<>(queries.size());
    for (Query query : queries) {
        qs.add(query.getRawQuery());
    }
    return (IFn) cljSuperset.invoke(qs);
}
/**
 * Projects a single event map onto the union of all fields required by the supplied queries.
 * Useful (e.g. in mantis-realtime-events) for minimizing data egressed off box: less JSON
 * serialization work and less network bandwidth.
 * <p>
 * NOTE: projector functions are cached per unique set of queries for performance. Each distinct
 * combination of queries adds a cache entry; in practice this is negligible for <= 100 queries.
 *
 * @param queries queries produced by {@link #makeQuery(String, String)}.
 * @param datum   the input event to project.
 *
 * @return a Map holding the superset of fields needed to evaluate every query.
 */
@SuppressWarnings("unchecked")
public static Map<String, Object> projectSuperSet(Collection<Query> queries, Map<String, Object> datum) {
    HashSet<Query> cacheKey = new HashSet<Query>(queries);
    IFn projector = superSetProjectorCache.computeIfAbsent(cacheKey, MQL::computeSuperSetProjector);
    return (Map<String, Object>) projector.invoke(datum);
}
//
// Partial Query Functionality
//
/** Compiles the GROUP BY clause of {@code query} into a key-selector function. */
public static Func1<Map<String, Object>, Object> getGroupByFn(String query) {
    IFn func = (IFn) queryToGroupByFn.invoke(query);
    return func::invoke;
}

/** Compiles the HAVING clause of {@code query} into a predicate over aggregated rows. */
@SuppressWarnings("unchecked")
public static Func1<Map<String, Object>, Boolean> getHavingPredicate(String query) {
    IFn func = (IFn) queryToHavingPred.invoke(query);
    return (datum) -> (Boolean) func.invoke(datum);
}

/** Compiles the aggregation stage of {@code query} into an Observable-to-Observable transform. */
@SuppressWarnings("unchecked")
public static Func1<Observable<Map<String, Object>>, Observable<Map<String, Object>>> getAggregateFn(String query) {
    IFn func = (IFn) queryToAggregateFn.invoke(query);
    return (obs) -> (Observable<Map<String, Object>>) func.invoke(obs);
}

/** Compiles the extrapolation stage of {@code query} into a per-datum transform. */
@SuppressWarnings("unchecked")
public static Func1<Map<String, Object>, Map<String, Object>> getExtrapolationFn(String query) {
    IFn func = (IFn) queryToExtrapolationFn.invoke(query);
    return (datum) -> (Map<String, Object>) func.invoke(datum);
}

/** Compiles the ORDER BY stage of {@code query} into an Observable-to-Observable transform. */
@SuppressWarnings("unchecked")
public static Func1<Observable<Map<String, Object>>, Observable<Map<String, Object>>> getOrderBy(String query) {
    IFn func = (IFn) queryToOrderBy.invoke(query);
    return obs -> (Observable<Map<String, Object>>) func.invoke(obs);
}

// public static List<Long> getWindow(String query) {
//     clojure.lang.PersistentVector result = (clojure.lang.PersistentVector)queryToWindow.invoke(query);
//     Long window = (Long)result.nth(0);
//     Long shift = (Long)result.nth(1);
//     return Arrays.asList(window, shift);
// }

/** Returns the LIMIT value of {@code query}. */
public static Long getLimit(String query) {
    return (Long) queryToLimit.invoke(query);
}
//
// Helper Functions
//
/**
 * A predicate which indicates whether or not the MQL parser considers query to be a valid query.
 *
 * @param query A String representing the MQL query.
 *
 * @return A boolean indicating whether or not the query successfully parses.
 */
public static Boolean parses(String query) {
    // Delegates to the Clojure-side parser; no transformation is applied here.
    return (Boolean) parses.invoke(query);
}
/**
 * A convenience function allowing a caller to determine what went wrong if a call to #parses(String query) returns
 * false.
 *
 * @param query A String representing the MQL query.
 *
 * @return A String representing the parse error for an MQL query, null if no parse error occurred.
 */
public static String getParseError(String query) {
    return (String) getParseError.invoke(query);
}
/**
 * A helper which converts bare true/false queries (old Mantis query language) to MQL.
 *
 * @param criterion A Mantis Query (old query language) query.
 *
 * @return A valid MQL query string assuming the input was valid; non-legacy input is
 *         returned unchanged.
 */
public static String transformLegacyQuery(String criterion) {
    // equalsIgnoreCase avoids the two locale-sensitive toLowerCase() allocations of the
    // previous nested-ternary form while matching the same inputs.
    if (criterion.equalsIgnoreCase("true")) {
        return "select * where true";
    }
    if (criterion.equalsIgnoreCase("false")) {
        return "select * where false";
    }
    return criterion;
}
/** Ad-hoc manual smoke test: prints the compiled Query for a trivial MQL statement. */
public static void main(String[] args) {
    System.out.println(MQL.makeQuery("abc", "select * from stream where true"));
}
}
| 7,753 |
0 | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka/sink/TaggedDataSourceSink.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.kafka.sink;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.PortRequest;
import io.mantisrx.runtime.sink.ServerSentEventsSink;
import io.mantisrx.runtime.sink.Sink;
import io.mantisrx.runtime.sink.predicate.Predicate;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.sourcejob.kafka.core.TaggedData;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import rx.Observable;
import rx.Subscription;
import rx.functions.Func2;
/**
 * Server-sent-events sink for {@link TaggedData}: serializes each event's payload to JSON and
 * streams it to subscribed clients, filtering matching events per connection via
 * {@link TaggedEventFilter}.
 */
public class TaggedDataSourceSink implements Sink<TaggedData> {

    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

    private final ServerSentEventsSink<TaggedData> sink;
    private Subscription subscription;

    /** Request processor that performs no work; used when no pre/post processing is needed. */
    static class NoOpProcessor implements Func2<Map<String, List<String>>, Context, Void> {

        @Override
        public Void call(Map<String, List<String>> t1, Context t2) {
            return null;
        }
    }

    public TaggedDataSourceSink() {
        this(new NoOpProcessor(), new NoOpProcessor());
    }

    /**
     * @param preProcessor  invoked with the request's query parameters before streaming begins.
     * @param postProcessor invoked with the request's query parameters when the connection ends.
     */
    public TaggedDataSourceSink(Func2<Map<String, List<String>>, Context, Void> preProcessor,
                                Func2<Map<String, List<String>>, Context, Void> postProcessor) {
        this.sink = new ServerSentEventsSink.Builder<TaggedData>()
                .withEncoder((data) -> {
                    try {
                        return OBJECT_MAPPER.writeValueAsString(data.getPayload());
                    } catch (JsonProcessingException e) {
                        e.printStackTrace();
                        // Serialize the message as a JSON string; the previous raw concatenation
                        // produced invalid JSON (the message value was unquoted).
                        try {
                            return "{\"error\":" + OBJECT_MAPPER.writeValueAsString(e.getMessage()) + "}";
                        } catch (JsonProcessingException inner) {
                            return "{\"error\":\"failed to serialize payload\"}";
                        }
                    }
                })
                .withPredicate(new Predicate<TaggedData>("description", new TaggedEventFilter()))
                .withRequestPreprocessor(preProcessor)
                .withRequestPostprocessor(postProcessor)
                .build();
    }

    @Override
    public void call(Context context, PortRequest portRequest,
                     Observable<TaggedData> observable) {
        observable = observable
                .filter((t1) -> !t1.getPayload().isEmpty());
        // NOTE(review): this subscription consumes the stream in parallel with the SSE sink
        // below — confirm whether it is intentional (e.g. to keep the source drained).
        subscription = observable.subscribe();
        sink.call(context, portRequest, observable);
    }

    @Override
    public void close() throws IOException {
        try {
            sink.close();
        } finally {
            // close() may be invoked before call(); subscription can still be null.
            if (subscription != null) {
                subscription.unsubscribe();
            }
        }
    }
}
| 7,754 |
0 | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka/sink/TaggedEventFilter.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.kafka.sink;
import io.mantisrx.common.MantisProperties;
import io.mantisrx.sourcejob.kafka.core.TaggedData;
import io.mantisrx.sourcejob.kafka.core.utils.SourceJobConstants;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
import rx.functions.Func1;
/**
 * Produces, per SSE connection, a predicate deciding which tagged events belong to that
 * connection. Events are matched against "clientId_subscriptionId" when a clientId is
 * supplied, otherwise against the bare subscriptionId.
 */
public class TaggedEventFilter implements Func1<Map<String, List<String>>, Func1<TaggedData, Boolean>> {

    private static Logger logger = Logger.getLogger(TaggedEventFilter.class);

    /**
     * @param parameters the connection's query parameters; may be null.
     * @return a predicate matching this connection's events, or an accept-all predicate when
     *         no subscriptionId parameter is present.
     */
    @Override
    public Func1<TaggedData, Boolean> call(Map<String, List<String>> parameters) {
        Func1<TaggedData, Boolean> filter = t1 -> true;
        if (parameters != null && parameters.containsKey(SourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME)) {
            String subId = parameters.get(SourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME).get(0);
            // clientId is optional: the previous code called get(...).get(0) unconditionally
            // and threw an NPE when only subscriptionId was supplied.
            List<String> clientIdValues = parameters.get(SourceJobConstants.CLIENT_ID_PARAMETER_NAME);
            String clientId = (clientIdValues == null || clientIdValues.isEmpty())
                    ? null : clientIdValues.get(0);
            List<String> terms = new ArrayList<String>();
            if (clientId != null && !clientId.isEmpty()) {
                terms.add(clientId + "_" + subId);
            } else {
                terms.add(subId);
            }
            filter = new SourceEventFilter(terms);
        }
        return filter;
    }

    /** Predicate that accepts an event only if it matches every term (clientId_subId tags). */
    private static class SourceEventFilter implements Func1<TaggedData, Boolean> {

        private String jobId = "UNKNOWN";
        private String jobName = "UNKNOWN";
        private List<String> terms;

        SourceEventFilter(List<String> terms) {
            this.terms = terms;
            // Job identity is logged for traceability only; defaults remain "UNKNOWN"
            // when the environment does not provide them.
            String jId = MantisProperties.getProperty("JOB_ID");
            if (jId != null && !jId.isEmpty()) {
                jobId = jId;
            }
            String jName = MantisProperties.getProperty("JOB_NAME");
            if (jName != null && !jName.isEmpty()) {
                jobName = jName;
            }
            logger.info("Created SourceEventFilter! for subId " + terms.toString() + " in Job : " + jobName + " with Id " + jobId);
        }

        @Override
        public Boolean call(TaggedData data) {
            // DynamicCounter.increment("SourceEventFilterCount", "kind","processed","mantisJobId",jobId,"subId",terms.toString());
            boolean match = true;
            for (String term : terms) {
                if (!data.matchesClient(term)) {
                    match = false;
                    break;
                }
            }
            return match;
        }
    }
}
| 7,755 |
0 | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka/sink/QueryRequestPreProcessor.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.kafka.sink;
import static io.mantisrx.sourcejob.kafka.core.utils.SourceJobConstants.CRITERION_PARAM_NAME;
import static io.mantisrx.sourcejob.kafka.core.utils.SourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME;
import io.mantisrx.runtime.Context;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.functions.Func2;
/**
 * SSE request pre-processor that registers the MQL query carried in a new connection's
 * query parameters so the source job starts evaluating it.
 */
public class QueryRequestPreProcessor implements Func2<Map<String, List<String>>, Context, Void> {

    private static final Logger LOGGER = LoggerFactory.getLogger(QueryRequestPreProcessor.class);

    public QueryRequestPreProcessor() { }

    /**
     * Registers the query found in the request parameters, keyed by
     * "clientId_subscriptionId" when a clientId is supplied, otherwise by subscriptionId.
     * Registration failures are logged, never propagated to the connection.
     */
    @Override
    public Void call(Map<String, List<String>> queryParams, Context context) {
        LOGGER.info("QueryRequestPreProcessor:queryParams: {}", queryParams);
        if (queryParams != null
                && queryParams.containsKey(SUBSCRIPTION_ID_PARAM_NAME)
                && queryParams.containsKey(CRITERION_PARAM_NAME)) {
            final String subId = queryParams.get(SUBSCRIPTION_ID_PARAM_NAME).get(0);
            final String query = queryParams.get(CRITERION_PARAM_NAME).get(0);
            // "clientId" is optional: the previous code called get("clientId").get(0)
            // unconditionally and threw an NPE when the parameter was missing.
            final List<String> clientIdValues = queryParams.get("clientId");
            final String clientId = (clientIdValues == null || clientIdValues.isEmpty())
                    ? null : clientIdValues.get(0);
            if (subId != null && query != null) {
                try {
                    LOGGER.info("Registering query {}", query);
                    if (clientId != null && !clientId.isEmpty()) {
                        registerQuery(clientId + "_" + subId, query);
                    } else {
                        registerQuery(subId, query);
                    }
                    // TODO - DynamicGauge.set("activeQueries", BasicTagList.of("mantisJobId", context.getJobId(),
                    // "mantisJobName",context.getWorkerInfo().getJobName()), (double) MQLQueryManager.getInstance().getRegisteredQueries().size());
                } catch (Throwable t) {
                    LOGGER.error("Error registering query", t);
                }
            }
        }
        return null;
    }

    // Serializes ref-count updates arriving from concurrent connections.
    private static synchronized void registerQuery(String subId, String query) {
        QueryRefCountMap.INSTANCE.addQuery(subId, query);
    }
}
| 7,756 |
0 | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka/sink/MQLQueryManager.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.kafka.sink;
import io.mantisrx.mql.jvm.core.Query;
import java.util.Collection;
import java.util.concurrent.ConcurrentHashMap;
/**
 * Process-wide registry of compiled MQL queries, keyed by subscription id.
 * Singleton; thread-safe via a ConcurrentHashMap and the lazy-holder idiom.
 */
public class MQLQueryManager {

    /** Lazy-holder idiom: INSTANCE is built on first getInstance() call, thread-safe via class init. */
    static class LazyHolder {
        private static final MQLQueryManager INSTANCE = new MQLQueryManager();
    }

    private ConcurrentHashMap<String, Query> registered = new ConcurrentHashMap<>();

    /** @return the singleton registry instance. */
    public static MQLQueryManager getInstance() {
        return LazyHolder.INSTANCE;
    }

    private MQLQueryManager() { }

    /** Compiles {@code query} (after legacy true/false normalization) and stores it under {@code id}. */
    public void registerQuery(String id, String query) {
        String normalized = MQL.transformLegacyQuery(query);
        registered.put(id, MQL.makeQuery(id, normalized));
    }

    /** Drops the query registered under {@code id}, if any. */
    public void deregisterQuery(String id) {
        registered.remove(id);
    }

    /** @return a live view of all currently registered queries. */
    public Collection<Query> getRegisteredQueries() {
        return registered.values();
    }

    /** Removes every registered query. */
    public void clear() {
        registered.clear();
    }

    /** Ad-hoc manual smoke test for query registration and parse diagnostics. */
    public static void main(String[] args) throws Exception {
        MQLQueryManager qm = getInstance();
        String query = "SELECT * WHERE true SAMPLE {\"strategy\":\"RANDOM\",\"threshold\":1}";
        qm.registerQuery("fake2", query);
        System.out.println(MQL.parses(MQL.transformLegacyQuery(query)));
        System.out.println(MQL.getParseError(MQL.transformLegacyQuery(query)));
        System.out.println(qm.getRegisteredQueries());
    }
}
| 7,757 |
0 | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka | Create_ds/mantis/mantis-source-jobs/mantis-source-job-kafka/src/main/java/io/mantisrx/sourcejob/kafka/sink/QueryRefCountMap.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.sourcejob.kafka.sink;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.log4j.Logger;
/**
 * Reference-counts MQL subscriptions: the first add for a subId registers the query with
 * {@link MQLQueryManager}; the last remove deregisters it. Thread-safe.
 */
final class QueryRefCountMap {

    private static Logger logger = Logger.getLogger(QueryRefCountMap.class);

    public static final QueryRefCountMap INSTANCE = new QueryRefCountMap();

    private final ConcurrentHashMap<String, AtomicInteger> refCntMap = new ConcurrentHashMap<>();

    private QueryRefCountMap() {
    }

    /**
     * Adds a reference for {@code subId}, registering the query on the first reference.
     * compute() makes check-and-register atomic: the previous containsKey/putIfAbsent
     * sequence could lose an increment under concurrent adds, causing premature
     * deregistration later.
     */
    void addQuery(String subId, String query) {
        logger.info("adding query " + subId + " query " + query);
        refCntMap.compute(subId, (id, refCnt) -> {
            if (refCnt == null) {
                // Registration happens while holding the map bin lock; registerQuery is
                // expected to be quick (query compilation only).
                MQLQueryManager.getInstance().registerQuery(id, query);
                logger.info("new query registering it");
                return new AtomicInteger(1);
            }
            int newVal = refCnt.incrementAndGet();
            logger.info("query exists already incrementing refcnt to " + newVal);
            return refCnt;
        });
    }

    /**
     * Drops a reference for {@code subId}; deregisters the query when the count hits zero.
     * Performed atomically via compute() (returning null removes the mapping).
     */
    void removeQuery(String subId) {
        refCntMap.compute(subId, (id, refCnt) -> {
            if (refCnt == null) {
                logger.warn("No query with subscriptionId " + id);
                return null;
            }
            int currVal = refCnt.decrementAndGet();
            if (currVal == 0) {
                MQLQueryManager.getInstance().deregisterQuery(id);
                logger.info("All references to query are gone removing query");
                return null;
            }
            logger.info("References to query still exist. decremeting refcnt to " + currVal);
            return refCnt;
        });
    }

    /** @return the number of distinct subscriptions currently holding references. */
    public int getUniqueSubscriptionsCount() {
        return refCntMap.size();
    }

    /**
     * For testing
     *
     * @param subId subscription id to look up.
     *
     * @return the current reference count, or 0 if the subscription is unknown.
     */
    int getQueryRefCount(String subId) {
        AtomicInteger refCnt = refCntMap.get(subId);
        return refCnt == null ? 0 : refCnt.get();
    }
}
| 7,758 |
0 | Create_ds/mantis/mantis-common/src/test/java/io/reactivx/mantis | Create_ds/mantis/mantis-common/src/test/java/io/reactivx/mantis/operators/OperatorGroupByTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.reactivx.mantis.operators;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Matchers;
import org.mockito.MockitoAnnotations;
import rx.Notification;
import rx.Observable;
import rx.Observable.OnSubscribe;
import rx.Observer;
import rx.Subscriber;
import rx.functions.Action0;
import rx.functions.Action1;
import rx.functions.Func1;
import rx.observables.GroupedObservable;
import rx.observers.TestSubscriber;
import rx.schedulers.Schedulers;
public class OperatorGroupByTest {
// Identity flatten: a GroupedObservable is itself an Observable of its values.
static Func1<GroupedObservable<Integer, Integer>, Observable<Integer>> FLATTEN_INTEGER = new Func1<GroupedObservable<Integer, Integer>, Observable<Integer>>() {
    @Override
    public Observable<Integer> call(GroupedObservable<Integer, Integer> t) {
        return t;
    }
};
// Even/odd predicates over Long and Integer, shared by tests in this class.
private static Func1<Long, Boolean> IS_EVEN = new Func1<Long, Boolean>() {
    @Override
    public Boolean call(Long n) {
        return n % 2 == 0;
    }
};
private static Func1<Integer, Boolean> IS_EVEN2 = new Func1<Integer, Boolean>() {
    @Override
    public Boolean call(Integer n) {
        return n % 2 == 0;
    }
};
// Maps a string to its length; the canonical group-by key selector in these tests.
final Func1<String, Integer> length = new Func1<String, Integer>() {
    @Override
    public Integer call(String s) {
        return s.length();
    }
};
// Doubles an integer.
Func1<Integer, Integer> dbl = new Func1<Integer, Integer>() {
    @Override
    public Integer call(Integer t1) {
        return t1 * 2;
    }
};
Func1<Integer, Integer> identity = identity();

/** Returns a Func1 that passes its argument through unchanged. */
public static <T> Func1<T, T> identity() {
    return new Func1<T, T>() {
        @Override
        public T call(T o) {
            return o;
        }
    };
}
/**
 * Drains a stream of groups into a Map of key -> observed values, subscribing to each
 * inner group as it arrives. Blocks until the outer observable completes.
 */
private static <K, V> Map<K, Collection<V>> toMap(Observable<GroupedObservable<K, V>> observable) {
    final ConcurrentHashMap<K, Collection<V>> result = new ConcurrentHashMap<K, Collection<V>>();
    observable.toBlocking().forEach(new Action1<GroupedObservable<K, V>>() {
        @Override
        public void call(final GroupedObservable<K, V> o) {
            result.put(o.getKey(), new ConcurrentLinkedQueue<V>());
            o.subscribe(new Action1<V>() {
                @Override
                public void call(V v) {
                    result.get(o.getKey()).add(v);
                }
            });
        }
    });
    return result;
}
@Test
public void testGroupBy() {
    // Group six words by their length and verify each bucket's contents and order.
    Map<Integer, Collection<String>> byLength = toMap(
            Observable.just("one", "two", "three", "four", "five", "six")
                    .lift(new OperatorGroupBy<String, Integer, String>(length)));
    assertEquals(3, byLength.size());
    assertArrayEquals(new String[] {"one", "two", "six"}, byLength.get(3).toArray());
    assertArrayEquals(new String[] {"four", "five"}, byLength.get(4).toArray());
    assertArrayEquals(new String[] {"three"}, byLength.get(5).toArray());
}
@Test
public void testGroupByWithElementSelector() {
    // Group by length while also projecting each element to its length via the
    // element-selector overload of the lifted operator.
    Map<Integer, Collection<Integer>> byLength = toMap(
            Observable.just("one", "two", "three", "four", "five", "six")
                    .lift(new OperatorGroupBy<String, Integer, Integer>(length, length)));
    assertEquals(3, byLength.size());
    assertArrayEquals(new Integer[] {3, 3, 3}, byLength.get(3).toArray());
    assertArrayEquals(new Integer[] {4, 4}, byLength.get(4).toArray());
    assertArrayEquals(new Integer[] {5}, byLength.get(5).toArray());
}
@Test
public void testGroupByWithElementSelector2() {
    // Same as testGroupByWithElementSelector but through the public groupBy operator
    // rather than lifting the operator directly.
    Map<Integer, Collection<Integer>> byLength = toMap(
            Observable.just("one", "two", "three", "four", "five", "six")
                    .groupBy(length, length));
    assertEquals(3, byLength.size());
    assertArrayEquals(new Integer[] {3, 3, 3}, byLength.get(3).toArray());
    assertArrayEquals(new Integer[] {4, 4}, byLength.get(4).toArray());
    assertArrayEquals(new Integer[] {5}, byLength.get(5).toArray());
}
@Test
public void testEmpty() {
    // Grouping an empty stream must yield no groups at all.
    Map<Integer, Collection<String>> groups = toMap(
            Observable.<String>empty().lift(new OperatorGroupBy<String, Integer, String>(length)));
    assertTrue(groups.isEmpty());
}
@Test
public void testError() {
    // A failing source must still deliver every event emitted before the error,
    // and the error itself must reach the outer subscriber.
    Observable<String> sourceStrings = Observable.just("one", "two", "three", "four", "five", "six");
    Observable<String> errorSource = Observable.error(new RuntimeException("forced failure"));
    Observable<String> source = Observable.concat(sourceStrings, errorSource);
    Observable<GroupedObservable<Integer, String>> grouped = source.lift(new OperatorGroupBy<String, Integer, String>(length));
    final AtomicInteger groupCounter = new AtomicInteger();
    final AtomicInteger eventCounter = new AtomicInteger();
    final AtomicReference<Throwable> error = new AtomicReference<Throwable>();
    grouped.flatMap(new Func1<GroupedObservable<Integer, String>, Observable<String>>() {
        @Override
        public Observable<String> call(final GroupedObservable<Integer, String> o) {
            groupCounter.incrementAndGet();
            return o.map(new Func1<String, String>() {
                @Override
                public String call(String v) {
                    return "Event => key: " + o.getKey() + " value: " + v;
                }
            });
        }
    }).subscribe(new Subscriber<String>() {
        @Override
        public void onCompleted() {
        }

        @Override
        public void onError(Throwable e) {
            e.printStackTrace();
            error.set(e);
        }

        @Override
        public void onNext(String v) {
            eventCounter.incrementAndGet();
            System.out.println(v);
        }
    });
    // Three distinct lengths (3, 4, 5) => three groups; six successful events; error captured.
    assertEquals(3, groupCounter.get());
    assertEquals(6, eventCounter.get());
    assertNotNull(error.get());
}
/**
 * Assert that only a single subscription to a stream occurs and that all events are received.
 *
 * @throws Throwable
 */
@Test
public void testGroupedEventStream() throws Throwable {
    final AtomicInteger eventCounter = new AtomicInteger();
    final AtomicInteger subscribeCounter = new AtomicInteger();
    final AtomicInteger groupCounter = new AtomicInteger();
    final CountDownLatch latch = new CountDownLatch(1);
    final int count = 10000;
    final int groupCount = 2000;
    // Source emits `count` events from a background thread, cycling through `groupCount`
    // distinct group keys, and counts how many times it is subscribed.
    Observable<Event> es = Observable.create(new Observable.OnSubscribe<Event>() {
        @Override
        public void call(final Subscriber<? super Event> observer) {
            System.out.println("*** Subscribing to EventStream ***");
            subscribeCounter.incrementAndGet();
            new Thread(new Runnable() {
                @Override
                public void run() {
                    for (int i = 0; i < count; i++) {
                        Event e = new Event();
                        e.source = i % groupCount;
                        e.message = "Event-" + i;
                        observer.onNext(e);
                    }
                    observer.onCompleted();
                }
            }).start();
        }
    });
    es.groupBy(new Func1<Event, Integer>() {
        @Override
        public Integer call(Event e) {
            return e.source;
        }
    }).flatMap(new Func1<GroupedObservable<Integer, Event>, Observable<String>>() {
        @Override
        public Observable<String> call(GroupedObservable<Integer, Event> eventGroupedObservable) {
            System.out.println("GroupedObservable Key: " + eventGroupedObservable.getKey());
            groupCounter.incrementAndGet();
            return eventGroupedObservable.map(new Func1<Event, String>() {
                @Override
                public String call(Event event) {
                    return "Source: " + event.source + " Message: " + event.message;
                }
            });
        }
    }).subscribe(new Subscriber<String>() {
        @Override
        public void onCompleted() {
            latch.countDown();
        }

        @Override
        public void onError(Throwable e) {
            e.printStackTrace();
            latch.countDown();
        }

        @Override
        public void onNext(String outputMessage) {
            System.out.println(outputMessage);
            eventCounter.incrementAndGet();
        }
    });
    // NOTE(review): the await result is ignored; on timeout the asserts below fail anyway.
    latch.await(5000, TimeUnit.MILLISECONDS);
    assertEquals(1, subscribeCounter.get());
    assertEquals(groupCount, groupCounter.get());
    assertEquals(count, eventCounter.get());
}
/*
* We will only take 1 group with 20 events from it and then unsubscribe.
*/
@Test
public void testUnsubscribeOnNestedTakeAndSyncInfiniteStream() throws InterruptedException {
    // Synchronous variant: take 1 group / 20 events, then verify the infinite source was
    // unsubscribed after emitting only the expected 39 events (20 wanted + 19 skipped).
    final AtomicInteger subscribeCounter = new AtomicInteger();
    final AtomicInteger sentEventCounter = new AtomicInteger();
    doTestUnsubscribeOnNestedTakeAndAsyncInfiniteStream(SYNC_INFINITE_OBSERVABLE_OF_EVENT(2, subscribeCounter, sentEventCounter), subscribeCounter);
    Thread.sleep(500);
    assertEquals(39, sentEventCounter.get());
}
/*
* We will only take 1 group with 20 events from it and then unsubscribe.
*/
@Test
public void testUnsubscribeOnNestedTakeAndAsyncInfiniteStream() throws InterruptedException {
    // Asynchronous variant of the test above; same expected event budget.
    final AtomicInteger subscribeCounter = new AtomicInteger();
    final AtomicInteger sentEventCounter = new AtomicInteger();
    doTestUnsubscribeOnNestedTakeAndAsyncInfiniteStream(ASYNC_INFINITE_OBSERVABLE_OF_EVENT(2, subscribeCounter, sentEventCounter), subscribeCounter);
    Thread.sleep(500);
    assertEquals(39, sentEventCounter.get());
}
/**
 * Shared driver: takes only the first group and 20 events from it, then asserts that the
 * stream was subscribed once, one group was observed, and exactly 20 events arrived.
 */
private void doTestUnsubscribeOnNestedTakeAndAsyncInfiniteStream(Observable<Event> es, AtomicInteger subscribeCounter) throws InterruptedException {
    final AtomicInteger eventCounter = new AtomicInteger();
    final AtomicInteger groupCounter = new AtomicInteger();
    final CountDownLatch latch = new CountDownLatch(1);
    es.groupBy(new Func1<Event, Integer>() {
        @Override
        public Integer call(Event e) {
            return e.source;
        }
    })
            .take(1) // we want only the first group
            .flatMap(new Func1<GroupedObservable<Integer, Event>, Observable<String>>() {
                @Override
                public Observable<String> call(GroupedObservable<Integer, Event> eventGroupedObservable) {
                    System.out.println("testUnsubscribe => GroupedObservable Key: " + eventGroupedObservable.getKey());
                    groupCounter.incrementAndGet();
                    return eventGroupedObservable
                            .take(20) // limit to only 20 events on this group
                            .map(new Func1<Event, String>() {
                                @Override
                                public String call(Event event) {
                                    return "testUnsubscribe => Source: " + event.source + " Message: " + event.message;
                                }
                            });
                }
            }).subscribe(new Subscriber<String>() {
                @Override
                public void onCompleted() {
                    latch.countDown();
                }

                @Override
                public void onError(Throwable e) {
                    e.printStackTrace();
                    latch.countDown();
                }

                @Override
                public void onNext(String outputMessage) {
                    System.out.println(outputMessage);
                    eventCounter.incrementAndGet();
                }
            });
    if (!latch.await(2000, TimeUnit.MILLISECONDS)) {
        fail("timed out so likely did not unsubscribe correctly");
    }
    assertEquals(1, subscribeCounter.get());
    assertEquals(1, groupCounter.get());
    assertEquals(20, eventCounter.get());
    // sentEvents will go until 'eventCounter' hits 20 and then unsubscribes
    // which means it will also send (but ignore) the 19/20 events for the other group
    // It will not however send all 100 events.
}
@Test
public void testUnsubscribeViaTakeOnGroupThenMergeAndTake() {
    // take(2) on the outer group stream plus take(30) downstream: verifies both the received
    // count and how many events the source actually emitted before unsubscription.
    final AtomicInteger subscribeCounter = new AtomicInteger();
    final AtomicInteger sentEventCounter = new AtomicInteger();
    final AtomicInteger eventCounter = new AtomicInteger();
    SYNC_INFINITE_OBSERVABLE_OF_EVENT(4, subscribeCounter, sentEventCounter)
            .groupBy(new Func1<Event, Integer>() {
                @Override
                public Integer call(Event e) {
                    return e.source;
                }
            })
            // take 2 of the 4 groups
            .take(2)
            .flatMap(new Func1<GroupedObservable<Integer, Event>, Observable<String>>() {
                @Override
                public Observable<String> call(GroupedObservable<Integer, Event> eventGroupedObservable) {
                    return eventGroupedObservable
                            .map(new Func1<Event, String>() {
                                @Override
                                public String call(Event event) {
                                    return "testUnsubscribe => Source: " + event.source + " Message: " + event.message;
                                }
                            });
                }
            })
            .take(30).subscribe(new Action1<String>() {
                @Override
                public void call(String s) {
                    eventCounter.incrementAndGet();
                    System.out.println("=> " + s);
                }
            });
    assertEquals(30, eventCounter.get());
    // we should send 28 additional events that are filtered out as they are in the groups we skip
    assertEquals(58, sentEventCounter.get());
}
@Test
public void testUnsubscribeViaTakeOnGroupThenTakeOnInner() {
    // Different take() limits per inner group (10 from group 1, 5 from group 2);
    // verifies the combined received count and the number of source emissions.
    final AtomicInteger subscribeCounter = new AtomicInteger();
    final AtomicInteger sentEventCounter = new AtomicInteger();
    final AtomicInteger eventCounter = new AtomicInteger();
    SYNC_INFINITE_OBSERVABLE_OF_EVENT(4, subscribeCounter, sentEventCounter)
            .groupBy(new Func1<Event, Integer>() {
                @Override
                public Integer call(Event e) {
                    return e.source;
                }
            })
            // take 2 of the 4 groups
            .take(2)
            .flatMap(new Func1<GroupedObservable<Integer, Event>, Observable<String>>() {
                @Override
                public Observable<String> call(GroupedObservable<Integer, Event> eventGroupedObservable) {
                    int numToTake = 0;
                    if (eventGroupedObservable.getKey() == 1) {
                        numToTake = 10;
                    } else if (eventGroupedObservable.getKey() == 2) {
                        numToTake = 5;
                    }
                    return eventGroupedObservable
                            .take(numToTake)
                            .map(new Func1<Event, String>() {
                                @Override
                                public String call(Event event) {
                                    return "testUnsubscribe => Source: " + event.source + " Message: " + event.message;
                                }
                            });
                }
            })
            .subscribe(new Action1<String>() {
                @Override
                public void call(String s) {
                    eventCounter.incrementAndGet();
                    System.out.println("=> " + s);
                }
            });
    assertEquals(15, eventCounter.get());
    // we should send 22 additional events that are filtered out as they are skipped while taking the 15 we want
    assertEquals(37, sentEventCounter.get());
}
@Test
public void testStaggeredCompletion() throws InterruptedException {
    // One group is delayed by 100ms while the other completes immediately; the merged
    // stream must still complete with all 100 events delivered.
    final AtomicInteger eventCounter = new AtomicInteger();
    final CountDownLatch latch = new CountDownLatch(1);
    Observable.range(0, 100)
            .groupBy(new Func1<Integer, Integer>() {
                @Override
                public Integer call(Integer i) {
                    return i % 2;
                }
            })
            .flatMap(new Func1<GroupedObservable<Integer, Integer>, Observable<Integer>>() {
                @Override
                public Observable<Integer> call(GroupedObservable<Integer, Integer> group) {
                    if (group.getKey() == 0) {
                        return group.delay(100, TimeUnit.MILLISECONDS).map(new Func1<Integer, Integer>() {
                            @Override
                            public Integer call(Integer t) {
                                return t * 10;
                            }
                        });
                    } else {
                        return group;
                    }
                }
            })
            .subscribe(new Subscriber<Integer>() {
                @Override
                public void onCompleted() {
                    System.out.println("=> onCompleted");
                    latch.countDown();
                }

                @Override
                public void onError(Throwable e) {
                    e.printStackTrace();
                    latch.countDown();
                }

                @Override
                public void onNext(Integer s) {
                    eventCounter.incrementAndGet();
                    System.out.println("=> " + s);
                }
            });
    if (!latch.await(3000, TimeUnit.MILLISECONDS)) {
        fail("timed out");
    }
    assertEquals(100, eventCounter.get());
}
@Test(timeout = 1000)
public void testCompletionIfInnerNotSubscribed() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicInteger eventCounter = new AtomicInteger();
    // Subscribe only to the outer stream of groups (never to the inner
    // GroupedObservables); the outer stream must still complete.
    Observable.range(0, 100)
            .groupBy(new Func1<Integer, Integer>() {
                @Override
                public Integer call(Integer i) {
                    return i % 2;
                }
            })
            .subscribe(new Subscriber<GroupedObservable<Integer, Integer>>() {
                @Override
                public void onCompleted() {
                    latch.countDown();
                }

                @Override
                public void onError(Throwable e) {
                    e.printStackTrace();
                    latch.countDown();
                }

                @Override
                public void onNext(GroupedObservable<Integer, Integer> s) {
                    eventCounter.incrementAndGet();
                    System.out.println("=> " + s);
                }
            });
    if (!latch.await(500, TimeUnit.MILLISECONDS)) {
        // Must be fully qualified: an unqualified fail(...) resolves to this class's
        // Func1-building fail(T) helper (member methods shadow statically imported
        // methods, JLS 6.4.1) and would silently do nothing instead of failing.
        org.junit.Assert.fail("timed out - never got completion");
    }
    // Two groups (even/odd) should have been emitted.
    assertEquals(2, eventCounter.get());
}
@Test
public void testIgnoringGroups() {
    final AtomicInteger subscribeCounter = new AtomicInteger();
    final AtomicInteger sentEventCounter = new AtomicInteger();
    final AtomicInteger eventCounter = new AtomicInteger();
    // 4 groups; groups with key >= 2 are drained but fully filtered out, so only
    // half of the emitted events (keys 0 and 1) reach the subscriber.
    SYNC_INFINITE_OBSERVABLE_OF_EVENT(4, subscribeCounter, sentEventCounter)
            .groupBy(new Func1<Event, Integer>() {
                @Override
                public Integer call(Event e) {
                    return e.source;
                }
            })
            .flatMap(new Func1<GroupedObservable<Integer, Event>, Observable<String>>() {
                @Override
                public Observable<String> call(GroupedObservable<Integer, Event> eventGroupedObservable) {
                    Observable<Event> eventStream = eventGroupedObservable;
                    if (eventGroupedObservable.getKey() >= 2) {
                        // filter these
                        eventStream = eventGroupedObservable.filter(new Func1<Event, Boolean>() {
                            @Override
                            public Boolean call(Event t1) {
                                return false;
                            }
                        });
                    }
                    return eventStream
                            .map(new Func1<Event, String>() {
                                @Override
                                public String call(Event event) {
                                    return "testUnsubscribe => Source: " + event.source + " Message: " + event.message;
                                }
                            });
                }
            })
            .take(30).subscribe(new Action1<String>() {
                @Override
                public void call(String s) {
                    eventCounter.incrementAndGet();
                    System.out.println("=> " + s);
                }
            });
    assertEquals(30, eventCounter.get());
    // we should send 30 additional events that are filtered out as they are in the groups we skip
    assertEquals(60, sentEventCounter.get());
}
@Test
public void testFirstGroupsCompleteAndParentSlowToThenEmitFinalGroupsAndThenComplete() throws InterruptedException {
    final CountDownLatch first = new CountDownLatch(2); // there are two groups to first complete
    final ArrayList<String> results = new ArrayList<String>();
    // Source emits two values for each of groups 1 and 2, then blocks until both
    // of those groups have completed downstream before emitting group 3.
    Observable.create(new OnSubscribe<Integer>() {
        @Override
        public void call(Subscriber<? super Integer> sub) {
            sub.onNext(1);
            sub.onNext(2);
            sub.onNext(1);
            sub.onNext(2);
            try {
                first.await();
            } catch (InterruptedException e) {
                sub.onError(e);
                return;
            }
            sub.onNext(3);
            sub.onNext(3);
            sub.onCompleted();
        }
    }).groupBy(new Func1<Integer, Integer>() {
        @Override
        public Integer call(Integer t) {
            return t; // identity key: each value is its own group
        }
    }).flatMap(new Func1<GroupedObservable<Integer, Integer>, Observable<String>>() {
        @Override
        public Observable<String> call(final GroupedObservable<Integer, Integer> group) {
            if (group.getKey() < 3) {
                return group.map(new Func1<Integer, String>() {
                    @Override
                    public String call(Integer t1) {
                        return "first groups: " + t1;
                    }
                })
                // must take(2) so an onCompleted + unsubscribe happens on these first 2 groups
                .take(2).doOnCompleted(new Action0() {
                    @Override
                    public void call() {
                        first.countDown();
                    }
                });
            } else {
                return group.map(new Func1<Integer, String>() {
                    @Override
                    public String call(Integer t1) {
                        return "last group: " + t1;
                    }
                });
            }
        }
    }).toBlocking().forEach(new Action1<String>() {
        @Override
        public void call(String s) {
            results.add(s);
        }
    });
    System.out.println("Results: " + results);
    // Two events from each of the three groups.
    assertEquals(6, results.size());
}
@Test
public void testFirstGroupsCompleteAndParentSlowToThenEmitFinalGroupsWhichThenSubscribesOnAndDelaysAndThenCompletes() throws InterruptedException {
    System.err.println("----------------------------------------------------------------------------------------------");
    final CountDownLatch first = new CountDownLatch(2); // there are two groups to first complete
    final ArrayList<String> results = new ArrayList<String>();
    // Groups 1 and 2 complete first; the last group (3) is then consumed on a new
    // thread via subscribeOn with a 400ms delay. All 6 events must still arrive
    // and the outer (blocking) stream must terminate.
    Observable.create(new OnSubscribe<Integer>() {
        @Override
        public void call(Subscriber<? super Integer> sub) {
            sub.onNext(1);
            sub.onNext(2);
            sub.onNext(1);
            sub.onNext(2);
            try {
                first.await();
            } catch (InterruptedException e) {
                sub.onError(e);
                return;
            }
            sub.onNext(3);
            sub.onNext(3);
            sub.onCompleted();
        }
    }).groupBy(new Func1<Integer, Integer>() {
        @Override
        public Integer call(Integer t) {
            return t; // identity key
        }
    }).flatMap(new Func1<GroupedObservable<Integer, Integer>, Observable<String>>() {
        @Override
        public Observable<String> call(final GroupedObservable<Integer, Integer> group) {
            if (group.getKey() < 3) {
                return group.map(new Func1<Integer, String>() {
                    @Override
                    public String call(Integer t1) {
                        return "first groups: " + t1;
                    }
                })
                // must take(2) so an onCompleted + unsubscribe happens on these first 2 groups
                .take(2).doOnCompleted(new Action0() {
                    @Override
                    public void call() {
                        first.countDown();
                    }
                });
            } else {
                return group.subscribeOn(Schedulers.newThread()).delay(400, TimeUnit.MILLISECONDS).map(new Func1<Integer, String>() {
                    @Override
                    public String call(Integer t1) {
                        return "last group: " + t1;
                    }
                }).doOnEach(new Action1<Notification<? super String>>() {
                    @Override
                    public void call(Notification<? super String> t1) {
                        System.err.println("subscribeOn notification => " + t1);
                    }
                });
            }
        }
    }).doOnEach(new Action1<Notification<? super String>>() {
        @Override
        public void call(Notification<? super String> t1) {
            System.err.println("outer notification => " + t1);
        }
    }).toBlocking().forEach(new Action1<String>() {
        @Override
        public void call(String s) {
            results.add(s);
        }
    });
    System.out.println("Results: " + results);
    assertEquals(6, results.size());
}
@Test
public void testFirstGroupsCompleteAndParentSlowToThenEmitFinalGroupsWhichThenObservesOnAndDelaysAndThenCompletes() throws InterruptedException {
    final CountDownLatch first = new CountDownLatch(2); // there are two groups to first complete
    final ArrayList<String> results = new ArrayList<String>();
    // Same shape as the subscribeOn variant, but the last group (3) is observed on
    // a new thread via observeOn with a 400ms delay. All 6 events must arrive.
    Observable.create(new OnSubscribe<Integer>() {
        @Override
        public void call(Subscriber<? super Integer> sub) {
            sub.onNext(1);
            sub.onNext(2);
            sub.onNext(1);
            sub.onNext(2);
            try {
                first.await();
            } catch (InterruptedException e) {
                sub.onError(e);
                return;
            }
            sub.onNext(3);
            sub.onNext(3);
            sub.onCompleted();
        }
    }).groupBy(new Func1<Integer, Integer>() {
        @Override
        public Integer call(Integer t) {
            return t; // identity key
        }
    }).flatMap(new Func1<GroupedObservable<Integer, Integer>, Observable<String>>() {
        @Override
        public Observable<String> call(final GroupedObservable<Integer, Integer> group) {
            if (group.getKey() < 3) {
                return group.map(new Func1<Integer, String>() {
                    @Override
                    public String call(Integer t1) {
                        return "first groups: " + t1;
                    }
                })
                // must take(2) so an onCompleted + unsubscribe happens on these first 2 groups
                .take(2).doOnCompleted(new Action0() {
                    @Override
                    public void call() {
                        first.countDown();
                    }
                });
            } else {
                return group.observeOn(Schedulers.newThread()).delay(400, TimeUnit.MILLISECONDS).map(new Func1<Integer, String>() {
                    @Override
                    public String call(Integer t1) {
                        return "last group: " + t1;
                    }
                });
            }
        }
    }).toBlocking().forEach(new Action1<String>() {
        @Override
        public void call(String s) {
            results.add(s);
        }
    });
    System.out.println("Results: " + results);
    assertEquals(6, results.size());
}
@Test
public void testGroupsWithNestedSubscribeOn() throws InterruptedException {
    final ArrayList<String> results = new ArrayList<String>();
    // Each group is consumed on its own new thread via subscribeOn; all 4 source
    // values must still be delivered to the blocking outer subscriber.
    Observable.create(new OnSubscribe<Integer>() {
        @Override
        public void call(Subscriber<? super Integer> sub) {
            sub.onNext(1);
            sub.onNext(2);
            sub.onNext(1);
            sub.onNext(2);
            sub.onCompleted();
        }
    }).groupBy(new Func1<Integer, Integer>() {
        @Override
        public Integer call(Integer t) {
            return t; // identity key
        }
    }).flatMap(new Func1<GroupedObservable<Integer, Integer>, Observable<String>>() {
        @Override
        public Observable<String> call(final GroupedObservable<Integer, Integer> group) {
            return group.subscribeOn(Schedulers.newThread()).map(new Func1<Integer, String>() {
                @Override
                public String call(Integer t1) {
                    System.out.println("Received: " + t1 + " on group : " + group.getKey());
                    return "first groups: " + t1;
                }
            });
        }
    }).doOnEach(new Action1<Notification<? super String>>() {
        @Override
        public void call(Notification<? super String> t1) {
            System.out.println("notification => " + t1);
        }
    }).toBlocking().forEach(new Action1<String>() {
        @Override
        public void call(String s) {
            results.add(s);
        }
    });
    System.out.println("Results: " + results);
    assertEquals(4, results.size());
}
@Test
public void testGroupsWithNestedObserveOn() throws InterruptedException {
    final ArrayList<String> results = new ArrayList<String>();
    // Each group is observed on a new thread with a 400ms delay; all 4 source
    // values must still be delivered to the blocking outer subscriber.
    Observable.create(new OnSubscribe<Integer>() {
        @Override
        public void call(Subscriber<? super Integer> sub) {
            sub.onNext(1);
            sub.onNext(2);
            sub.onNext(1);
            sub.onNext(2);
            sub.onCompleted();
        }
    }).groupBy(new Func1<Integer, Integer>() {
        @Override
        public Integer call(Integer t) {
            return t; // identity key
        }
    }).flatMap(new Func1<GroupedObservable<Integer, Integer>, Observable<String>>() {
        @Override
        public Observable<String> call(final GroupedObservable<Integer, Integer> group) {
            return group.observeOn(Schedulers.newThread()).delay(400, TimeUnit.MILLISECONDS).map(new Func1<Integer, String>() {
                @Override
                public String call(Integer t1) {
                    return "first groups: " + t1;
                }
            });
        }
    }).toBlocking().forEach(new Action1<String>() {
        @Override
        public void call(String s) {
            results.add(s);
        }
    });
    System.out.println("Results: " + results);
    assertEquals(4, results.size());
}
// Wraps the synchronous infinite event source so emission happens on a dedicated
// new thread, making the stream asynchronous relative to the subscriber.
Observable<Event> ASYNC_INFINITE_OBSERVABLE_OF_EVENT(final int numGroups, final AtomicInteger subscribeCounter, final AtomicInteger sentEventCounter) {
    return SYNC_INFINITE_OBSERVABLE_OF_EVENT(numGroups, subscribeCounter, sentEventCounter).subscribeOn(Schedulers.newThread());
}
// Synchronously emits Event-1, Event-2, ... without end until the subscriber
// unsubscribes; event sources cycle i % numGroups so events spread evenly across
// numGroups group keys. subscribeCounter counts subscriptions; sentEventCounter
// counts every event pushed (including ones later dropped downstream).
Observable<Event> SYNC_INFINITE_OBSERVABLE_OF_EVENT(final int numGroups, final AtomicInteger subscribeCounter, final AtomicInteger sentEventCounter) {
    return Observable.create(new OnSubscribe<Event>() {
        @Override
        public void call(final Subscriber<? super Event> op) {
            subscribeCounter.incrementAndGet();
            int i = 0;
            while (!op.isUnsubscribed()) {
                i++;
                Event e = new Event();
                e.source = i % numGroups;
                e.message = "Event-" + i;
                op.onNext(e);
                sentEventCounter.incrementAndGet();
            }
            op.onCompleted(); // reached only after downstream unsubscribes
        }
    });
}
@Test
public void testGroupByOnAsynchronousSourceAcceptsMultipleSubscriptions() throws InterruptedException {
    // An asynchronous source: one tick emitted after 10ms.
    Observable<Long> ticks = Observable.interval(10, TimeUnit.MILLISECONDS).take(1);
    // Group the ticks by parity.
    Observable<GroupedObservable<Boolean, Long>> grouped = ticks.groupBy(IS_EVEN);
    // Two independent mock observers subscribe to the same grouped stream.
    @SuppressWarnings("unchecked")
    Observer<GroupedObservable<Boolean, Long>> firstObserver = mock(Observer.class);
    @SuppressWarnings("unchecked")
    Observer<GroupedObservable<Boolean, Long>> secondObserver = mock(Observer.class);
    grouped.subscribe(firstObserver);
    grouped.subscribe(secondObserver);
    // Neither subscription may surface an error.
    verify(firstObserver, never()).onError(Matchers.<Throwable>any());
    verify(secondObserver, never()).onError(Matchers.<Throwable>any());
}
@Test
public void testGroupByBackpressure() throws InterruptedException {
    TestSubscriber<String> ts = new TestSubscriber<String>();
    // The even group is deliberately slow (1ms sleep per element on a computation
    // thread) while the odd group is fast; the test passes if backpressure keeps
    // the pipeline from erroring (e.g. MissingBackpressureException).
    Observable.range(1, 4000)
            .groupBy(IS_EVEN2)
            .flatMap(new Func1<GroupedObservable<Boolean, Integer>, Observable<String>>() {
                @Override
                public Observable<String> call(final GroupedObservable<Boolean, Integer> g) {
                    return g.observeOn(Schedulers.computation()).map(new Func1<Integer, String>() {
                        @Override
                        public String call(Integer l) {
                            if (g.getKey()) {
                                try {
                                    Thread.sleep(1); // slow consumer for the even group
                                } catch (InterruptedException e) {
                                }
                                return l + " is even.";
                            } else {
                                return l + " is odd.";
                            }
                        }
                    });
                }
            }).subscribe(ts);
    ts.awaitTerminalEvent();
    ts.assertNoErrors();
}
/**
 * Builds a constant {@link Func1} that ignores its input and always yields
 * {@code value}.
 */
<T, R> Func1<T, R> just(final R value) {
    return new Func1<T, R>() {
        @Override
        public R call(T ignored) {
            return value;
        }
    };
}
/**
 * Builds a {@link Func1} that always throws {@code RuntimeException("Forced failure")};
 * the {@code dummy} argument exists only to pin the return type {@code T}.
 *
 * <p>WARNING: because member methods shadow statically imported ones, an
 * unqualified {@code fail("...")} anywhere in this class resolves HERE — building
 * and discarding a Func1 — rather than calling {@code org.junit.Assert.fail}.
 */
<T> Func1<Integer, T> fail(T dummy) {
    return new Func1<Integer, T>() {
        @Override
        public T call(Integer t1) {
            throw new RuntimeException("Forced failure");
        }
    };
}
/**
 * Builds a {@link Func1} that always throws, regardless of input; the
 * {@code dummy2} argument only pins the return type {@code R}.
 */
<T, R> Func1<T, R> fail2(R dummy2) {
    return new Func1<T, R>() {
        @Override
        public R call(T input) {
            throw new RuntimeException("Forced failure");
        }
    };
}
// Initializes any @Mock-annotated fields on this test class before each test.
@Before
public void before() {
    MockitoAnnotations.initMocks(this);
}
@Test
public void normalBehavior() {
    Observable<String> source = Observable.from(Arrays.asList(
            "  foo",
            " FoO ",
            "baR  ",
            "foO ",
            " Baz   ",
            "  qux ",
            "   bar",
            " BAR  ",
            "FOO ",
            "baz  ",
            " bAZ ",
            "    fOo    "
    ));
    /**
     * foo FoO foO FOO fOo
     * baR bar BAR
     * Baz baz bAZ
     * qux
     *
     */
    // Key: trimmed + lower-cased, so case/whitespace variants share a group.
    Func1<String, String> keysel = new Func1<String, String>() {
        @Override
        public String call(String t1) {
            return t1.trim().toLowerCase();
        }
    };
    // Value: the raw element doubled (the value itself is ignored below; only a
    // per-group counter is emitted).
    Func1<String, String> valuesel = new Func1<String, String>() {
        @Override
        public String call(String t1) {
            return t1 + t1;
        }
    };
    // Each group emits "<key>-<n>" for its first two elements only (take(2)).
    Observable<String> m = source.groupBy(
            keysel, valuesel).flatMap(new Func1<GroupedObservable<String, String>, Observable<String>>() {
        @Override
        public Observable<String> call(final GroupedObservable<String, String> g) {
            System.out.println("-----------> NEXT: " + g.getKey());
            return g.take(2).map(new Func1<String, String>() {
                int count = 0;

                @Override
                public String call(String v) {
                    return g.getKey() + "-" + count++;
                }
            });
        }
    });
    TestSubscriber<String> ts = new TestSubscriber<String>();
    m.subscribe(ts);
    ts.awaitTerminalEvent();
    System.out.println("ts .get " + ts.getOnNextEvents());
    ts.assertNoErrors();
    // Fixed: JUnit's assertEquals takes (expected, actual) — the original call had
    // the arguments swapped, which produces a misleading failure message.
    assertEquals(Arrays.asList("foo-0", "foo-1", "bar-0", "foo-0", "baz-0", "qux-0", "bar-1", "bar-0", "foo-1", "baz-1", "baz-0", "foo-0"),
            ts.getOnNextEvents());
}
@Test
public void keySelectorThrows() {
    // A throwing key selector must surface exactly one onError and no onNext.
    TestSubscriber<Integer> subscriber = new TestSubscriber<Integer>();
    Observable.just(0, 1, 2, 3, 4, 5, 6)
            .groupBy(fail(0), dbl)
            .flatMap(FLATTEN_INTEGER)
            .subscribe(subscriber);
    subscriber.awaitTerminalEvent();
    assertEquals(1, subscriber.getOnErrorEvents().size());
    assertEquals(0, subscriber.getOnNextEvents().size());
}
@Test
public void valueSelectorThrows() {
    // A throwing value selector must surface exactly one onError and no onNext.
    TestSubscriber<Integer> subscriber = new TestSubscriber<Integer>();
    Observable.just(0, 1, 2, 3, 4, 5, 6)
            .groupBy(identity, fail(0))
            .flatMap(FLATTEN_INTEGER)
            .subscribe(subscriber);
    subscriber.awaitTerminalEvent();
    assertEquals(1, subscriber.getOnErrorEvents().size());
    assertEquals(0, subscriber.getOnNextEvents().size());
}
@Test
public void innerEscapeCompleted() {
    // A single-element source: the inner group must complete cleanly when the
    // parent completes immediately after the first emission.
    TestSubscriber<Object> subscriber = new TestSubscriber<Object>();
    Observable.just(0)
            .groupBy(identity, dbl)
            .flatMap(FLATTEN_INTEGER)
            .subscribe(subscriber);
    subscriber.awaitTerminalEvent();
    subscriber.assertNoErrors();
    System.out.println(subscriber.getOnNextEvents());
}
/**
 * Assert we get an IllegalStateException if trying to subscribe to an inner GroupedObservable more than once
 */
@Test
public void testExceptionIfSubscribeToChildMoreThanOnce() {
    Observable<Integer> source = Observable.just(0);
    // Capture the single inner group emitted by groupBy.
    final AtomicReference<GroupedObservable<Integer, Integer>> inner = new AtomicReference<GroupedObservable<Integer, Integer>>();
    Observable<GroupedObservable<Integer, Integer>> m = source.groupBy(identity, dbl);
    m.subscribe(new Action1<GroupedObservable<Integer, Integer>>() {
        @Override
        public void call(GroupedObservable<Integer, Integer> t1) {
            inner.set(t1);
        }
    });
    // First subscription consumes the group...
    inner.get().subscribe();
    @SuppressWarnings("unchecked")
    Observer<Integer> o2 = mock(Observer.class);
    // ...so a second subscription must fail with IllegalStateException and
    // deliver no events.
    inner.get().subscribe(o2);
    verify(o2, never()).onCompleted();
    verify(o2, never()).onNext(anyInt());
    verify(o2).onError(any(IllegalStateException.class));
}
// NOTE(review): name differs from testGroupByBackpressure above only by case;
// consider renaming for consistency.
@Test
public void testgroupByBackpressure() throws InterruptedException {
    TestSubscriber<String> ts = new TestSubscriber<String>();
    // Even group is slow for its first 400 elements; doOnCompleted hooks trace
    // group completion before and after the observeOn boundary. Passes as long
    // as backpressure keeps the pipeline error-free.
    Observable.range(1, 4000).groupBy(IS_EVEN2).flatMap(new Func1<GroupedObservable<Boolean, Integer>, Observable<String>>() {
        @Override
        public Observable<String> call(final GroupedObservable<Boolean, Integer> g) {
            return g.doOnCompleted(new Action0() {
                @Override
                public void call() {
                    System.out.println("//////////////////// COMPLETED-A");
                }
            }).observeOn(Schedulers.computation()).map(new Func1<Integer, String>() {
                int c = 0;

                @Override
                public String call(Integer l) {
                    if (g.getKey()) {
                        if (c++ < 400) {
                            try {
                                Thread.sleep(1); // throttle only the first 400 even values
                            } catch (InterruptedException e) {
                            }
                        }
                        return l + " is even.";
                    } else {
                        return l + " is odd.";
                    }
                }
            }).doOnCompleted(new Action0() {
                @Override
                public void call() {
                    System.out.println("//////////////////// COMPLETED-B");
                }
            });
        }
    }).doOnEach(new Action1<Notification<? super String>>() {
        @Override
        public void call(Notification<? super String> t1) {
            System.out.println("NEXT: " + t1);
        }
    }).subscribe(ts);
    ts.awaitTerminalEvent();
    ts.assertNoErrors();
}
@Test
public void testgroupByBackpressure2() throws InterruptedException {
    TestSubscriber<String> ts = new TestSubscriber<String>();
    // Each group is truncated with take(2) before crossing the observeOn
    // boundary; the stream must still terminate without backpressure errors.
    Observable.range(1, 4000).groupBy(IS_EVEN2).flatMap(new Func1<GroupedObservable<Boolean, Integer>, Observable<String>>() {
        @Override
        public Observable<String> call(final GroupedObservable<Boolean, Integer> g) {
            return g.take(2).observeOn(Schedulers.computation()).map(new Func1<Integer, String>() {
                @Override
                public String call(Integer l) {
                    if (g.getKey()) {
                        try {
                            Thread.sleep(1); // slow consumer for the even group
                        } catch (InterruptedException e) {
                        }
                        return l + " is even.";
                    } else {
                        return l + " is odd.";
                    }
                }
            });
        }
    }).subscribe(ts);
    ts.awaitTerminalEvent();
    ts.assertNoErrors();
}
/** Simple test payload: a group key ({@code source}) plus a message string. */
private static class Event {
    int source;
    String message;

    @Override
    public String toString() {
        return String.format("Event => source: %d message: %s", source, message);
    }
}
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.reactivx.mantis.operators;
/**
 * Placeholder for {@code BufferOnBackPressureOperator} tests.
 *
 * <p>The previous buffer-and-drop, onComplete, onError and no-buffer tests had
 * been fully commented out (disabled) for a long time; the dead code has been
 * removed to keep the file readable. Recover the original test bodies from
 * version control if the operator is resurrected.
 */
public class BufferOnBackPressureOperatorTest {
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.reactivx.common.consistenthashing;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import io.mantisrx.common.network.HashFunctions;
import io.mantisrx.common.network.ServerSlotManager;
import io.mantisrx.common.network.ServerSlotManager.SlotAssignmentManager;
import io.mantisrx.common.network.WritableEndpoint;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.junit.Test;
/**
 * Exercises {@link ServerSlotManager}: endpoints registered with the same
 * {@code clientId} share a single consistent-hash ring and split the keyspace
 * between them, while endpoints registered with different {@code clientId}s get
 * independent rings (each ring sees every message).
 */
public class ServerSlotManagerTest {

    /** Builds a request-parameter map carrying a single {@code clientId} value. */
    private static Map<String, List<String>> clientIdParams(String clientId) {
        List<String> values = new ArrayList<String>();
        values.add(clientId);
        Map<String, List<String>> params = new HashMap<String, List<String>>();
        params.put("clientId", values);
        return params;
    }

    @Test
    public void oneNodeTest() {
        WritableEndpoint<Void> n1 = new WritableEndpoint<Void>("host1", 7001);
        Map<String, List<String>> params = new HashMap<String, List<String>>();
        ServerSlotManager<Void> ssm = new ServerSlotManager<Void>(HashFunctions.ketama());
        SlotAssignmentManager<Void> sam = ssm.registerServer(n1, params);
        int hostHitCountNode1 = 0;
        int nonHitCount = 0;
        int MSG_COUNT = 100000;
        for (int i = 0; i < MSG_COUNT; i++) {
            if (sam.filter(n1, ("msg:" + i).getBytes())) {
                hostHitCountNode1++;
            } else {
                nonHitCount++;
            }
        }
        // A lone endpoint owns every slot.
        assertEquals(0, nonHitCount);
        assertEquals(MSG_COUNT, hostHitCountNode1);
        //cleanup
        ssm.deregisterServer(n1, params);
    }

    @Test
    public void twoNodeSameClientIdTest() {
        WritableEndpoint<Void> n1 = new WritableEndpoint<Void>("host1", 7001);
        WritableEndpoint<Void> n2 = new WritableEndpoint<Void>("host2", 7001);
        Map<String, List<String>> params = clientIdParams("client1");
        ServerSlotManager<Void> ssm = new ServerSlotManager<Void>(HashFunctions.ketama());
        SlotAssignmentManager<Void> sm = ssm.registerServer(n1, params);
        SlotAssignmentManager<Void> sm2 = ssm.registerServer(n2, params);
        // Same clientId => both endpoints share one slot-assignment manager.
        assertEquals(sm, sm2);
        int hostHitCountNode1 = 0;
        int hostHitCountNode2 = 0;
        int nonHitCount = 0;
        int MSG_COUNT = 1000000;
        for (int i = 0; i < MSG_COUNT; i++) {
            String msg = "msg:" + i;
            if (sm.filter(n1, msg.getBytes())) {
                hostHitCountNode1++;
            } else if (sm2.filter(n2, msg.getBytes())) {
                hostHitCountNode2++;
            } else {
                nonHitCount++;
            }
        }
        // Each node should own roughly half the keyspace.
        double host1HitPercentage = (double) hostHitCountNode1 / (double) MSG_COUNT;
        System.out.println("host1 hit % " + host1HitPercentage);
        assertTrue(host1HitPercentage > 0.4 && host1HitPercentage < 0.6);
        assertEquals(0, nonHitCount);
        assertEquals(MSG_COUNT, hostHitCountNode1 + hostHitCountNode2);
        ssm.deregisterServer(n1, params);
        ssm.deregisterServer(n2, params);
    }

    @Test
    public void threeNodeSameClientIdTest() {
        WritableEndpoint<Void> n1 = new WritableEndpoint<Void>("host1", 7001);
        WritableEndpoint<Void> n2 = new WritableEndpoint<Void>("host2", 7001);
        Map<String, List<String>> params = clientIdParams("client1");
        ServerSlotManager<Void> ssm = new ServerSlotManager<Void>(HashFunctions.ketama());
        SlotAssignmentManager<Void> sm = ssm.registerServer(n1, params);
        SlotAssignmentManager<Void> sm2 = ssm.registerServer(n2, params);
        assertEquals(sm, sm2);
        int hostHitCountNode1 = 0;
        int hostHitCountNode2 = 0;
        int nonHitCount = 0;
        int MSG_COUNT = 1000000;
        for (int i = 0; i < MSG_COUNT; i++) {
            String msg = "msg:" + i;
            if (sm.filter(n1, msg.getBytes())) {
                hostHitCountNode1++;
            } else if (sm2.filter(n2, msg.getBytes())) {
                hostHitCountNode2++;
            } else {
                nonHitCount++;
            }
        }
        double host1HitPercentage = (double) hostHitCountNode1 / (double) MSG_COUNT;
        System.out.println("host1 hit % " + host1HitPercentage);
        assertTrue(host1HitPercentage > 0.4 && host1HitPercentage < 0.6);
        assertEquals(0, nonHitCount);
        assertEquals(MSG_COUNT, hostHitCountNode1 + hostHitCountNode2);
        // add another node with the same clientId and re-check the split
        WritableEndpoint<Void> n3 = new WritableEndpoint<Void>("host3", 7001);
        SlotAssignmentManager<Void> sm3 = ssm.registerServer(n3, params);
        hostHitCountNode1 = 0;
        hostHitCountNode2 = 0;
        int hostHitCountNode3 = 0;
        nonHitCount = 0;
        MSG_COUNT = 1000000;
        for (int i = 0; i < MSG_COUNT; i++) {
            String msg = "msg:" + i;
            // Fixed: the original code attached the else-branch only to the n3
            // check (dangling else), so nonHitCount also counted messages that
            // DID land on n1 or n2. Track a match across all three nodes instead.
            boolean matched = false;
            if (sm.filter(n1, msg.getBytes())) {
                hostHitCountNode1++;
                matched = true;
            }
            if (sm2.filter(n2, msg.getBytes())) {
                hostHitCountNode2++;
                matched = true;
            }
            if (sm3.filter(n3, msg.getBytes())) {
                hostHitCountNode3++;
                matched = true;
            }
            if (!matched) {
                nonHitCount++;
            }
        }
        assertEquals(0, nonHitCount);
        assertEquals(MSG_COUNT, hostHitCountNode1 + hostHitCountNode2 + hostHitCountNode3);
        ssm.deregisterServer(n1, params);
        ssm.deregisterServer(n2, params);
        ssm.deregisterServer(n3, params);
    }

    @Test
    public void twoNodeDifferentClientIdTest() {
        WritableEndpoint<Void> n1 = new WritableEndpoint<Void>("host1", 7001);
        WritableEndpoint<Void> n2 = new WritableEndpoint<Void>("host2", 7001);
        Map<String, List<String>> params = clientIdParams("client1");
        Map<String, List<String>> params2 = clientIdParams("client2");
        ServerSlotManager<Void> ssm = new ServerSlotManager<Void>(HashFunctions.ketama());
        SlotAssignmentManager<Void> sm = ssm.registerServer(n1, params);
        SlotAssignmentManager<Void> sm2 = ssm.registerServer(n2, params2);
        // Different clientIds => independent slot-assignment managers.
        assertFalse(sm.equals(sm2));
        int hostHitCountNode1 = 0;
        int hostHitCountNode2 = 0;
        int nonHitCount = 0;
        int MSG_COUNT = 1000000;
        for (int i = 0; i < MSG_COUNT; i++) {
            String msg = "msg:" + i;
            boolean atleastOneMatch = false;
            if (sm.filter(n1, msg.getBytes())) {
                hostHitCountNode1++;
                atleastOneMatch = true;
            }
            if (sm2.filter(n2, msg.getBytes())) {
                hostHitCountNode2++;
                atleastOneMatch = true;
            }
            if (!atleastOneMatch) {
                nonHitCount++;
            }
        }
        // Each ring has a single node, so every message hits both endpoints.
        assertEquals(0, nonHitCount);
        assertEquals(MSG_COUNT, hostHitCountNode1);
        assertEquals(MSG_COUNT, hostHitCountNode2);
        ssm.deregisterServer(n1, params);
        ssm.deregisterServer(n2, params2);
    }

    @Test
    public void twoSameOneDifferentClientIdTest() {
        WritableEndpoint<Void> n1 = new WritableEndpoint<Void>("host1", 7001);
        WritableEndpoint<Void> n2 = new WritableEndpoint<Void>("host2", 7001);
        WritableEndpoint<Void> n3 = new WritableEndpoint<Void>("host3", 7001);
        Map<String, List<String>> params = clientIdParams("client1");
        Map<String, List<String>> params3 = clientIdParams("client3");
        ServerSlotManager<Void> ssm = new ServerSlotManager<Void>(HashFunctions.ketama());
        SlotAssignmentManager<Void> sm = ssm.registerServer(n1, params);
        SlotAssignmentManager<Void> sm2 = ssm.registerServer(n2, params);
        SlotAssignmentManager<Void> sm3 = ssm.registerServer(n3, params3);
        assertFalse(sm.equals(sm3));
        assertTrue(sm.equals(sm2));
        int hostHitCountNode1 = 0;
        int hostHitCountNode2 = 0;
        int hostHitCountNode3 = 0;
        int nonHitCount = 0;
        int MSG_COUNT = 1000000;
        for (int i = 0; i < MSG_COUNT; i++) {
            boolean atleastOneMatch = false;
            String msg = "msg:" + i;
            if (sm.filter(n1, msg.getBytes())) {
                hostHitCountNode1++;
                atleastOneMatch = true;
            }
            if (sm2.filter(n2, msg.getBytes())) {
                hostHitCountNode2++;
                atleastOneMatch = true;
            }
            if (sm3.filter(n3, msg.getBytes())) {
                hostHitCountNode3++;
                atleastOneMatch = true;
            }
            if (!atleastOneMatch) {
                nonHitCount++;
            }
        }
        // n1 and n2 split their shared ring; n3 owns its own ring entirely.
        assertEquals(0, nonHitCount);
        assertEquals(MSG_COUNT, hostHitCountNode1 + hostHitCountNode2);
        assertEquals(MSG_COUNT, hostHitCountNode3);
        ssm.deregisterServer(n1, params);
        ssm.deregisterServer(n2, params);
        ssm.deregisterServer(n3, params3);
    }
}
| 7,761 |
0 | Create_ds/mantis/mantis-common/src/test/java/io/reactivx/common | Create_ds/mantis/mantis-common/src/test/java/io/reactivx/common/consistenthashing/ConsistentHashTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.reactivx.common.consistenthashing;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import io.mantisrx.common.network.ConsistentHash;
import io.mantisrx.common.network.Endpoint;
import io.mantisrx.common.network.EndpointConfiguration;
import io.mantisrx.common.network.HashFunctions;
import java.util.ArrayList;
import java.util.List;
import org.junit.Test;
/**
 * Tests for {@link ConsistentHash}: ring construction validation and key
 * distribution across one and two nodes using the ketama hash function.
 */
public class ConsistentHashTest {

    /** With a single node on the ring, every key must map to that node. */
    @Test
    public void oneNodeTest() {
        Endpoint n1 = new Endpoint("host1", 7001);
        List<Endpoint> nodes = new ArrayList<Endpoint>();
        nodes.add(n1);
        ConsistentHash<Endpoint> ch =
                new ConsistentHash<Endpoint>(HashFunctions.ketama(), new EndpointConfiguration(), nodes);
        int hostHitCountNode1 = 0;
        int nonHitCount = 0;
        int MSG_COUNT = 100000;
        for (int i = 0; i < MSG_COUNT; i++) {
            Endpoint sn = ch.get(("msg:" + i).getBytes());
            if (sn.getHost().equals("host1")) {
                hostHitCountNode1++;
            } else {
                nonHitCount++;
            }
        }
        // assertEquals gives a precise failure message, unlike assertTrue(x == 0).
        assertEquals(0, nonHitCount);
        assertEquals(MSG_COUNT, hostHitCountNode1);
    }

    /** Constructing a ring with no nodes is invalid and must throw. */
    @Test
    public void emptyNodeThrowsTest() {
        List<Endpoint> nodes = new ArrayList<Endpoint>();
        try {
            // The instance itself is irrelevant; construction must fail, so the
            // result is deliberately discarded (removes an unused-variable warning).
            new ConsistentHash<Endpoint>(HashFunctions.ketama(), new EndpointConfiguration(), nodes);
            // fail() throws AssertionError (an Error), so the catch below cannot swallow it.
            fail();
        } catch (Exception ignored) {
            // expected: an empty node list is rejected
        }
    }

    /** Two nodes should split the key space roughly evenly (within 48%..52%). */
    @Test
    public void twoNodeTest() {
        Endpoint n1 = new Endpoint("host1", 7001);
        Endpoint n2 = new Endpoint("host2", 7001);
        List<Endpoint> nodes = new ArrayList<Endpoint>();
        nodes.add(n1);
        nodes.add(n2);
        ConsistentHash<Endpoint> ch =
                new ConsistentHash<Endpoint>(HashFunctions.ketama(), new EndpointConfiguration(), nodes);
        int hostHitCountNode1 = 0;
        int hostHitCountNode2 = 0;
        int nonHitCount = 0;
        int MSG_COUNT = 100000;
        for (int i = 0; i < MSG_COUNT; i++) {
            Endpoint sn = ch.get(("msg:" + i).getBytes());
            if (sn.getHost().equals("host1")) {
                hostHitCountNode1++;
            } else if (sn.getHost().equals("host2")) {
                hostHitCountNode2++;
            } else {
                nonHitCount++;
            }
        }
        double host1HitPercentage = (double) hostHitCountNode1 / (double) MSG_COUNT;
        System.out.println("host1 hit % " + host1HitPercentage);
        // Distribution bound is probabilistic but stable for a fixed hash function.
        assertTrue(host1HitPercentage > 0.48 && host1HitPercentage < 0.52);
        assertEquals(0, nonHitCount);
        assertEquals(MSG_COUNT, hostHitCountNode1 + hostHitCountNode2);
    }
}
| 7,762 |
0 | Create_ds/mantis/mantis-common/src/test/java/io/mantisrx | Create_ds/mantis/mantis-common/src/test/java/io/mantisrx/runtime/MachineDefinitionTest.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
/**
 * Exercises {@code MachineDefinition#canFit} across the interesting orderings:
 * strictly larger vs. smaller, mixed (larger in one dimension only), and equal.
 */
public class MachineDefinitionTest {

    @Test
    public void testMachineDefinition() {
        // A uniformly larger definition fits the smaller one, but not vice versa.
        final MachineDefinition bigger = new MachineDefinition(2, 2, 2, 2, 2);
        final MachineDefinition smaller = new MachineDefinition(1, 1, 1, 1, 1);
        assertTrue(bigger.canFit(smaller));
        assertFalse(smaller.canFit(bigger));

        // Neither fits when each exceeds the other in some dimension
        // (here the second definition has more CPU but less of everything else).
        final MachineDefinition uniform = new MachineDefinition(2, 2, 2, 2, 2);
        final MachineDefinition cpuHeavy = new MachineDefinition(3, 1, 1, 1, 1);
        assertFalse(uniform.canFit(cpuHeavy));
        assertFalse(cpuHeavy.canFit(uniform));

        // Same idea with the memory dimension dominating instead.
        final MachineDefinition uniformTwo = new MachineDefinition(2, 2, 2, 2, 2);
        final MachineDefinition memHeavy = new MachineDefinition(2, 3, 1, 1, 1);
        assertFalse(uniformTwo.canFit(memHeavy));
        assertFalse(memHeavy.canFit(uniformTwo));

        // Identical definitions fit each other in both directions.
        final MachineDefinition twinA = new MachineDefinition(2, 2, 2, 2, 2);
        final MachineDefinition twinB = new MachineDefinition(2, 2, 2, 2, 2);
        assertTrue(twinA.canFit(twinB));
        assertTrue(twinB.canFit(twinA));
    }
}
| 7,763 |
0 | Create_ds/mantis/mantis-common/src/test/java/io/mantisrx/runtime | Create_ds/mantis/mantis-common/src/test/java/io/mantisrx/runtime/descriptor/DeploymentStrategyTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.descriptor;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import io.mantisrx.common.JsonSerializer;
import org.junit.Test;
/**
 * Tests for {@link DeploymentStrategy}: the per-stage inherit-instance-count
 * flags and JSON (de)serialization round-trips (with and without a
 * resourceClusterId for backward compatibility).
 */
public class DeploymentStrategyTest {
    // Shared across tests; no mutable state is observed on it here.
    private final JsonSerializer serializer = new JsonSerializer();
    // Any stage flagged inheritInstanceCount=true makes the no-arg (any-stage)
    // check and that stage's check return true; other stage ids return false.
    @Test
    public void shouldRequireInheritInstanceCheck() {
        DeploymentStrategy res = DeploymentStrategy.builder()
            .stage(1, StageDeploymentStrategy.builder().inheritInstanceCount(true).build())
            .build();
        assertTrue(res.requireInheritInstanceCheck());
        assertTrue(res.requireInheritInstanceCheck(1));
        assertFalse(res.requireInheritInstanceCheck(2));
        res = DeploymentStrategy.builder()
            .stage(2, StageDeploymentStrategy.builder().inheritInstanceCount(true).build())
            .stage(3, StageDeploymentStrategy.builder().inheritInstanceCount(false).build())
            .build();
        assertTrue(res.requireInheritInstanceCheck());
        assertTrue(res.requireInheritInstanceCheck(2));
        assertFalse(res.requireInheritInstanceCheck(1));
        assertFalse(res.requireInheritInstanceCheck(3));
    }
    // With no stage flagged true (explicitly false, defaulted, or absent), every
    // variant of the check must answer false.
    @Test
    public void shouldNotRequireInheritInstanceCheck() {
        DeploymentStrategy res = DeploymentStrategy.builder()
            .stage(1, StageDeploymentStrategy.builder().inheritInstanceCount(false).build())
            .build();
        assertFalse(res.requireInheritInstanceCheck(1));
        assertFalse(res.requireInheritInstanceCheck(2));
        assertFalse(res.requireInheritInstanceCheck());
        // test default setting
        res = DeploymentStrategy.builder().build();
        assertFalse(res.requireInheritInstanceCheck(1));
        assertFalse(res.requireInheritInstanceCheck(2));
        assertFalse(res.requireInheritInstanceCheck());
        // test multiple stages
        res = DeploymentStrategy.builder()
            .stage(1, StageDeploymentStrategy.builder().build())
            .stage(3, StageDeploymentStrategy.builder().inheritInstanceCount(false).build())
            .build();
        assertFalse(res.requireInheritInstanceCheck(1));
        assertFalse(res.requireInheritInstanceCheck(2));
        assertFalse(res.requireInheritInstanceCheck());
    }
    // Round-trip: a payload carrying resourceClusterId must re-serialize to the
    // same string (whitespace stripped before comparing).
    @Test
    public void testSerialization() throws Exception {
        String expected = "{"
            + "\"stageDeploymentStrategyMap\":"
            + "{"
            + "\"1\":{\"inheritInstanceCount\":false},"
            + "\"2\":{\"inheritInstanceCount\":true},"
            + "\"3\":{\"inheritInstanceCount\":true}},"
            + "\"resourceClusterId\":\"rescluster1\""
            + "}";
        final DeploymentStrategy ds = serializer.fromJSON(expected, DeploymentStrategy.class);
        assertEquals(expected.replaceAll("[\\n\\s]+", ""), serializer.toJson(ds));
    }
    // Back-compat: older payloads lacking resourceClusterId must still round-trip.
    @Test
    public void testSerializationBackCompat() throws Exception {
        String expected = "{"
            + "\"stageDeploymentStrategyMap\":"
            + "{"
            + "\"1\":{\"inheritInstanceCount\":false},"
            + "\"2\":{\"inheritInstanceCount\":true},"
            + "\"3\":{\"inheritInstanceCount\":true}}"
            + "}";
        final DeploymentStrategy ds = serializer.fromJSON(expected, DeploymentStrategy.class);
        assertEquals(expected.replaceAll("[\\n\\s]+", ""), serializer.toJson(ds));
    }
}
| 7,764 |
0 | Create_ds/mantis/mantis-common/src/test/java/io/mantisrx/runtime | Create_ds/mantis/mantis-common/src/test/java/io/mantisrx/runtime/descriptor/StageScalingPolicyTest.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.descriptor;
import static org.junit.Assert.assertEquals;
import io.mantisrx.common.JsonSerializer;
import io.mantisrx.runtime.descriptor.StageScalingPolicy.RollingCount;
import io.mantisrx.runtime.descriptor.StageScalingPolicy.ScalingReason;
import io.mantisrx.runtime.descriptor.StageScalingPolicy.Strategy;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import org.junit.Test;
/**
 * JSON (de)serialization tests for {@link StageScalingPolicy}, covering
 * defaulting behavior for fields missing from the payload
 * (coolDownSecs, rollingCount).
 */
public class StageScalingPolicyTest {
    private final JsonSerializer serializer = new JsonSerializer();
    // A fully-populated pretty-printed document must deserialize to the
    // equivalently-constructed policy. Note the constructed strategies pass a
    // null rollingCount, matching the (1, 1) rollingCount in the JSON —
    // presumably null defaults to (1, 1) in the Strategy constructor.
    @Test
    public void testSerialization() throws Exception {
        Map<ScalingReason, Strategy> smap = new HashMap<>();
        smap.put(ScalingReason.CPU, new Strategy(ScalingReason.CPU, 0.5, 0.75, null));
        smap.put(ScalingReason.DataDrop, new Strategy(ScalingReason.DataDrop, 0.0, 2.0, null));
        StageScalingPolicy policy = new StageScalingPolicy(1, 1, 2, 1, 1, 60, smap);
        final String expected = "{\n" +
            "  \"stage\": 1,\n" +
            "  \"min\": 1,\n" +
            "  \"max\": 2,\n" +
            "  \"increment\": 1,\n" +
            "  \"decrement\": 1,\n" +
            "  \"coolDownSecs\": 60,\n" +
            "  \"strategies\":\n" +
            "  {\n" +
            "    \"CPU\":\n" +
            "    {\n" +
            "      \"reason\": \"CPU\",\n" +
            "      \"scaleDownBelowPct\": 0.5,\n" +
            "      \"scaleUpAbovePct\": 0.75,\n" +
            "      \"rollingCount\":\n" +
            "      {\n" +
            "        \"count\": 1,\n" +
            "        \"of\": 1\n" +
            "      }\n" +
            "    },\n" +
            "    \"DataDrop\":\n" +
            "    {\n" +
            "      \"reason\": \"DataDrop\",\n" +
            "      \"scaleDownBelowPct\": 0.0,\n" +
            "      \"scaleUpAbovePct\": 2.0,\n" +
            "      \"rollingCount\":\n" +
            "      {\n" +
            "        \"count\": 1,\n" +
            "        \"of\": 1\n" +
            "      }\n" +
            "    }\n" +
            "  },\n" +
            "  \"enabled\": true\n" +
            "}";
        StageScalingPolicy actual =
            serializer.fromJson(expected.getBytes(StandardCharsets.UTF_8), StageScalingPolicy.class);
        assertEquals(policy, actual);
    }
    // Partial payloads: missing coolDownSecs deserializes as 0; an empty
    // strategies map compares equal to a policy built with null; a strategy
    // without rollingCount gets the (1, 1) default.
    @Test
    public void testDeserialization() throws Exception {
        String json1 = "{\"stage\":1,\"min\":1,\"max\":2,\"increment\":1,\"decrement\":1,\"strategies\":{},\"enabled\":false}";
        StageScalingPolicy actual = serializer.fromJSON(json1, StageScalingPolicy.class);
        StageScalingPolicy expected = new StageScalingPolicy(1, 1, 2, 1, 1, 0, null);
        assertEquals(expected, actual);
        String json2 = "{\"stage\":1,\"min\":1,\"max\":5,\"increment\":1,\"decrement\":1,\"coolDownSecs\":600,\"strategies\":{\"CPU\":{\"reason\":\"CPU\",\"scaleDownBelowPct\":50,\"scaleUpAbovePct\":75}},\"enabled\":true}";
        actual = serializer.fromJSON(json2, StageScalingPolicy.class);
        Map<ScalingReason, Strategy> smap = new HashMap<>();
        smap.put(ScalingReason.CPU, new Strategy(ScalingReason.CPU, 50, 75.0, new RollingCount(1, 1)));
        expected = new StageScalingPolicy(1, 1, 5, 1, 1, 600, smap);
        assertEquals(expected, actual);
        String json3 = "{\"stage\":1,\"min\":1,\"max\":3,\"increment\":1,\"decrement\":1,\"coolDownSecs\":0,\"strategies\":{\"Memory\":{\"reason\":\"Memory\",\"scaleDownBelowPct\":65,\"scaleUpAbovePct\":80,\"rollingCount\":{\"count\":6,\"of\":10}}},\"enabled\":true}";
        actual = serializer.fromJSON(json3, StageScalingPolicy.class);
        smap = new HashMap<>();
        smap.put(ScalingReason.Memory, new Strategy(ScalingReason.Memory, 65, 80.0, new RollingCount(6, 10)));
        expected = new StageScalingPolicy(1, 1, 3, 1, 1, 0, smap);
        assertEquals(expected, actual);
    }
}
| 7,765 |
0 | Create_ds/mantis/mantis-common/src/test/java/io/mantisrx/runtime | Create_ds/mantis/mantis-common/src/test/java/io/mantisrx/runtime/descriptor/SchedulingInfoTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.descriptor;
import static java.util.Optional.empty;
import static java.util.Optional.of;
import static org.junit.Assert.assertEquals;
import io.mantisrx.common.JsonSerializer;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.runtime.descriptor.SchedulingInfo.Builder;
import io.mantisrx.shaded.com.google.common.collect.ImmutableMap;
import java.util.HashMap;
import java.util.Map;
import org.junit.Test;
/**
 * Tests for {@link SchedulingInfo}: rebuilding scheduling info with per-stage
 * instance-count inheritance, and JSON serialization of the per-stage layout
 * (with and without container SKU attributes).
 */
public class SchedulingInfoTest {
    // Exercises Builder#createWithInstanceInheritance across combinations of
    // (existing-count supplier, per-stage inherit predicate, force-inherit flag).
    // The supplied count wins only when the stage's inherit flag or the force
    // flag is set AND the supplier yields a value; otherwise the given stage's
    // own numberOfInstances is kept.
    @Test
    public void buildWithInheritInstanceTest() {
        Map<Integer, StageSchedulingInfo> givenStages = new HashMap<>();
        givenStages.put(1, StageSchedulingInfo.builder().numberOfInstances(1).build());
        SchedulingInfo.Builder builder = new SchedulingInfo.Builder()
            .createWithInstanceInheritance(
                givenStages,
                i -> empty(),
                i -> false,
                false);
        SchedulingInfo info = builder.build();
        assertEquals(1, info.getStages().size());
        assertEquals(1, info.forStage(1).getNumberOfInstances());
        givenStages.put(2, StageSchedulingInfo.builder().numberOfInstances(2).build());
        builder = new SchedulingInfo.Builder()
            .createWithInstanceInheritance(
                givenStages,
                i -> empty(),
                i -> false,
                false);
        info = builder.build();
        assertEquals(2, info.getStages().size());
        assertEquals(1, info.forStage(1).getNumberOfInstances());
        assertEquals(2, info.forStage(2).getNumberOfInstances());
        // test valid existing count + no inherit flag
        builder = new SchedulingInfo.Builder()
            .createWithInstanceInheritance(
                givenStages,
                i -> of(9),
                i -> false,
                false);
        info = builder.build();
        assertEquals(2, info.getStages().size());
        assertEquals(1, info.forStage(1).getNumberOfInstances());
        assertEquals(2, info.forStage(2).getNumberOfInstances());
        // test invalid existing count + inherit flag
        builder = new SchedulingInfo.Builder()
            .createWithInstanceInheritance(
                givenStages,
                i -> empty(),
                i -> true,
                false);
        info = builder.build();
        assertEquals(2, info.getStages().size());
        assertEquals(1, info.forStage(1).getNumberOfInstances());
        assertEquals(2, info.forStage(2).getNumberOfInstances());
        // test invalid existing count + force inherit flag
        builder = new SchedulingInfo.Builder()
            .createWithInstanceInheritance(
                givenStages,
                i -> empty(),
                i -> false,
                true);
        info = builder.build();
        assertEquals(2, info.getStages().size());
        assertEquals(1, info.forStage(1).getNumberOfInstances());
        assertEquals(2, info.forStage(2).getNumberOfInstances());
        // test valid existing count + inherit flag (stage 1 only)
        builder = new SchedulingInfo.Builder()
            .createWithInstanceInheritance(
                givenStages,
                i -> of(9),
                i -> i == 1,
                false);
        info = builder.build();
        assertEquals(2, info.getStages().size());
        assertEquals(9, info.forStage(1).getNumberOfInstances());
        assertEquals(2, info.forStage(2).getNumberOfInstances());
        // test valid existing count + force inherit flag
        builder = new SchedulingInfo.Builder()
            .createWithInstanceInheritance(
                givenStages,
                i -> of(9),
                i -> false,
                true);
        info = builder.build();
        assertEquals(2, info.getStages().size());
        assertEquals(9, info.forStage(1).getNumberOfInstances());
        assertEquals(9, info.forStage(2).getNumberOfInstances());
        // test valid existing count + both inherit flag
        builder = new SchedulingInfo.Builder()
            .createWithInstanceInheritance(
                givenStages,
                i -> of(9),
                i -> true,
                true);
        info = builder.build();
        assertEquals(2, info.getStages().size());
        assertEquals(9, info.forStage(1).getNumberOfInstances());
        assertEquals(9, info.forStage(2).getNumberOfInstances());
        // test job master (stage 0 participates in inheritance as well)
        givenStages.put(0, StageSchedulingInfo.builder().numberOfInstances(1).build());
        builder = new SchedulingInfo.Builder()
            .createWithInstanceInheritance(
                givenStages,
                i -> of(9),
                i -> true,
                true);
        info = builder.build();
        assertEquals(3, info.getStages().size());
        assertEquals(9, info.forStage(0).getNumberOfInstances());
        assertEquals(9, info.forStage(1).getNumberOfInstances());
        assertEquals(9, info.forStage(2).getNumberOfInstances());
    }
    // Serializes a two-stage scalable layout and compares against the expected
    // JSON with all whitespace stripped. Note networkMbps serializes as 128.0
    // even though 0.0 is passed — presumably a constructor default; confirm in
    // MachineDefinition if this changes.
    @Test
    public void testSerialization() throws Exception {
        Map<StageScalingPolicy.ScalingReason, StageScalingPolicy.Strategy> smap = new HashMap<>();
        smap.put(StageScalingPolicy.ScalingReason.Memory, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.Memory, 0.1, 0.6, null));
        Builder builder = new Builder()
            .numberOfStages(2)
            .multiWorkerScalableStageWithConstraints(
                2,
                new MachineDefinition(1, 1.24, 0.0, 1, 1),
                null, null,
                new StageScalingPolicy(1, 1, 3, 1, 1, 60, smap)
            )
            .multiWorkerScalableStageWithConstraints(
                3,
                new MachineDefinition(1, 1.24, 0.0, 1, 1),
                null, null,
                new StageScalingPolicy(1, 1, 3, 1, 1, 60, smap)
            );
        JsonSerializer serializer = new JsonSerializer();
        String expected = "" +
            "{" +
            "  \"stages\":" +
            "    {" +
            "      \"1\":" +
            "      {" +
            "        \"numberOfInstances\": 2," +
            "        \"machineDefinition\":" +
            "        {" +
            "          \"cpuCores\": 1.0," +
            "          \"memoryMB\": 1.24," +
            "          \"networkMbps\": 128.0," +
            "          \"diskMB\": 1.0," +
            "          \"numPorts\": 1" +
            "        }," +
            "        \"hardConstraints\":" +
            "        []," +
            "        \"softConstraints\":" +
            "        []," +
            "        \"scalingPolicy\":" +
            "        {" +
            "          \"stage\": 1," +
            "          \"min\": 1," +
            "          \"max\": 3," +
            "          \"increment\": 1," +
            "          \"decrement\": 1," +
            "          \"coolDownSecs\": 60," +
            "          \"strategies\":" +
            "          {" +
            "            \"Memory\":" +
            "            {" +
            "              \"reason\": \"Memory\"," +
            "              \"scaleDownBelowPct\": 0.1," +
            "              \"scaleUpAbovePct\": 0.6," +
            "              \"rollingCount\":" +
            "              {" +
            "                \"count\": 1," +
            "                \"of\": 1" +
            "              }" +
            "            }" +
            "          }," +
            "          \"enabled\": true" +
            "        }," +
            "        \"scalable\": true" +
            "      }," +
            "      \"2\":" +
            "      {" +
            "        \"numberOfInstances\": 3," +
            "        \"machineDefinition\":" +
            "        {" +
            "          \"cpuCores\": 1.0," +
            "          \"memoryMB\": 1.24," +
            "          \"networkMbps\": 128.0," +
            "          \"diskMB\": 1.0," +
            "          \"numPorts\": 1" +
            "        }," +
            "        \"hardConstraints\":" +
            "        []," +
            "        \"softConstraints\":" +
            "        []," +
            "        \"scalingPolicy\":" +
            "        {" +
            "          \"stage\": 2," +
            "          \"min\": 1," +
            "          \"max\": 3," +
            "          \"increment\": 1," +
            "          \"decrement\": 1," +
            "          \"coolDownSecs\": 60," +
            "          \"strategies\":" +
            "          {" +
            "            \"Memory\":" +
            "            {" +
            "              \"reason\": \"Memory\"," +
            "              \"scaleDownBelowPct\": 0.1," +
            "              \"scaleUpAbovePct\": 0.6," +
            "              \"rollingCount\":" +
            "              {" +
            "                \"count\": 1," +
            "                \"of\": 1" +
            "              }" +
            "            }" +
            "          }," +
            "          \"enabled\": true" +
            "        }," +
            "        \"scalable\": true" +
            "      }" +
            "    }" +
            "}";
        assertEquals(expected.replaceAll("\\s+", ""), serializer.toJson(builder.build()));
    }
    // Same layout as testSerialization, but each stage additionally carries
    // containerAttributes mapping containerSkuID to a per-stage SKU.
    @Test
    public void testSerializationWithSkuId() throws Exception {
        Map<StageScalingPolicy.ScalingReason, StageScalingPolicy.Strategy> smap = new HashMap<>();
        smap.put(StageScalingPolicy.ScalingReason.Memory, new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.Memory, 0.1, 0.6, null));
        Builder builder = new Builder()
            .numberOfStages(2)
            .multiWorkerScalableStageWithConstraints(
                2,
                new MachineDefinition(1, 1.24, 0.0, 1, 1),
                null, null,
                new StageScalingPolicy(1, 1, 3, 1, 1, 60, smap),
                ImmutableMap.of("containerSkuID", "sku1")
            )
            .multiWorkerScalableStageWithConstraints(
                3,
                new MachineDefinition(1, 1.24, 0.0, 1, 1),
                null, null,
                new StageScalingPolicy(1, 1, 3, 1, 1, 60, smap),
                ImmutableMap.of("containerSkuID", "sku2")
            );
        JsonSerializer serializer = new JsonSerializer();
        String expected = "" +
            "{" +
            "  \"stages\":" +
            "    {" +
            "      \"1\":" +
            "      {" +
            "        \"numberOfInstances\": 2," +
            "        \"machineDefinition\":" +
            "        {" +
            "          \"cpuCores\": 1.0," +
            "          \"memoryMB\": 1.24," +
            "          \"networkMbps\": 128.0," +
            "          \"diskMB\": 1.0," +
            "          \"numPorts\": 1" +
            "        }," +
            "        \"hardConstraints\":" +
            "        []," +
            "        \"softConstraints\":" +
            "        []," +
            "        \"scalingPolicy\":" +
            "        {" +
            "          \"stage\": 1," +
            "          \"min\": 1," +
            "          \"max\": 3," +
            "          \"increment\": 1," +
            "          \"decrement\": 1," +
            "          \"coolDownSecs\": 60," +
            "          \"strategies\":" +
            "          {" +
            "            \"Memory\":" +
            "            {" +
            "              \"reason\": \"Memory\"," +
            "              \"scaleDownBelowPct\": 0.1," +
            "              \"scaleUpAbovePct\": 0.6," +
            "              \"rollingCount\":" +
            "              {" +
            "                \"count\": 1," +
            "                \"of\": 1" +
            "              }" +
            "            }" +
            "          }," +
            "          \"enabled\": true" +
            "        }," +
            "        \"scalable\": true," +
            "        \"containerAttributes\": {\"containerSkuID\":\"sku1\"}" +
            "      }," +
            "      \"2\":" +
            "      {" +
            "        \"numberOfInstances\": 3," +
            "        \"machineDefinition\":" +
            "        {" +
            "          \"cpuCores\": 1.0," +
            "          \"memoryMB\": 1.24," +
            "          \"networkMbps\": 128.0," +
            "          \"diskMB\": 1.0," +
            "          \"numPorts\": 1" +
            "        }," +
            "        \"hardConstraints\":" +
            "        []," +
            "        \"softConstraints\":" +
            "        []," +
            "        \"scalingPolicy\":" +
            "        {" +
            "          \"stage\": 2," +
            "          \"min\": 1," +
            "          \"max\": 3," +
            "          \"increment\": 1," +
            "          \"decrement\": 1," +
            "          \"coolDownSecs\": 60," +
            "          \"strategies\":" +
            "          {" +
            "            \"Memory\":" +
            "            {" +
            "              \"reason\": \"Memory\"," +
            "              \"scaleDownBelowPct\": 0.1," +
            "              \"scaleUpAbovePct\": 0.6," +
            "              \"rollingCount\":" +
            "              {" +
            "                \"count\": 1," +
            "                \"of\": 1" +
            "              }" +
            "            }" +
            "          }," +
            "          \"enabled\": true" +
            "        }," +
            "        \"scalable\": true," +
            "        \"containerAttributes\": {\"containerSkuID\":\"sku2\"}" +
            "      }" +
            "    }" +
            "}";
        assertEquals(expected.replaceAll("\\s+", ""), serializer.toJson(builder.build()));
    }
}
| 7,766 |
0 | Create_ds/mantis/mantis-common/src/test/java/io/mantisrx/runtime | Create_ds/mantis/mantis-common/src/test/java/io/mantisrx/runtime/parameter/TestSerialization.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.parameter;
import static org.junit.Assert.assertEquals;
import io.mantisrx.common.JsonSerializer;
import org.junit.Test;
/** Round-trips a {@link Parameter} through JSON and verifies both directions. */
public class TestSerialization {

    private final JsonSerializer jsonSerializer = new JsonSerializer();

    @Test
    public void testSerializationOfParameters() throws Exception {
        final Parameter original = new Parameter("name", "value");
        final String expectedJson = "{\"name\":\"name\",\"value\":\"value\"}";
        // Serialization must produce the canonical JSON form.
        assertEquals(expectedJson, jsonSerializer.toJson(original));
        // Deserializing that JSON must yield an equal Parameter.
        final Parameter roundTripped = jsonSerializer.fromJSON(expectedJson, Parameter.class);
        assertEquals(original, roundTripped);
    }
}
| 7,767 |
0 | Create_ds/mantis/mantis-common/src/test/java/io/mantisrx | Create_ds/mantis/mantis-common/src/test/java/io/mantisrx/common/AckTest.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common;
import static org.junit.Assert.assertEquals;
import org.junit.Test;
/** Verifies the {@code Ack} singleton survives a JSON round-trip unchanged. */
public class AckTest {

    private final JsonSerializer serializer = new JsonSerializer();

    @Test
    public void testAckInstance() throws Exception {
        final Ack original = Ack.getInstance();
        // Serialize then deserialize; the result must compare equal.
        final String json = serializer.toJson(original);
        final Ack restored = serializer.fromJSON(json, Ack.class);
        assertEquals(original, restored);
    }
}
| 7,768 |
0 | Create_ds/mantis/mantis-common/src/test/java/io/mantisrx | Create_ds/mantis/mantis-common/src/test/java/io/mantisrx/common/WorkerPortsTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common;
import static org.junit.Assert.assertEquals;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
public class WorkerPortsTest {
private final JsonSerializer serializer = new JsonSerializer();
/**
* Uses legacy constructor {@link WorkerPorts#WorkerPorts(List)} which expects
* at least 5 ports: metrics, debug, console, custom.
*/
@Test(expected = IllegalArgumentException.class)
public void shouldNotConstructWorkerPorts() {
// Not enough ports.
new WorkerPorts(Arrays.asList(1, 1, 1, 1));
}
/**
* Uses legacy constructor {@link WorkerPorts#WorkerPorts(List)} which cannot construct
* a WorkerPorts object, because a worker needs a sink to be useful.
* Otherwise, other workers can't connect to it.
*/
@Test(expected = IllegalStateException.class)
public void shouldNotConstructWorkerPortsWithDuplicatePorts() {
// Enough ports, but has duplicate ports.
new WorkerPorts(Arrays.asList(1, 1, 1, 1, 1));
}
/**
* Uses legacy constructor {@link WorkerPorts#WorkerPorts(List)} but was given a port
* out of range.
*/
@Test(expected = IllegalStateException.class)
public void shouldNotConstructWorkerPortsWithInvalidPortRange() {
// Enough ports, but given an invalid port range
new WorkerPorts(Arrays.asList(1, 1, 1, 1, 65536));
}
/**
* Uses legacy constructor {@link WorkerPorts#WorkerPorts(List)}.
*/
@Test
public void shouldConstructValidWorkerPorts() {
WorkerPorts workerPorts = new WorkerPorts(Arrays.asList(1, 2, 3, 4, 5));
}
@Test
public void testIfWorkerPortsIsSerializableByJson() throws Exception {
final WorkerPorts workerPorts =
new WorkerPorts(1, 2, 3, 4, 5);
String workerPortsJson = serializer.toJson(workerPorts);
assertEquals(workerPortsJson, "{\"metricsPort\":1,\"debugPort\":2,\"consolePort\":3,\"customPort\":4,\"ports\":[5],\"sinkPort\":5}");
final WorkerPorts actual = serializer.fromJSON(workerPortsJson, WorkerPorts.class);
assertEquals(workerPorts, actual);
}
@Test
public void testWorkerPortsIsSerializableByJava() {
final WorkerPorts workerPorts =
new WorkerPorts(1, 2, 3, 4, 5);
byte[] serialized = SerializationUtils.serialize(workerPorts);
assertEquals(workerPorts, SerializationUtils.deserialize(serialized));
}
} | 7,769 |
0 | Create_ds/mantis/mantis-common/src/test/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/test/java/io/mantisrx/common/compression/CompressionUtilsTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.compression;
import static org.junit.Assert.assertEquals;
import io.mantisrx.common.MantisServerSentEvent;
import java.io.BufferedReader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import org.junit.Test;
public class CompressionUtilsTest {
@Test
public void shouldTokenizeWithEventsContainingPartialDelimiterMatches() throws Exception {
String testInput = "ab$cdef$$$ghi$jkl$$$lmno$$pqrst$";
BufferedReader reader = new BufferedReader(new StringReader(testInput));
List<MantisServerSentEvent> result = CompressionUtils.tokenize(reader);
assertEquals(result.size(), 3);
assertEquals(result.get(0).getEventAsString(), "ab$cdef");
assertEquals(result.get(1).getEventAsString(), "ghi$jkl");
assertEquals(result.get(2).getEventAsString(), "lmno$$pqrst$");
}
@Test
public void shouldTokenizeWithEventsContainingPartialDelimiterMatchesWithCustomDelimiter() throws Exception {
String delimiter = "a04f0418-bdff-4f53-af7d-9f5a093b9d65";
String event1 = "ab" + delimiter.substring(0, 9) + "cdef";
String event2 = "ghi" + delimiter.substring(0, 5) + "jkl";
String event3 = "lmno" + delimiter.substring(0, 4) + "pqrst" + delimiter.substring(0, 2);
String testInput = event1
+ delimiter
+ event2
+ delimiter
+ event3;
BufferedReader reader = new BufferedReader(new StringReader(testInput));
List<MantisServerSentEvent> result = CompressionUtils.tokenize(reader, delimiter);
List<String> actual = result.stream().map(e -> e.getEventAsString()).collect(Collectors.toList());
assertEquals("Delimiter: '" + delimiter + "'", Arrays.asList(event1,event2,event3), actual);
}
@Test
public void testDelimiterWiithPrefixMatchingEndOfMEssage() throws Exception {
// Delimiter starts with 'c', event1 ends with 'c'
String delimiter = "ccd";
String event1 = "abc";
String event2 = "def";
String event3 = "ghi";
String testInput = event1
+ delimiter
+ event2
+ delimiter
+ event3;
BufferedReader reader = new BufferedReader(new StringReader(testInput));
List<MantisServerSentEvent> result = CompressionUtils.tokenize(reader, delimiter);
List<String> actual = result.stream().map(e -> e.getEventAsString()).collect(Collectors.toList());
assertEquals("Delimiter: '" + delimiter + "'", Arrays.asList(event1,event2,event3), actual);
}
@Test
public void testMultiline() throws Exception {
String delimiter = "ccd";
String event1 = "abc";
String event2 = "def";
String event3 = "ghi";
StringBuffer buf = new StringBuffer();
String testInput = event1
+ delimiter
+ event2
+ delimiter
+ event3;
// Turn input into 1 character per line
for (int i = 0; i < testInput.length(); i++) {
buf.append(testInput.charAt(i)).append("\n");
}
testInput = buf.toString();
BufferedReader reader = new BufferedReader(new StringReader(testInput));
List<MantisServerSentEvent> result = CompressionUtils.tokenize(reader, delimiter);
List<String> actual = result.stream().map(e -> e.getEventAsString()).collect(Collectors.toList());
assertEquals("Delimiter: '" + delimiter + "'", Arrays.asList(event1,event2,event3), actual);
}
@Test
public void testCompression() throws Exception {
List<byte[]> events1 = new ArrayList<>();
events1.add("1".getBytes());
events1.add("2".getBytes());
events1.add("3".getBytes());
List<byte[]> events2 = new ArrayList<>();
events2.add("4".getBytes());
events2.add("5".getBytes());
events2.add("6".getBytes());
List<List<byte[]>> buffer = new ArrayList<>();
buffer.add(events1);
buffer.add(events2);
byte[] compressed = CompressionUtils.compressAndBase64EncodeBytes(buffer, false);
List<MantisServerSentEvent> decompressed = CompressionUtils.decompressAndBase64Decode(new String(compressed), true, false);
assertEquals("1", decompressed.get(0).getEventAsString());
assertEquals("2", decompressed.get(1).getEventAsString());
assertEquals("3", decompressed.get(2).getEventAsString());
assertEquals("4", decompressed.get(3).getEventAsString());
assertEquals("5", decompressed.get(4).getEventAsString());
assertEquals("6", decompressed.get(5).getEventAsString());
// test snappy
compressed = CompressionUtils.compressAndBase64EncodeBytes(buffer, true);
decompressed = CompressionUtils.decompressAndBase64Decode(new String(compressed), true, true);
assertEquals("1", decompressed.get(0).getEventAsString());
assertEquals("2", decompressed.get(1).getEventAsString());
assertEquals("3", decompressed.get(2).getEventAsString());
assertEquals("4", decompressed.get(3).getEventAsString());
assertEquals("5", decompressed.get(4).getEventAsString());
assertEquals("6", decompressed.get(5).getEventAsString());
// test custom delimiter
compressed = CompressionUtils.compressAndBase64EncodeBytes(buffer, true, "abcdefg".getBytes());
decompressed = CompressionUtils.decompressAndBase64Decode(new String(compressed), true, true, "abcdefg");
assertEquals("1", decompressed.get(0).getEventAsString());
assertEquals("2", decompressed.get(1).getEventAsString());
assertEquals("3", decompressed.get(2).getEventAsString());
assertEquals("4", decompressed.get(3).getEventAsString());
assertEquals("5", decompressed.get(4).getEventAsString());
assertEquals("6", decompressed.get(5).getEventAsString());
}
}
| 7,770 |
0 | Create_ds/mantis/mantis-common/src/test/java/com/mantisrx/common | Create_ds/mantis/mantis-common/src/test/java/com/mantisrx/common/utils/LabelUtilsTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mantisrx.common.utils;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import io.mantisrx.common.Label;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.util.ArrayList;
import java.util.List;
import org.junit.Test;
public class LabelUtilsTest {
@Test
public void testGenerate3Pairs() {
String tagQuery = "k1=v1,k2=v2,k3=v3";
List<Label> pairs = LabelUtils.generatePairs(tagQuery);
assertEquals(3, pairs.size());
assertTrue(pairs.contains(new Label("k1", "v1")));
assertTrue(pairs.contains(new Label("k2", "v2")));
assertTrue(pairs.contains(new Label("k3", "v3")));
}
@Test
public void testGenerate1Pair() {
String tagQuery = "k1=v1";
List<Label> pairs = LabelUtils.generatePairs(tagQuery);
assertEquals(1, pairs.size());
assertTrue(pairs.contains(new Label("k1", "v1")));
}
@Test
public void testGeneratePairNull() {
String tagQuery = null;
List<Label> pairs = LabelUtils.generatePairs(tagQuery);
assertEquals(0, pairs.size());
}
@Test
public void testGeneratePairMalformed() {
String tagQuery = "k1=";
List<Label> pairs = LabelUtils.generatePairs(tagQuery);
assertEquals(0, pairs.size());
}
@Test
public void testGeneratePairPartialMalformed() {
String tagQuery = "k1=,k2=v2";
List<Label> pairs = LabelUtils.generatePairs(tagQuery);
assertEquals(1, pairs.size());
assertEquals(new Label("k2", "v2"), pairs.get(0));
}
@Test
public void testAllPairsPresent() {
String tagQuery = "k1=v1,k2=v2,k3=v3";
List<Label> expectedPairs = LabelUtils.generatePairs(tagQuery);
List<Label> actualPairs = new ArrayList<>();
actualPairs.add(new Label("k1", "v1"));
actualPairs.add(new Label("k2", "v2"));
actualPairs.add(new Label("k3", "v3"));
assertTrue(LabelUtils.allPairsPresent(expectedPairs, actualPairs));
}
@Test
public void testAllPairsPresent2() {
String tagQuery = "k1=v1,k2=v2,k3=v3";
List<Label> expectedPairs = LabelUtils.generatePairs(tagQuery);
List<Label> actualPairs = new ArrayList<>();
actualPairs.add(new Label("k1", "v1"));
actualPairs.add(new Label("k2", "v2"));
assertFalse(LabelUtils.allPairsPresent(expectedPairs, actualPairs));
}
@Test
public void testSomePairsPresent() {
String tagQuery = "k1=v1,k2=v2,k3=v3";
List<Label> expectedPairs = LabelUtils.generatePairs(tagQuery);
List<Label> actualPairs = new ArrayList<>();
actualPairs.add(new Label("k1", "v1"));
actualPairs.add(new Label("k2", "v2"));
assertTrue(LabelUtils.somePairsPresent(expectedPairs, actualPairs));
}
@Test
public void testSomePairsPresent2() {
String tagQuery = "k1=v1,k2=v2,k3=v3";
List<Label> expectedPairs = LabelUtils.generatePairs(tagQuery);
List<Label> actualPairs = new ArrayList<>();
actualPairs.add(new Label("k4", "v1"));
actualPairs.add(new Label("k5", "v2"));
assertFalse(LabelUtils.somePairsPresent(expectedPairs, actualPairs));
}
@Test
public void testSomePairsPresent3() {
String tagQuery = "";
List<Label> expectedPairs = LabelUtils.generatePairs(tagQuery);
List<Label> actualPairs = new ArrayList<>();
actualPairs.add(new Label("k4", "v1"));
actualPairs.add(new Label("k5", "v2"));
assertFalse(LabelUtils.somePairsPresent(expectedPairs, actualPairs));
}
@Test
public void testSerDe() {
Label l = new Label("k1", "v1");
ObjectMapper mapper = new ObjectMapper();
try {
System.out.println(mapper.writeValueAsString(l));
} catch (JsonProcessingException e) {
fail();
}
}
} | 7,771 |
0 | Create_ds/mantis/mantis-common/src/testFixtures/java/io/mantisrx/common | Create_ds/mantis/mantis-common/src/testFixtures/java/io/mantisrx/common/util/DelegateClock.java | /*
* Copyright 2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.common.util;
import java.time.Clock;
import java.time.Instant;
import java.time.ZoneId;
import java.util.concurrent.atomic.AtomicReference;
import lombok.RequiredArgsConstructor;
public class DelegateClock extends Clock {

    /** Holder of the current backing clock; swapping its value redirects all calls. */
    private final AtomicReference<Clock> delegate;

    /**
     * Creates a clock that forwards every call to whatever {@link Clock} the given
     * reference currently holds. (Constructor written out explicitly — equivalent to
     * the previous Lombok {@code @RequiredArgsConstructor}.)
     *
     * @param delegate mutable reference to the backing clock
     */
    public DelegateClock(AtomicReference<Clock> delegate) {
        this.delegate = delegate;
    }

    @Override
    public ZoneId getZone() {
        return delegate.get().getZone();
    }

    @Override
    public Clock withZone(ZoneId zone) {
        return delegate.get().withZone(zone);
    }

    @Override
    public Instant instant() {
        return delegate.get().instant();
    }
}
| 7,772 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/reactivx/mantis | Create_ds/mantis/mantis-common/src/main/java/io/reactivx/mantis/operators/OperatorGroupBy.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.reactivx.mantis.operators;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicLongFieldUpdater;
import rx.Observable;
import rx.Observable.OnSubscribe;
import rx.Observable.Operator;
import rx.Observer;
import rx.Producer;
import rx.Subscriber;
import rx.exceptions.OnErrorThrowable;
import rx.functions.Action0;
import rx.functions.Func1;
import rx.internal.operators.NotificationLite;
import rx.observables.GroupedObservable;
import rx.subjects.Subject;
/**
 * Groups the items emitted by an Observable according to a specified criterion, and emits these
 * grouped items as Observables, one Observable per group.
 * <p>
 * <img width="640" height="360" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/groupBy.png" alt="">
 *
 * @param <K>
 *            the key type
 * @param <T>
 *            the source and group value type
 * @param <R>
 *            the value type of the groups
 */
public class OperatorGroupBy<T, K, R> implements Operator<GroupedObservable<K, R>, T> {

    // Shared identity function used when the caller supplies no value selector.
    private final static Func1<Object, Object> IDENTITY = new Func1<Object, Object>() {
        @Override
        public Object call(Object t) {
            return t;
        }
    };

    // Extracts the group key from each upstream item.
    final Func1<? super T, ? extends K> keySelector;
    // Maps each upstream item to the value emitted on its group.
    final Func1<? super T, ? extends R> valueSelector;

    @SuppressWarnings("unchecked")
    public OperatorGroupBy(final Func1<? super T, ? extends K> keySelector) {
        this(keySelector, (Func1<T, R>) IDENTITY);
    }

    public OperatorGroupBy(
            Func1<? super T, ? extends K> keySelector,
            Func1<? super T, ? extends R> valueSelector) {
        this.keySelector = keySelector;
        this.valueSelector = valueSelector;
    }

    @Override
    public Subscriber<? super T> call(final Subscriber<? super GroupedObservable<K, R>> child) {
        return new GroupBySubscriber<K, T, R>(keySelector, valueSelector, child);
    }

    static final class GroupBySubscriber<K, T, R> extends Subscriber<T> {

        // Field updaters avoid allocating one Atomic* object per subscriber instance.
        @SuppressWarnings("rawtypes")
        static final AtomicIntegerFieldUpdater<GroupBySubscriber> COMPLETION_EMITTED_UPDATER = AtomicIntegerFieldUpdater.newUpdater(GroupBySubscriber.class, "completionEmitted");
        @SuppressWarnings("rawtypes")
        static final AtomicIntegerFieldUpdater<GroupBySubscriber> TERMINATED_UPDATER = AtomicIntegerFieldUpdater.newUpdater(GroupBySubscriber.class, "terminated");
        @SuppressWarnings("rawtypes")
        static final AtomicLongFieldUpdater<GroupBySubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(GroupBySubscriber.class, "requested");
        @SuppressWarnings("rawtypes")
        static final AtomicLongFieldUpdater<GroupBySubscriber> BUFFERED_COUNT = AtomicLongFieldUpdater.newUpdater(GroupBySubscriber.class, "bufferedCount");

        // Cap on outstanding upstream demand plus buffered-but-undelivered items.
        private static final int MAX_QUEUE_SIZE = 1024;

        final GroupBySubscriber<K, T, R> self = this;
        final Func1<? super T, ? extends K> keySelector;
        final Func1<? super T, ? extends R> elementSelector;
        final Subscriber<? super GroupedObservable<K, R>> child;
        // Live groups keyed by group key; entries removed on group completion/unsubscribe.
        private final ConcurrentHashMap<K, GroupState<K, T>> groups = new ConcurrentHashMap<K, GroupState<K, T>>();

        // 0/1 flags and counters mutated exclusively through the field updaters above.
        volatile int completionEmitted;
        volatile int terminated;
        volatile long requested;
        volatile long bufferedCount;

        public GroupBySubscriber(
                Func1<? super T, ? extends K> keySelector,
                Func1<? super T, ? extends R> elementSelector,
                Subscriber<? super GroupedObservable<K, R>> child) {
            super();
            this.keySelector = keySelector;
            this.elementSelector = elementSelector;
            this.child = child;
        }

        @Override
        public void onStart() {
            // Prime upstream demand with the full window.
            REQUESTED.set(this, MAX_QUEUE_SIZE);
            request(MAX_QUEUE_SIZE);
        }

        @Override
        public void onCompleted() {
            if (TERMINATED_UPDATER.compareAndSet(this, 0, 1)) {
                // if we receive onCompleted from our parent we onComplete children
                // for each group check if it is ready to accept more events; if so pass the onCompleted through, else buffer it.
                for (GroupState<K, T> group : groups.values()) {
                    emitItem(group, NotificationLite.completed());
                }
                // special case (no groups emitted ... or all unsubscribed)
                if (groups.size() == 0) {
                    // we must track 'completionEmitted' separately from 'completed' since `completeInner` can result in childObserver.onCompleted() being emitted
                    if (COMPLETION_EMITTED_UPDATER.compareAndSet(this, 0, 1)) {
                        child.onCompleted();
                    }
                }
            }
        }

        @Override
        public void onError(Throwable e) {
            if (TERMINATED_UPDATER.compareAndSet(this, 0, 1)) {
                // we immediately tear everything down if we receive an error
                child.onError(e);
            }
        }

        // The grouped observable propagates the 'producer.request' call from its subscriber to this method.
        // Here we keep track of the requested count for each group.
        // If we already have items queued when a request comes in we vend those and decrement the outstanding request count.
        void requestFromGroupedObservable(long n, GroupState<K, T> group) {
            group.requested.getAndAdd(n);
            // count acts as a work-in-progress guard: only the 0->1 transition drains.
            if (group.count.getAndIncrement() == 0) {
                pollQueue(group);
            }
        }

        @Override
        public void onNext(T t) {
            try {
                final K key = keySelector.call(t);
                GroupState<K, T> group = groups.get(key);
                if (group == null) {
                    // this group doesn't exist
                    if (child.isUnsubscribed()) {
                        // we have been unsubscribed on the outer so won't send any more groups
                        return;
                    }
                    group = createNewGroup(key);
                }
                emitItem(group, NotificationLite.next(t));
            } catch (Throwable e) {
                onError(OnErrorThrowable.addValueAsLastCause(e, t));
            }
        }

        // Creates the per-key state plus its GroupedObservable and emits it downstream.
        private GroupState<K, T> createNewGroup(final K key) {
            final GroupState<K, T> groupState = new GroupState<K, T>();
            GroupedObservable<K, R> go = GroupedObservable.create(key, new OnSubscribe<R>() {
                @Override
                public void call(final Subscriber<? super R> o) {
                    // Route the group's demand back through the parent subscriber.
                    o.setProducer(new Producer() {
                        @Override
                        public void request(long n) {
                            requestFromGroupedObservable(n, groupState);
                        }
                    });
                    final AtomicBoolean once = new AtomicBoolean();
                    groupState.getObservable().doOnUnsubscribe(new Action0() {
                        @Override
                        public void call() {
                            if (once.compareAndSet(false, true)) {
                                // done once per instance, either onCompleted or unsubscribe
                                cleanupGroup(key);
                            }
                        }
                    }).unsafeSubscribe(new Subscriber<T>(o) {
                        @Override
                        public void onCompleted() {
                            o.onCompleted();
                            // eagerly cleanup instead of waiting for unsubscribe
                            if (once.compareAndSet(false, true)) {
                                // done once per instance, either onCompleted or unsubscribe
                                cleanupGroup(key);
                            }
                        }

                        @Override
                        public void onError(Throwable e) {
                            o.onError(e);
                        }

                        @Override
                        public void onNext(T t) {
                            try {
                                o.onNext(elementSelector.call(t));
                            } catch (Throwable e) {
                                onError(OnErrorThrowable.addValueAsLastCause(e, t));
                            }
                        }
                    });
                }
            });
            GroupState<K, T> putIfAbsent = groups.putIfAbsent(key, groupState);
            if (putIfAbsent != null) {
                // this shouldn't happen (because we receive onNext sequentially) and would mean we have a bug
                throw new IllegalStateException("Group already existed while creating a new one");
            }
            child.onNext(go);
            return groupState;
        }

        // Drops a finished/unsubscribed group, reclaims its buffer space and may complete the outer.
        private void cleanupGroup(K key) {
            GroupState<K, T> removed;
            removed = groups.remove(key);
            if (removed != null) {
                if (removed.buffer.size() > 0) {
                    BUFFERED_COUNT.addAndGet(self, -removed.buffer.size());
                }
                completeInner();
                // since we may have unsubscribed early with items in the buffer
                // we remove those above and have freed up room to request more
                // so give it a chance to request more now
                requestMoreIfNecessary();
            }
        }

        // Delivers a notification to a group, either directly (fast path) or via its buffer.
        private void emitItem(GroupState<K, T> groupState, Object item) {
            Queue<Object> q = groupState.buffer;
            AtomicLong keyRequested = groupState.requested;
            REQUESTED.decrementAndGet(this);
            // short circuit buffering: deliver directly when the group has demand and no backlog
            if (keyRequested != null && keyRequested.get() > 0 && (q == null || q.isEmpty())) {
                @SuppressWarnings("unchecked")
                Observer<Object> obs = (Observer<Object>) groupState.getObserver();
                NotificationLite.accept(obs, item);
                keyRequested.decrementAndGet();
            } else {
                q.add(item);
                BUFFERED_COUNT.incrementAndGet(this);
                if (groupState.count.getAndIncrement() == 0) {
                    pollQueue(groupState);
                }
            }
            requestMoreIfNecessary();
        }

        // Single-drainer loop: keeps draining while other threads signalled more work via count.
        private void pollQueue(GroupState<K, T> groupState) {
            do {
                drainIfPossible(groupState);
                long c = groupState.count.decrementAndGet();
                if (c > 1) {
                    /*
                     * Set down to 1 and then iterate again.
                     * we lower it to 1 otherwise it could have grown very large while in the last poll loop
                     * and then we can end up looping all those times again here before exiting even once we've drained
                     */
                    groupState.count.set(1);
                    // we now loop again, and if anything tries scheduling again after this it will increment and cause us to loop again after
                }
            } while (groupState.count.get() > 0);
        }

        // Tops up upstream demand so outstanding requests + buffered items stay at MAX_QUEUE_SIZE.
        private void requestMoreIfNecessary() {
            if (REQUESTED.get(this) < (MAX_QUEUE_SIZE) && terminated == 0) {
                // NOTE: local 'requested' intentionally shadows the volatile field for the CAS expected value.
                long requested = REQUESTED.get(this);
                long toRequest = MAX_QUEUE_SIZE - REQUESTED.get(this) - BUFFERED_COUNT.get(this);
                if (toRequest > 0 && REQUESTED.compareAndSet(this, requested, toRequest + requested)) {
                    request(toRequest);
                }
            }
        }

        // Moves buffered notifications to the group's observer while it has outstanding demand.
        private void drainIfPossible(GroupState<K, T> groupState) {
            while (groupState.requested.get() > 0) {
                Object t = groupState.buffer.poll();
                if (t != null) {
                    @SuppressWarnings("unchecked")
                    Observer<Object> obs = (Observer<Object>) groupState.getObserver();
                    NotificationLite.accept(obs, t);
                    groupState.requested.decrementAndGet();
                    BUFFERED_COUNT.decrementAndGet(this);
                    // if we have used up all the events we requested from upstream then figure out what to ask for this time based on the empty space in the buffer
                    requestMoreIfNecessary();
                } else {
                    // queue is empty break
                    break;
                }
            }
        }

        private void completeInner() {
            // if we have no outstanding groups (all completed or unsubscribed) and terminated/unsubscribed on outer
            if (groups.size() == 0 && (terminated == 1 || child.isUnsubscribed())) {
                // completionEmitted ensures we only emit onCompleted once
                if (COMPLETION_EMITTED_UPDATER.compareAndSet(this, 0, 1)) {
                    if (child.isUnsubscribed()) {
                        // if the entire groupBy has been unsubscribed and children are completed we will propagate the unsubscribe up.
                        unsubscribe();
                    }
                    child.onCompleted();
                }
            }
        }

        // Per-group state: the subject backing the group, its demand counter,
        // a work-in-progress counter for the drain loop, and the overflow buffer.
        private static class GroupState<K, T> {

            private final Subject<T, T> s = BufferUntilSubscriber.create();
            private final AtomicLong requested = new AtomicLong();
            private final AtomicLong count = new AtomicLong();
            private final Queue<Object> buffer = new ConcurrentLinkedQueue<Object>(); // TODO should this be lazily created?

            public Observable<T> getObservable() {
                return s;
            }

            public Observer<T> getObserver() {
                return s;
            }
        }
    }
}
| 7,773 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/reactivx/mantis | Create_ds/mantis/mantis-common/src/main/java/io/reactivx/mantis/operators/BufferUntilSubscriber.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.reactivx.mantis.operators;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicReference;
import rx.Observer;
import rx.Subscriber;
import rx.functions.Action0;
import rx.internal.operators.NotificationLite;
import rx.subjects.Subject;
import rx.subscriptions.Subscriptions;
/**
 * A solution to the "time gap" problem that occurs with {@code groupBy} and {@code pivot}.
 * <p>
 * This currently has temporary unbounded buffers. It needs to become bounded and then do one of two things:
 * <ol>
 * <li>blow up and make the user do something about it</li>
 * <li>work with the backpressure solution ... still to be implemented (such as co-routines)</li>
 * </ol><p>
 * Generally the buffer should be very short lived (milliseconds) and then stops being involved. It can become a
 * memory leak though if a {@code GroupedObservable} backed by this class is emitted but never subscribed to
 * (such as filtered out). In that case, either a time-bomb to throw away the buffer, or just blowing up and
 * making the user do something about it is needed.
 * <p>
 * For example, to filter out {@code GroupedObservable}s, perhaps they need a silent {@code subscribe()} on them
 * to just blackhole the data.
 * <p>
 * This is an initial start at solving this problem and solves the immediate problem of {@code groupBy} and
 * {@code pivot} and trades off the possibility of memory leak for deterministic functionality.
 *
 * @see <a href="https://github.com/ReactiveX/RxJava/issues/844">the Github issue describing the time gap problem</a>
 * @param <T>
 *            the type of the items to be buffered
 */
public final class BufferUntilSubscriber<T> extends Subject<T, T> {

    // Sink installed on unsubscribe so late events are silently discarded.
    @SuppressWarnings("rawtypes")
    final static Observer EMPTY_OBSERVER = new Observer() {
        @Override
        public void onCompleted() {
            // deliberately no op
        }

        @Override
        public void onError(Throwable e) {
            // deliberately no op
        }

        @Override
        public void onNext(Object t) {
            // deliberately no op
        }
    };

    final State<T> state;
    // Once true, events bypass the buffer and go straight to the observer.
    // NOTE(review): 'forward' is not volatile and is read outside state.guard; this
    // presumably relies on events being delivered sequentially — inherited from RxJava, confirm before changing.
    private boolean forward;

    private BufferUntilSubscriber(State<T> state) {
        super(new OnSubscribeAction<T>(state));
        this.state = state;
    }

    /**
     * Creates a default, unbounded buffering Subject instance.
     * @param <T> the value type
     * @return the instance
     */
    public static <T> BufferUntilSubscriber<T> create() {
        State<T> state = new State<T>();
        return new BufferUntilSubscriber<T>(state);
    }

    // Enqueues a notification; if an observer is attached and nobody else is
    // emitting, claims the emitter role and drains the buffer to it.
    private void emit(Object v) {
        synchronized (state.guard) {
            state.buffer.add(v);
            if (state.get() != null && !state.emitting) {
                // Have an observer and nobody is emitting,
                // should drain the `buffer`
                forward = true;
                state.emitting = true;
            }
        }
        if (forward) {
            Object o;
            while ((o = state.buffer.poll()) != null) {
                NotificationLite.accept(state.get(), o);
            }
            // Because `emit(Object v)` will be called in sequence,
            // no event will be put into `buffer` after we drain it.
        }
    }

    @Override
    public void onCompleted() {
        if (forward) {
            state.get().onCompleted();
        } else {
            emit(NotificationLite.completed());
        }
    }

    @Override
    public void onError(Throwable e) {
        if (forward) {
            state.get().onError(e);
        } else {
            emit(NotificationLite.error(e));
        }
    }

    @Override
    public void onNext(T t) {
        if (forward) {
            state.get().onNext(t);
        } else {
            emit(NotificationLite.next(t));
        }
    }

    @Override
    public boolean hasObservers() {
        synchronized (state.guard) {
            return state.get() != null;
        }
    }

    /** The common state. The AtomicReference value is the attached observer (or null). */
    static final class State<T> extends AtomicReference<Observer<? super T>> {

        /** */
        private static final long serialVersionUID = 8026705089538090368L;
        final Object guard = new Object();
        final ConcurrentLinkedQueue<Object> buffer = new ConcurrentLinkedQueue<Object>();
        /* protected by guard */
        boolean emitting;

        // CAS the observer slot; used to enforce the single-subscriber rule.
        boolean casObserverRef(Observer<? super T> expected, Observer<? super T> next) {
            return compareAndSet(expected, next);
        }
    }

    static final class OnSubscribeAction<T> implements OnSubscribe<T> {

        final State<T> state;

        public OnSubscribeAction(State<T> state) {
            this.state = state;
        }

        @Override
        public void call(final Subscriber<? super T> s) {
            if (state.casObserverRef(null, s)) {
                // On unsubscribe, swap in the no-op observer so buffered/late events are dropped.
                s.add(Subscriptions.create(new Action0() {
                    @SuppressWarnings("unchecked")
                    @Override
                    public void call() {
                        state.set(EMPTY_OBSERVER);
                    }
                }));
                boolean win = false;
                synchronized (state.guard) {
                    if (!state.emitting) {
                        state.emitting = true;
                        win = true;
                    }
                }
                if (win) {
                    // We are the sole emitter: drain until the buffer is observably empty.
                    while (true) {
                        Object o;
                        while ((o = state.buffer.poll()) != null) {
                            NotificationLite.accept(state.get(), o);
                        }
                        synchronized (state.guard) {
                            if (state.buffer.isEmpty()) {
                                // Although the buffer is empty, there is still a chance
                                // that further events may be put into the `buffer`.
                                // `emit(Object v)` should handle it.
                                state.emitting = false;
                                break;
                            }
                        }
                    }
                }
            } else {
                s.onError(new IllegalStateException("Only one subscriber allowed!"));
            }
        }
    }
}
0 | Create_ds/mantis/mantis-common/src/main/java/io/reactivx/mantis | Create_ds/mantis/mantis-common/src/main/java/io/reactivx/mantis/operators/OperatorOnErrorResumeNextViaFunction.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.reactivx.mantis.operators;
import rx.Observable;
import rx.Observable.Operator;
import rx.Producer;
import rx.Subscriber;
import rx.exceptions.Exceptions;
import rx.functions.Func1;
import rx.plugins.RxJavaPlugins;
/**
 * Instruct an Observable to pass control to another Observable (the return value of a function)
 * rather than invoking {@code onError} if it encounters an error.
 * <p>
 * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/onErrorResumeNext.png" alt="">
 * <p>
 * When the source errors, instead of calling the downstream's {@code onError}, the error is mapped
 * through the supplied function and the resulting Observable takes over emission. Downstream may
 * therefore never observe that an error happened. Useful to stop error propagation or to supply
 * fallback data.
 */
public final class OperatorOnErrorResumeNextViaFunction<T> implements Operator<T, T> {

    /** Maps the upstream error to the observable that takes over emission. */
    private final Func1<Throwable, ? extends Observable<? extends T>> resumeFunction;

    public OperatorOnErrorResumeNextViaFunction(Func1<Throwable, ? extends Observable<? extends T>> f) {
        this.resumeFunction = f;
    }

    @Override
    public Subscriber<? super T> call(final Subscriber<? super T> child) {
        Subscriber<T> upstream = new Subscriber<T>() {

            /** Set once a terminal event has been handled; later events are ignored. */
            private boolean terminated = false;

            @Override
            public void onCompleted() {
                if (terminated) {
                    return;
                }
                terminated = true;
                child.onCompleted();
            }

            @Override
            public void onError(Throwable e) {
                if (terminated) {
                    // Already terminal; only rethrow fatal errors.
                    Exceptions.throwIfFatal(e);
                    return;
                }
                terminated = true;
                try {
                    // Report the original error to the plugin hook, detach from the
                    // failed source, then hand emission over to the resume observable.
                    RxJavaPlugins.getInstance().getErrorHandler().handleError(e);
                    unsubscribe();
                    Observable<? extends T> next = resumeFunction.call(e);
                    next.unsafeSubscribe(child);
                } catch (Throwable e2) {
                    // The resume function itself failed: surface that error downstream.
                    child.onError(e2);
                }
            }

            @Override
            public void onNext(T t) {
                if (terminated) {
                    return;
                }
                child.onNext(t);
            }

            @Override
            public void setProducer(final Producer producer) {
                // Relay downstream demand to the current upstream producer.
                child.setProducer(producer::request);
            }
        };
        child.add(upstream);
        return upstream;
    }
}
| 7,775 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/reactivx/mantis | Create_ds/mantis/mantis-common/src/main/java/io/reactivx/mantis/operators/DisableBackPressureOperator.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.reactivx.mantis.operators;
import java.util.concurrent.atomic.AtomicLong;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable.Operator;
import rx.Producer;
import rx.Subscriber;
import rx.functions.Action0;
import rx.subscriptions.Subscriptions;
/**
 * Operator that disables reactive-pull backpressure: downstream requests are
 * swallowed, and the upstream producer is asked for {@code Long.MAX_VALUE},
 * i.e. unbounded emission. Events are otherwise passed through untouched.
 */
public class DisableBackPressureOperator<T> implements Operator<T, T> {

    private static final Logger logger = LoggerFactory.getLogger(DisableBackPressureOperator.class);

    public DisableBackPressureOperator() {
    }

    @Override
    public Subscriber<? super T> call(final Subscriber<? super T> o) {
        // (Removed a dead AtomicLong local and a no-op unsubscribe action that
        // the previous version registered but never used.)
        // Swallow all downstream requests; demand is never propagated upward.
        o.setProducer(new Producer() {
            @Override
            public void request(long n) {
                // intentionally ignored - backpressure is disabled
            }
        });
        return new Subscriber<T>(o) {
            @Override
            public void onCompleted() {
                o.onCompleted();
            }

            @Override
            public void onError(Throwable e) {
                o.onError(e);
            }

            @Override
            public void onNext(T t) {
                o.onNext(t);
            }

            @Override
            public void setProducer(Producer p) {
                // Request everything from upstream so it emits without throttling.
                p.request(Long.MAX_VALUE);
            }
        };
    }
}
| 7,776 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/reactivx/mantis | Create_ds/mantis/mantis-common/src/main/java/io/reactivx/mantis/operators/BufferOnBackPressureOperator.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.reactivx.mantis.operators;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable.Operator;
import rx.Observer;
import rx.Producer;
import rx.Subscriber;
import rx.functions.Action0;
import rx.internal.operators.NotificationLite;
import rx.subscriptions.Subscriptions;
/**
 * Rx {@link Operator} that absorbs downstream backpressure by buffering: when the child
 * has no outstanding requests, items are parked in a bounded {@link ArrayBlockingQueue}
 * (capacity {@code size}); once the queue is full, further items are dropped. Counts of
 * emitted/dropped/buffered items are published through the Mantis metrics registry.
 *
 * <p>NOTE(review): {@code METRICS_NAME_PREFIX} is {@code "DropOperator_"}, which looks
 * copied from {@code DropOperator} -- confirm the published metric name is intentional
 * before changing it, since dashboards/alerts may already key off it.
 */
@SuppressWarnings("unchecked")
public class BufferOnBackPressureOperator<T> implements Operator<T, T> {
public static final String METRICS_NAME_PREFIX = "DropOperator_";
private static final Logger logger = LoggerFactory.getLogger(BufferOnBackPressureOperator.class);
// Default capacity of the backpressure buffer.
private static final int DEFAULT_SIZE = 4096;
// Maximum number of items buffered before new arrivals are dropped.
private final int size;
// Holds NotificationLite-wrapped items awaiting downstream request credits.
private final ArrayBlockingQueue<Object> queue;
private final Counter next;
private final Counter error;
private final Counter complete;
private final Gauge subscribe;
private final Gauge requestedGauge;
private final Counter dropped;
private final Gauge bufferedGauge;
private String name;
/**
 * Creates an operator with the default buffer size; metrics are registered under
 * {@code METRICS_NAME_PREFIX + name}.
 */
public BufferOnBackPressureOperator(String name) {
this(name, DEFAULT_SIZE);
}
/**
 * Creates an operator that reports through an externally supplied {@link Metrics}
 * instance. Note: {@code name} stays null on this path.
 */
public BufferOnBackPressureOperator(final Metrics m, int size) {
this.size = size;
this.queue = new ArrayBlockingQueue<Object>(size);
next = m.getCounter("" + Counters.onNext);
error = m.getCounter("" + Counters.onError);
complete = m.getCounter("" + Counters.onComplete);
subscribe = m.getGauge("" + Gauges.subscribe);
dropped = m.getCounter("" + Counters.dropped);
requestedGauge = m.getGauge("" + Gauges.requested);
bufferedGauge = m.getGauge("" + Gauges.bufferedGauge);
}
/**
 * Creates an operator with an explicit buffer size, building and registering its own
 * metrics group under {@code METRICS_NAME_PREFIX + name}.
 */
public BufferOnBackPressureOperator(String name, int size) {
this.size = size;
this.name = METRICS_NAME_PREFIX + name;
this.queue = new ArrayBlockingQueue<Object>(size);
Metrics m = new Metrics.Builder()
.name(this.name)
.addCounter("" + Counters.onNext)
.addCounter("" + Counters.onError)
.addCounter("" + Counters.onComplete)
.addGauge("" + Gauges.subscribe)
.addCounter("" + Counters.dropped)
.addGauge("" + Gauges.requested)
.addGauge("" + Gauges.bufferedGauge)
.build();
// registerAndGet may return a previously registered instance for the same name.
m = MetricsRegistry.getInstance().registerAndGet(m);
next = m.getCounter("" + Counters.onNext);
error = m.getCounter("" + Counters.onError);
complete = m.getCounter("" + Counters.onComplete);
subscribe = m.getGauge("" + Gauges.subscribe);
dropped = m.getCounter("" + Counters.dropped);
requestedGauge = m.getGauge("" + Gauges.requested);
bufferedGauge = m.getGauge("" + Gauges.bufferedGauge);
}
/**
 * Wires a parent subscriber that requests {@code Long.MAX_VALUE} from upstream and a
 * producer on the child that tracks request credits; buffered items are drained as
 * credits arrive.
 */
@Override
public Subscriber<? super T> call(final Subscriber<? super T> child) {
subscribe.increment();
// Outstanding downstream request credits.
final AtomicLong requested = new AtomicLong();
// 0/1 flag ensuring onCompleted is forwarded at most once.
final AtomicInteger completionEmitted = new AtomicInteger();
// 0/1 flag ensuring only the first terminal event is acted upon.
final AtomicInteger terminated = new AtomicInteger();
final AtomicInteger bufferedCount = new AtomicInteger();
final AtomicBoolean onCompleteReceived = new AtomicBoolean();
// Work-in-progress counter for the pollQueue drain loop (see NOTE there).
final AtomicInteger wip = new AtomicInteger();
child.add(Subscriptions.create(new Action0() {
@Override
public void call() {
subscribe.decrement();
}
}));
child.setProducer(new Producer() {
@Override
public void request(long n) {
requested.getAndAdd(n);
requestedGauge.increment(n);
// System.out.println("request: " + requested.get());
// New credits may allow buffered items (and a pending completion) to drain.
pollQueue(child,
requested,
bufferedCount,
onCompleteReceived,
completionEmitted,
wip);
}
});
Subscriber<T> parent = new Subscriber<T>() {
@Override
public void onStart() {
// Request everything from upstream; backpressure is absorbed by the queue.
request(Long.MAX_VALUE);
}
@Override
public void onCompleted() {
if (terminated.compareAndSet(0, 1)) {
complete.increment();
// Completion is deferred until the buffer is drained (see drainIfPossible).
onCompleteReceived.set(true);
pollQueue(child,
requested,
bufferedCount,
onCompleteReceived,
completionEmitted,
wip);
}
}
@Override
public void onError(Throwable e) {
if (terminated.compareAndSet(0, 1)) {
// Errors bypass the buffer: forward immediately and discard buffered items.
child.onError(e);
error.increment();
queue.clear();
}
}
@Override
public void onNext(T t) {
emitItem(NotificationLite.next(t));
}
private void emitItem(Object item) {
// short circuit buffering
if (requested.get() > 0 && queue.isEmpty()) {
// Fast path: downstream has capacity and nothing is queued ahead of this item.
NotificationLite.accept((Observer) child, item);
requested.decrementAndGet();
requestedGauge.decrement();
next.increment();
// System.out.println("next count: " + next.value());
} else {
boolean success = queue.offer(item);
if (success) {
bufferedCount.incrementAndGet();
bufferedGauge.increment();
// System.out.println("buffered count: " + bufferedGauge.value());
drainIfPossible(child, requested, bufferedCount, onCompleteReceived, completionEmitted);
} else {
// Queue full: the item is dropped, only the counter records it.
dropped.increment();
// System.out.println("dropped count: " + dropped.value());
// dropped
}
}
}
};
// if child unsubscribes it should unsubscribe the parent, but not the other way around
child.add(parent);
return parent;
}
/**
 * Forwards buffered items to the child while request credits remain; once the queue is
 * empty and upstream has completed, emits onCompleted exactly once.
 */
private void drainIfPossible(final Subscriber<? super T> child,
AtomicLong requested,
AtomicInteger bufferedCount,
AtomicBoolean onCompleteReceived,
AtomicInteger completionEmitted
) {
while (requested.get() > 0) {
Object t = queue.poll();
if (t != null) {
NotificationLite.accept((Observer) child, t);
requested.decrementAndGet();
requestedGauge.decrement();
bufferedCount.decrementAndGet();
bufferedGauge.decrement();
// System.out.println("buffered count: " + bufferedGauge.value() + " next " + next.value()) ;
} else {
if (onCompleteReceived.get()) {
if (completionEmitted.compareAndSet(0, 1)) {
child.onCompleted();
queue.clear();
bufferedGauge.set(0);
}
}
// queue is empty break
break;
}
}
}
/**
 * Drain loop driven by the {@code wip} counter.
 *
 * NOTE(review): {@code wip} is decremented here but never incremented anywhere in this
 * class, so it goes negative and the do/while effectively performs a single drain pass.
 * Confirm whether an {@code wip.getAndIncrement()} guard at the call sites was lost.
 */
private void pollQueue(final Subscriber<? super T> child,
AtomicLong requested,
AtomicInteger bufferedCount,
AtomicBoolean onCompleteReceived,
AtomicInteger completionEmitted,
AtomicInteger wip) {
do {
drainIfPossible(child, requested, bufferedCount, onCompleteReceived, completionEmitted);
long c = wip.decrementAndGet();
if (c > 1) {
/*
 * Set down to 1 and then iterate again.
 * we lower it to 1 otherwise it could have grown very large while in the last poll loop
 * and then we can end up looping all those times again here before existing even once we've drained
 */
wip.set(1);
// we now loop again, and if anything tries scheduling again after this it will increment and cause us to loop again after
}
} while (wip.get() > 0);
}
// Counter metric names published for this operator.
public enum Counters {
onNext,
onError,
onComplete,
dropped
}
// Gauge metric names published for this operator.
public enum Gauges {
subscribe,
requested,
bufferedGauge
}
}
| 7,777 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/reactivx/mantis | Create_ds/mantis/mantis-common/src/main/java/io/reactivx/mantis/operators/DropOperator.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.reactivx.mantis.operators;
import com.netflix.spectator.api.Tag;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.metrics.spectator.MetricGroupId;
import java.util.concurrent.atomic.AtomicLong;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable.Operator;
import rx.Producer;
import rx.Subscriber;
import rx.subscriptions.Subscriptions;
/**
 * Rx {@link Operator} that honors downstream backpressure by dropping: when the child
 * has no outstanding request credits, incoming items are counted as dropped instead of
 * buffered. onNext/onError/onComplete/dropped counts are published through the Mantis
 * metrics registry.
 */
public class DropOperator<T> implements Operator<T, T> {

    public static final String METRIC_GROUP = "DropOperator";
    private static final Logger logger = LoggerFactory.getLogger(DropOperator.class);
    private final Counter next;
    private final Counter error;
    private final Counter complete;
    private final Counter dropped;
    // Null when the Metrics-based constructor is used -- see the guard in onError().
    MetricGroupId metricGroupId;

    /**
     * Creates the operator around pre-built metrics. NOTE: this path leaves
     * {@code metricGroupId} null.
     */
    public DropOperator(final Metrics m) {
        next = m.getCounter("" + Counters.onNext);
        error = m.getCounter("" + Counters.onError);
        complete = m.getCounter("" + Counters.onComplete);
        dropped = m.getCounter("" + Counters.dropped);
    }

    /**
     * Creates the operator and registers its counters under the given metric group.
     */
    public DropOperator(final MetricGroupId groupId) {
        this.metricGroupId = groupId;
        Metrics m = new Metrics.Builder()
                .id(metricGroupId)
                .addCounter("" + Counters.onNext)
                .addCounter("" + Counters.onError)
                .addCounter("" + Counters.onComplete)
                .addCounter("" + Counters.dropped)
                .build();
        // registerAndGet may hand back a previously registered instance for this id.
        m = MetricsRegistry.getInstance().registerAndGet(m);
        next = m.getCounter("" + Counters.onNext);
        error = m.getCounter("" + Counters.onError);
        complete = m.getCounter("" + Counters.onComplete);
        dropped = m.getCounter("" + Counters.dropped);
    }

    public DropOperator(final String name) {
        this(new MetricGroupId(METRIC_GROUP + "_" + name));
    }

    public DropOperator(final String name, final Tag... tags) {
        this(new MetricGroupId(METRIC_GROUP + "_" + name, tags));
    }

    @Override
    public Subscriber<? super T> call(final Subscriber<? super T> o) {
        // Outstanding downstream request credits.
        final AtomicLong requested = new AtomicLong();
        o.add(Subscriptions.create(() -> {
        }));
        o.setProducer(new Producer() {
            @Override
            public void request(long n) {
                if (requested.get() == Long.MAX_VALUE) {
                    logger.warn("current requested is int max do not increment");
                } else {
                    requested.getAndAdd(n);
                }
            }
        });
        return new Subscriber<T>(o) {
            @Override
            public void onCompleted() {
                complete.increment();
                o.onCompleted();
            }

            @Override
            public void onError(Throwable e) {
                error.increment();
                // metricGroupId is null when the Metrics-based constructor was used;
                // guard so logging cannot throw an NPE and swallow the real error.
                final String groupName = metricGroupId != null ? metricGroupId.id() : METRIC_GROUP;
                logger.error("onError() occured in DropOperator for groupId: {}", groupName, e);
                o.onError(e);
            }

            @Override
            public void onNext(T t) {
                if (requested.get() > 0) {
                    // Downstream has capacity: consume one credit and pass the item on.
                    requested.decrementAndGet();
                    o.onNext(t);
                    next.increment();
                } else {
                    // No outstanding requests: drop rather than buffer.
                    dropped.increment();
                }
            }

            @Override
            public void setProducer(Producer p) {
                // Request everything from upstream; backpressure is absorbed by dropping.
                p.request(Long.MAX_VALUE);
            }
        };
    }

    public enum Counters {
        onNext,
        onError,
        onComplete,
        dropped
    }

    public enum Gauges {
        subscribe,
        requested
    }
}
| 7,778 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/reactivx/mantis | Create_ds/mantis/mantis-common/src/main/java/io/reactivx/mantis/operators/OnSubscribeRedo.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.reactivx.mantis.operators;
import static rx.Observable.create;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import rx.Notification;
import rx.Observable;
import rx.Observable.OnSubscribe;
import rx.Observable.Operator;
import rx.Producer;
import rx.Scheduler;
import rx.Subscriber;
import rx.functions.Action0;
import rx.functions.Func1;
import rx.functions.Func2;
import rx.schedulers.Schedulers;
import rx.subjects.PublishSubject;
import rx.subscriptions.SerialSubscription;
/**
 * OnSubscribe implementation backing retry/repeat/redo: it resubscribes to {@code source}
 * whenever a control handler (a function over the stream of terminal notifications)
 * signals that another attempt should be made. Appears adapted from RxJava 1.x internals
 * (rx.internal.operators.OnSubscribeRedo) -- TODO(review): confirm provenance.
 */
public final class OnSubscribeRedo<T> implements OnSubscribe<T> {
// Maps every terminal notification to an onNext, i.e. "always redo". The INIFINITE typo
// is part of the identifier and must be kept for source compatibility.
static final Func1<Observable<? extends Notification<?>>, Observable<?>> REDO_INIFINITE = new Func1<Observable<? extends Notification<?>>, Observable<?>>() {
@Override
public Observable<?> call(Observable<? extends Notification<?>> ts) {
return ts.map(new Func1<Notification<?>, Notification<?>>() {
@Override
public Notification<?> call(Notification<?> terminal) {
return Notification.createOnNext(null);
}
});
}
};
// Decides, from the stream of terminal notifications, whether/when to resubscribe.
private final Func1<? super Observable<? extends Notification<?>>, ? extends Observable<?>> controlHandlerFunction;
private final Scheduler scheduler;
private Observable<T> source;
// stopOnComplete=true gives retry semantics; stopOnError=true gives repeat semantics.
private boolean stopOnComplete;
private boolean stopOnError;
public OnSubscribeRedo(Observable<T> source, Func1<? super Observable<? extends Notification<?>>, ? extends Observable<?>> f, boolean stopOnComplete, boolean stopOnError,
Scheduler scheduler) {
this.source = source;
this.controlHandlerFunction = f;
this.stopOnComplete = stopOnComplete;
this.stopOnError = stopOnError;
this.scheduler = scheduler;
}
/** Retries {@code source} indefinitely on error. */
public static <T> Observable<T> retry(Observable<T> source) {
return retry(source, REDO_INIFINITE);
}
/** Retries {@code source} up to {@code count} times; {@code count} must be >= 0. */
public static <T> Observable<T> retry(Observable<T> source, final long count) {
if (count < 0)
throw new IllegalArgumentException("count >= 0 expected");
if (count == 0)
return source;
return retry(source, new RedoFinite(count));
}
public static <T> Observable<T> retry(Observable<T> source, Func1<? super Observable<? extends Notification<?>>, ? extends Observable<?>> notificationHandler) {
return create(new OnSubscribeRedo<T>(source, notificationHandler, true, false, Schedulers.trampoline()));
}
public static <T> Observable<T> retry(Observable<T> source, Func1<? super Observable<? extends Notification<?>>, ? extends Observable<?>> notificationHandler, Scheduler scheduler) {
return create(new OnSubscribeRedo<T>(source, notificationHandler, true, false, scheduler));
}
/** Repeats {@code source} indefinitely after completion. */
public static <T> Observable<T> repeat(Observable<T> source) {
return repeat(source, Schedulers.trampoline());
}
public static <T> Observable<T> repeat(Observable<T> source, Scheduler scheduler) {
return repeat(source, REDO_INIFINITE, scheduler);
}
public static <T> Observable<T> repeat(Observable<T> source, final long count) {
return repeat(source, count, Schedulers.trampoline());
}
/** Emits the sequence {@code count} times total; {@code count} must be >= 0. */
public static <T> Observable<T> repeat(Observable<T> source, final long count, Scheduler scheduler) {
if (count == 0) {
return Observable.empty();
}
if (count < 0)
throw new IllegalArgumentException("count >= 0 expected");
// count - 1 because the initial subscription already plays the sequence once.
return repeat(source, new RedoFinite(count - 1), scheduler);
}
public static <T> Observable<T> repeat(Observable<T> source, Func1<? super Observable<? extends Notification<?>>, ? extends Observable<?>> notificationHandler) {
return create(new OnSubscribeRedo<T>(source, notificationHandler, false, true, Schedulers.trampoline()));
}
public static <T> Observable<T> repeat(Observable<T> source, Func1<? super Observable<? extends Notification<?>>, ? extends Observable<?>> notificationHandler, Scheduler scheduler) {
return create(new OnSubscribeRedo<T>(source, notificationHandler, false, true, scheduler));
}
/** Resubscribes on both completion and error, as directed by the handler. */
public static <T> Observable<T> redo(Observable<T> source, Func1<? super Observable<? extends Notification<?>>, ? extends Observable<?>> notificationHandler, Scheduler scheduler) {
return create(new OnSubscribeRedo<T>(source, notificationHandler, false, false, scheduler));
}
@Override
public void call(final Subscriber<? super T> child) {
// Locked until the control handler emits, preventing premature resubscription.
final AtomicBoolean isLocked = new AtomicBoolean(true);
// Guards scheduling of subscribeToSource when capacity arrives after a redo signal.
final AtomicBoolean resumeBoundary = new AtomicBoolean(true);
// incremented when requests are made, decremented when requests are fulfilled
final AtomicLong consumerCapacity = new AtomicLong(0l);
final AtomicReference<Producer> currentProducer = new AtomicReference<Producer>();
final Scheduler.Worker worker = scheduler.createWorker();
child.add(worker);
// Serial so each resubscription replaces (and unsubscribes) the previous attempt.
final SerialSubscription sourceSubscriptions = new SerialSubscription();
child.add(sourceSubscriptions);
// Carries the terminal notifications of each attempt into the control handler.
final PublishSubject<Notification<?>> terminals = PublishSubject.create();
final Action0 subscribeToSource = new Action0() {
@Override
public void call() {
if (child.isUnsubscribed()) {
return;
}
Subscriber<T> terminalDelegatingSubscriber = new Subscriber<T>() {
@Override
public void onCompleted() {
unsubscribe();
terminals.onNext(Notification.createOnCompleted());
}
@Override
public void onError(Throwable e) {
unsubscribe();
terminals.onNext(Notification.createOnError(e));
}
@Override
public void onNext(T v) {
consumerCapacity.decrementAndGet();
child.onNext(v);
}
@Override
public void setProducer(Producer producer) {
currentProducer.set(producer);
// Carry over any capacity requested before this attempt started.
producer.request(consumerCapacity.get());
}
};
// new subscription each time so if it unsubscribes itself it does not prevent retries
// by unsubscribing the child subscription
sourceSubscriptions.set(terminalDelegatingSubscriber);
source.unsafeSubscribe(terminalDelegatingSubscriber);
}
};
// the observable received by the control handler function will receive notifications of onCompleted in the case of 'repeat'
// type operators or notifications of onError for 'retry' this is done by lifting in a custom operator to selectively divert
// the retry/repeat relevant values to the control handler
final Observable<?> restarts = controlHandlerFunction.call(
terminals.lift(new Operator<Notification<?>, Notification<?>>() {
@Override
public Subscriber<? super Notification<?>> call(final Subscriber<? super Notification<?>> filteredTerminals) {
return new Subscriber<Notification<?>>(filteredTerminals) {
@Override
public void onCompleted() {
filteredTerminals.onCompleted();
}
@Override
public void onError(Throwable e) {
filteredTerminals.onError(e);
}
@Override
public void onNext(Notification<?> t) {
// Terminals that should end the whole redo sequence go straight to the child;
// all others unlock resubscription and feed the control handler.
if (t.isOnCompleted() && stopOnComplete)
child.onCompleted();
else if (t.isOnError() && stopOnError)
child.onError(t.getThrowable());
else {
isLocked.set(false);
filteredTerminals.onNext(t);
}
}
@Override
public void setProducer(Producer producer) {
producer.request(Long.MAX_VALUE);
}
};
}
}));
// subscribe to the restarts observable to know when to schedule the next redo.
worker.schedule(new Action0() {
@Override
public void call() {
restarts.unsafeSubscribe(new Subscriber<Object>(child) {
@Override
public void onCompleted() {
child.onCompleted();
}
@Override
public void onError(Throwable e) {
child.onError(e);
}
@Override
public void onNext(Object t) {
if (!isLocked.get() && !child.isUnsubscribed()) {
if (consumerCapacity.get() > 0) {
worker.schedule(subscribeToSource);
} else {
// No capacity yet: mark so the next request() schedules the redo.
resumeBoundary.compareAndSet(false, true);
}
}
}
@Override
public void setProducer(Producer producer) {
producer.request(Long.MAX_VALUE);
}
});
}
});
child.setProducer(new Producer() {
@Override
public void request(final long n) {
long c = consumerCapacity.getAndAdd(n);
Producer producer = currentProducer.get();
if (producer != null) {
producer.request(c + n);
} else if (c == 0 && resumeBoundary.compareAndSet(true, false)) {
// Capacity arrived while idle at a redo boundary: kick off the subscription.
worker.schedule(subscribeToSource);
}
}
});
}
/** Control handler allowing a fixed number of redo passes. */
public static final class RedoFinite implements Func1<Observable<? extends Notification<?>>, Observable<?>> {
private final long count;
public RedoFinite(long count) {
this.count = count;
}
@Override
public Observable<?> call(Observable<? extends Notification<?>> ts) {
return ts.map(new Func1<Notification<?>, Notification<?>>() {
int num = 0;
@Override
public Notification<?> call(Notification<?> terminalNotification) {
if (count == 0) {
return terminalNotification;
}
num++;
if (num <= count) {
return Notification.createOnNext(num);
} else {
// Budget exhausted: pass the terminal through, ending the redo loop.
return terminalNotification;
}
}
}).dematerialize();
}
}
/** Control handler that consults a (retryCount, error) predicate before each retry. */
public static final class RetryWithPredicate implements Func1<Observable<? extends Notification<?>>, Observable<? extends Notification<?>>> {
private Func2<Integer, Throwable, Boolean> predicate;
public RetryWithPredicate(Func2<Integer, Throwable, Boolean> predicate) {
this.predicate = predicate;
}
@Override
public Observable<? extends Notification<?>> call(Observable<? extends Notification<?>> ts) {
return ts.scan(Notification.createOnNext(0), new Func2<Notification<Integer>, Notification<?>, Notification<Integer>>() {
@SuppressWarnings("unchecked")
@Override
public Notification<Integer> call(Notification<Integer> n, Notification<?> term) {
final int value = n.getValue();
if (predicate.call(value, term.getThrowable()).booleanValue())
return Notification.createOnNext(value + 1);
else
return (Notification<Integer>) term;
}
});
}
}
}
| 7,779 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/reactivx/mantis | Create_ds/mantis/mantis-common/src/main/java/io/reactivx/mantis/operators/OperatorOnErrorResumeNextViaObservable.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.reactivx.mantis.operators;
import rx.Observable;
import rx.Observable.Operator;
import rx.Producer;
import rx.Subscriber;
import rx.exceptions.Exceptions;
import rx.plugins.RxJavaPlugins;
/**
 * Operator that, on error, silently hands control to a fallback {@code Observable}
 * instead of propagating {@code onError} to the child.
 * <p>
 * While the source behaves, items flow through untouched. The first {@code onError}
 * is reported to the RxJava plugin error handler, the upstream subscription is
 * released, and the child is resubscribed to {@code resumeSequence} -- so the child
 * may never observe that a failure occurred. Any events arriving after a terminal
 * event are ignored.
 *
 * @deprecated use the built in RxJava onErrorResumeNext operator instead
 * @param <T> the value type
 */
@Deprecated
public final class OperatorOnErrorResumeNextViaObservable<T> implements Operator<T, T> {

    final Observable<? extends T> resumeSequence;

    public OperatorOnErrorResumeNextViaObservable(Observable<? extends T> resumeSequence) {
        this.resumeSequence = resumeSequence;
    }

    @Override
    public Subscriber<? super T> call(final Subscriber<? super T> child) {
        // Deliberately not sharing the child's subscription: the parent must be able to
        // unsubscribe itself on error without tearing down the child.
        Subscriber<T> parent = new Subscriber<T>() {

            // Set once a terminal event has been handled; later events are ignored.
            private boolean terminated = false;

            @Override
            public void onNext(T item) {
                if (!terminated) {
                    child.onNext(item);
                }
            }

            @Override
            public void onError(Throwable cause) {
                if (terminated) {
                    // Already terminal; still rethrow fatal errors per Rx convention.
                    Exceptions.throwIfFatal(cause);
                    return;
                }
                terminated = true;
                // Surface the swallowed error to the plugin hook, then switch streams.
                RxJavaPlugins.getInstance().getErrorHandler().handleError(cause);
                unsubscribe();
                resumeSequence.unsafeSubscribe(child);
            }

            @Override
            public void onCompleted() {
                if (!terminated) {
                    terminated = true;
                    child.onCompleted();
                }
            }

            @Override
            public void setProducer(final Producer producer) {
                // Forward the child's demand straight to the upstream producer.
                child.setProducer(producer::request);
            }
        };
        child.add(parent);
        return parent;
    }
}
| 7,780 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/reactivx/mantis | Create_ds/mantis/mantis-common/src/main/java/io/reactivx/mantis/operators/DoOnRequestOperator.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.reactivx.mantis.operators;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable.Operator;
import rx.Producer;
import rx.Subscriber;
/**
 * Pass-through Rx {@link Operator} that requests nothing from upstream until the
 * downstream explicitly requests, and logs unusually large request amounts -- useful
 * when debugging backpressure behavior. Items, errors and completion are forwarded
 * unchanged.
 */
public class DoOnRequestOperator<T> implements Operator<T, T> {

    private static final Logger logger = LoggerFactory.getLogger(DoOnRequestOperator.class);
    // Request amounts above this threshold are logged.
    private static final long LOG_THRESHOLD = 10000;
    // Label included in log lines to identify which pipeline stage this operator wraps.
    private final String name;

    public DoOnRequestOperator(String name) {
        this.name = name;
    }

    @Override
    public Subscriber<? super T> call(Subscriber<? super T> child) {
        final RequestSubscriber<T> requestSubscriber = new RequestSubscriber<T>(child);
        child.setProducer(new Producer() {
            @Override
            public void request(long n) {
                if (n > LOG_THRESHOLD) {
                    // Parameterized logging: the original built the message by eager string
                    // concatenation and carried a stray "+ " literal from a botched concat.
                    logger.info("DoOnRequest {} Requested------>: {}", name, n);
                }
                requestSubscriber.requestMore(n);
            }
        });
        return requestSubscriber;
    }

    /** Subscriber that requests nothing upfront and forwards explicit request amounts. */
    static class RequestSubscriber<T> extends Subscriber<T> {

        final Subscriber<? super T> child;
        boolean once = false;

        public RequestSubscriber(Subscriber<? super T> child) {
            super(child);
            this.child = child;
        }

        @Override
        public void onStart() {
            if (!once) {
                // don't request anything until the child requests via requestMore
                request(0);
            }
        }

        void requestMore(long n) {
            once = true;
            request(n);
        }

        @Override
        public void onCompleted() {
            child.onCompleted();
        }

        @Override
        public void onError(Throwable e) {
            child.onError(e);
        }

        @Override
        public void onNext(T t) {
            child.onNext(t);
        }
    }
}
| 7,781 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/reactivx/mantis | Create_ds/mantis/mantis-common/src/main/java/io/reactivx/mantis/operators/GroupedObservableUtils.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.reactivx.mantis.operators;
import rx.Observable;
import rx.Observable.OnSubscribe;
import rx.Subscriber;
import rx.observables.GroupedObservable;
/**
 * Utility for wrapping a plain {@link Observable} as a {@link GroupedObservable} keyed
 * by an arbitrary value.
 *
 * copied from https://github.com/Netflix/Turbine/blob/2.x/turbine-core/src/main/java/com/netflix/turbine/internal/GroupedObservableUtils.java
 *
 * @author njoshi
 */
public class GroupedObservableUtils {

    // TODO can we do without this?
    /**
     * Wraps {@code o} as a {@link GroupedObservable} with the given {@code key};
     * subscriptions are delegated straight to the backing observable.
     */
    public static <K, T> GroupedObservable<K, T> createGroupedObservable(K key, final Observable<T> o) {
        // Method reference replaces the verbose anonymous OnSubscribe; the codebase
        // already uses Java 8 lambdas elsewhere.
        return GroupedObservable.create(key, o::unsafeSubscribe);
    }
}
| 7,782 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/runtime/JobConstraints.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime;
import java.io.Serializable;
/**
 * Placement constraints that can be attached to a Mantis job.
 * Serializable so job definitions carrying them can be persisted/transferred.
 *
 * NOTE(review): the per-constant semantics below are inferred from the names only --
 * confirm against the scheduler implementation before relying on them.
 */
public enum JobConstraints implements Serializable {
// presumably: each of the job's workers lands on a distinct host
UniqueHost,
// presumably: workers get exclusive use of their host
ExclusiveHost,
// presumably: workers are balanced across availability zones
ZoneBalance,
// presumably: restrict placement to a specific instance-type cluster
M4Cluster,
M3Cluster,
M5Cluster
}
| 7,783 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/runtime/WorkerMigrationConfig.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Optional;
public class WorkerMigrationConfig {
public static final WorkerMigrationConfig DEFAULT = new WorkerMigrationConfig(MigrationStrategyEnum.PERCENTAGE, "{\"percentToMove\":25,\"intervalMs\":60000}");
private MigrationStrategyEnum strategy;
private String configString;
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public WorkerMigrationConfig(@JsonProperty("strategy") final MigrationStrategyEnum strategy,
@JsonProperty("configString") final String configString) {
this.strategy = Optional.ofNullable(strategy).orElse(MigrationStrategyEnum.ONE_WORKER);
this.configString = configString;
}
public static void main(String[] args) {
final WorkerMigrationConfig workerMigrationConfig = new WorkerMigrationConfig(MigrationStrategyEnum.ONE_WORKER, "{'name':'value'}");
System.out.println(workerMigrationConfig);
final WorkerMigrationConfig workerMigrationConfig2 = WorkerMigrationConfig.DEFAULT;
System.out.println(workerMigrationConfig2);
}
public MigrationStrategyEnum getStrategy() {
return strategy;
}
public String getConfigString() {
return configString;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
WorkerMigrationConfig that = (WorkerMigrationConfig) o;
if (strategy != that.strategy) return false;
return configString != null ? configString.equals(that.configString) : that.configString == null;
}
@Override
public int hashCode() {
int result = strategy != null ? strategy.hashCode() : 0;
result = 31 * result + (configString != null ? configString.hashCode() : 0);
return result;
}
@Override
public String toString() {
return "WorkerMigrationConfig{" +
"strategy=" + strategy +
", configString='" + configString + '\'' +
'}';
}
public enum MigrationStrategyEnum {ONE_WORKER, PERCENTAGE}
} | 7,784 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/runtime/JobSla.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.util.HashMap;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class JobSla {
public static final String uniqueTagName = "unique";
private static final Logger logger = LoggerFactory.getLogger(JobSla.class);
private final long runtimeLimitSecs;
private final long minRuntimeSecs;
private final StreamSLAType slaType;
private final MantisJobDurationType durationType;
private final String userProvidedType;
@JsonCreator
@JsonIgnoreProperties(ignoreUnknown = true)
public JobSla(@JsonProperty("runtimeLimitSecs") long runtimeLimitSecs,
@JsonProperty("minRuntimeSecs") long minRuntimeSecs,
@JsonProperty("slaType") StreamSLAType slaType,
@JsonProperty("durationType") MantisJobDurationType durationType,
@JsonProperty("userProvidedType") String userProvidedType) {
this.runtimeLimitSecs = Math.max(0L, runtimeLimitSecs);
this.minRuntimeSecs = Math.max(0L, minRuntimeSecs);
this.slaType = slaType == null ? StreamSLAType.Lossy : slaType;
this.durationType = durationType;
this.userProvidedType = userProvidedType;
}
public long getRuntimeLimitSecs() {
return runtimeLimitSecs;
}
public long getMinRuntimeSecs() {
return minRuntimeSecs;
}
public StreamSLAType getSlaType() {
return slaType;
}
public MantisJobDurationType getDurationType() {
return durationType;
}
public String getUserProvidedType() {
return userProvidedType;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((durationType == null) ? 0 : durationType.hashCode());
result = prime * result + (int) (minRuntimeSecs ^ (minRuntimeSecs >>> 32));
result = prime * result + (int) (runtimeLimitSecs ^ (runtimeLimitSecs >>> 32));
result = prime * result + ((slaType == null) ? 0 : slaType.hashCode());
result = prime * result + ((userProvidedType == null) ? 0 : userProvidedType.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
JobSla other = (JobSla) obj;
if (durationType != other.durationType)
return false;
if (minRuntimeSecs != other.minRuntimeSecs)
return false;
if (runtimeLimitSecs != other.runtimeLimitSecs)
return false;
if (slaType != other.slaType)
return false;
if (userProvidedType == null) {
if (other.userProvidedType != null)
return false;
} else if (!userProvidedType.equals(other.userProvidedType))
return false;
return true;
}
    /** Human-readable dump of all SLA fields; not intended for machine parsing. */
    @Override
    public String toString() {
        return "JobSla [runtimeLimitSecs=" + runtimeLimitSecs + ", minRuntimeSecs=" + minRuntimeSecs + ", slaType="
                + slaType + ", durationType=" + durationType + ", userProvidedType=" + userProvidedType + "]";
    }
public enum StreamSLAType {
Lossy
}
public static class Builder {
private static final ObjectMapper objectMapper = new ObjectMapper();
private long runtimeLimit = 0L;
private long minRuntimeSecs = 0L;
private StreamSLAType slaType = StreamSLAType.Lossy;
private MantisJobDurationType durationType = MantisJobDurationType.Perpetual;
private Map<String, String> userProvidedTypes = new HashMap<>();
public Builder withRuntimeLimit(long limit) {
this.runtimeLimit = limit;
return this;
}
public Builder withMinRuntimeSecs(long minRuntimeSecs) {
this.minRuntimeSecs = minRuntimeSecs;
return this;
}
public Builder withSlaType(StreamSLAType slaType) {
this.slaType = slaType;
return this;
}
public Builder withDurationType(MantisJobDurationType durationType) {
this.durationType = durationType;
return this;
}
// Sets the job's unique tag value which is used to determine if two jobs are identical.
// This is primarily used to determine if a job already exists and connect to it instead of submitting a
// duplicate identical job.
public Builder withUniqueJobTagValue(String value) {
userProvidedTypes.put(uniqueTagName, value);
return this;
}
public Builder withUserTag(String key, String value) {
userProvidedTypes.put(key, value);
return this;
}
public JobSla build() {
try {
return new JobSla(runtimeLimit, minRuntimeSecs, slaType, durationType, objectMapper.writeValueAsString(userProvidedTypes));
} catch (JsonProcessingException e) {
throw new RuntimeException("Unexpected error creating json out of user tags map: " + e.getMessage(), e);
}
}
}
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
/**
 * Pairs a job definition with its owner metadata.
 */
public class NamedJobDefinition {

    private final MantisJobDefinition jobDefinition;
    private final JobOwner owner;

    /**
     * Deserialization constructor.
     *
     * @param jobDefinition the job's full submission descriptor
     * @param owner         ownership/contact metadata
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public NamedJobDefinition(@JsonProperty("jobDefinition") MantisJobDefinition jobDefinition,
                              @JsonProperty("owner") JobOwner owner) {
        this.jobDefinition = jobDefinition;
        this.owner = owner;
    }

    public MantisJobDefinition getJobDefinition() {
        return jobDefinition;
    }

    public JobOwner getOwner() {
        return owner;
    }

    @Override
    public String toString() {
        return "NamedJobDefinition{" +
                "jobDefinition=" + jobDefinition +
                ", owner=" + owner +
                '}';
    }

    // Cron handling choice; presumably governs whether an updated named job keeps its
    // existing cron spec or adopts the new one — confirm against the scheduler code.
    public enum CronPolicy {KEEP_EXISTING, KEEP_NEW}
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime;
import io.mantisrx.common.Label;
import io.mantisrx.runtime.command.InvalidJobException;
import io.mantisrx.runtime.descriptor.DeploymentStrategy;
import io.mantisrx.runtime.descriptor.SchedulingInfo;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.runtime.parameter.Parameter;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.net.URL;
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
/**
 * Full job submission descriptor: artifact location, parameters, SLA, scheduling
 * info and operational metadata (cron, migration config, labels).
 */
public class MantisJobDefinition {

    // NOTE(review): declared but the class does not implement Serializable; kept as-is.
    private static final long serialVersionUID = 1L;

    private String name;
    private String user;
    private URL jobJarFileLocation;
    private String version;
    private List<Parameter> parameters;
    private JobSla jobSla;
    private long subscriptionTimeoutSecs = 0L;
    private SchedulingInfo schedulingInfo;
    private DeploymentStrategy deploymentStrategy;
    private int slaMin = 0;
    private int slaMax = 0;
    private String cronSpec = "";
    private NamedJobDefinition.CronPolicy cronPolicy = null;
    private boolean isReadyForJobMaster = false;
    private WorkerMigrationConfig migrationConfig;
    private List<Label> labels;

    /**
     * Deserialization constructor. Null parameter/label lists become empty lists;
     * a null migration config falls back to {@link WorkerMigrationConfig#DEFAULT};
     * non-positive subscription timeouts keep the 0 default.
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public MantisJobDefinition(@JsonProperty("name") String name,
                               @JsonProperty("user") String user,
                               @JsonProperty("url") URL jobJarFileLocation,
                               @JsonProperty("version") String version,
                               @JsonProperty("parameters") List<Parameter> parameters,
                               @JsonProperty("jobSla") JobSla jobSla,
                               @JsonProperty("subscriptionTimeoutSecs") long subscriptionTimeoutSecs,
                               @JsonProperty("schedulingInfo") SchedulingInfo schedulingInfo,
                               @JsonProperty("slaMin") int slaMin,
                               @JsonProperty("slaMax") int slaMax,
                               @JsonProperty("cronSpec") String cronSpec,
                               @JsonProperty("cronPolicy") NamedJobDefinition.CronPolicy cronPolicy,
                               @JsonProperty("isReadyForJobMaster") boolean isReadyForJobMaster,
                               @JsonProperty("migrationConfig") WorkerMigrationConfig migrationConfig,
                               @JsonProperty("labels") List<Label> labels,
                               @JsonProperty("deploymentStrategy") DeploymentStrategy deploymentStrategy
    ) {
        this.name = name;
        this.user = user;
        this.jobJarFileLocation = jobJarFileLocation;
        this.version = version;
        if (parameters != null) {
            this.parameters = parameters;
        } else {
            this.parameters = new LinkedList<>();
        }
        if (labels != null) {
            this.labels = labels;
        } else {
            this.labels = new LinkedList<>();
        }
        this.jobSla = jobSla;
        if (subscriptionTimeoutSecs > 0)
            this.subscriptionTimeoutSecs = subscriptionTimeoutSecs;
        this.schedulingInfo = schedulingInfo;
        this.deploymentStrategy = deploymentStrategy;
        this.slaMin = slaMin;
        this.slaMax = slaMax;
        this.cronSpec = cronSpec;
        this.cronPolicy = cronPolicy;
        this.isReadyForJobMaster = isReadyForJobMaster;
        this.migrationConfig = Optional.ofNullable(migrationConfig).orElse(WorkerMigrationConfig.DEFAULT);
    }

    /**
     * Validates both the SLA and the scheduling info.
     *
     * @param schedulingInfoOptional when true, a missing scheduling info is accepted
     * @throws InvalidJobException if the SLA or scheduling info is invalid
     */
    public void validate(boolean schedulingInfoOptional) throws InvalidJobException {
        validateSla();
        validateSchedulingInfo(schedulingInfoOptional);
    }

    /** Requires a non-null SLA with a non-null duration type. */
    private void validateSla() throws InvalidJobException {
        if (jobSla == null)
            throw new InvalidJobException("No Job SLA provided (likely incorrect job submit request)");
        if (jobSla.getDurationType() == null)
            // Fixed: message previously had an unbalanced parenthesis.
            throw new InvalidJobException("Invalid null duration type in job sla (likely incorrect job submit request)");
    }

    /** Validates scheduling info, treating it as mandatory. */
    public void validateSchedulingInfo() throws InvalidJobException {
        validateSchedulingInfo(false);
    }

    /**
     * Checks every stage: it must exist, have at least one instance, and have a sane
     * machine definition. Stage 0, when present, is the job-master stage, so regular
     * stage numbering starts at 1.
     */
    private void validateSchedulingInfo(boolean schedulingInfoOptional) throws InvalidJobException {
        if (schedulingInfoOptional && schedulingInfo == null)
            return;
        if (schedulingInfo == null)
            throw new InvalidJobException("No scheduling info provided");
        if (schedulingInfo.getStages() == null)
            throw new InvalidJobException("No stages defined in scheduling info");
        int numStages = schedulingInfo.getStages().size();
        int startingIdx = 1;
        if (schedulingInfo.forStage(0) != null) {
            // jobMaster stage 0 definition exists, adjust index range
            startingIdx = 0;
            numStages--;
        }
        for (int i = startingIdx; i <= numStages; i++) {
            StageSchedulingInfo stage = schedulingInfo.getStages().get(i);
            if (stage == null)
                throw new InvalidJobException("No definition for stage " + i + " in scheduling info for " + numStages + " stage job");
            if (stage.getNumberOfInstances() < 1)
                throw new InvalidJobException("Number of instance for stage " + i + " must be >0, not " + stage.getNumberOfInstances());
            MachineDefinition machineDefinition = stage.getMachineDefinition();
            if (machineDefinition.getCpuCores() <= 0)
                throw new InvalidJobException("cpuCores must be >0.0, not " + machineDefinition.getCpuCores());
            if (machineDefinition.getMemoryMB() <= 0)
                // Bug fix: message previously read "must be <0.0", contradicting the check.
                throw new InvalidJobException("memory must be >0.0, not " + machineDefinition.getMemoryMB());
            if (machineDefinition.getDiskMB() < 0)
                throw new InvalidJobException("disk must be >=0, not " + machineDefinition.getDiskMB());
            if (machineDefinition.getNumPorts() < 0)
                throw new InvalidJobException("numPorts must be >=0, not " + machineDefinition.getNumPorts());
        }
    }

    public String getName() {
        return name;
    }

    public String getUser() {
        return user;
    }

    public String getVersion() {
        return version;
    }

    public URL getJobJarFileLocation() {
        return jobJarFileLocation;
    }

    /** @return the job parameters; never null (constructor substitutes an empty list). */
    public List<Parameter> getParameters() {
        return parameters;
    }

    public JobSla getJobSla() {
        return jobSla;
    }

    public long getSubscriptionTimeoutSecs() {
        return subscriptionTimeoutSecs;
    }

    public SchedulingInfo getSchedulingInfo() {
        return schedulingInfo;
    }

    public void setSchedulingInfo(SchedulingInfo schedulingInfo) {
        this.schedulingInfo = schedulingInfo;
    }

    public DeploymentStrategy getDeploymentStrategy() {
        return deploymentStrategy;
    }

    public int getSlaMin() {
        return slaMin;
    }

    public int getSlaMax() {
        return slaMax;
    }

    public String getCronSpec() {
        return cronSpec;
    }

    public NamedJobDefinition.CronPolicy getCronPolicy() {
        return cronPolicy;
    }

    public boolean getIsReadyForJobMaster() {
        return isReadyForJobMaster;
    }

    /** @return the migration config; never null (constructor defaults it). */
    public WorkerMigrationConfig getMigrationConfig() {
        return migrationConfig;
    }

    /** @return the job labels; never null (constructor substitutes an empty list). */
    public List<Label> getLabels() {
        return this.labels;
    }

    @Override
    public String toString() {
        return "MantisJobDefinition{" +
                "name='" + name + '\'' +
                ", user='" + user + '\'' +
                ", jobJarFileLocation=" + jobJarFileLocation +
                ", version='" + version + '\'' +
                ", parameters=" + parameters +
                ", labels=" + labels +
                ", jobSla=" + jobSla +
                ", subscriptionTimeoutSecs=" + subscriptionTimeoutSecs +
                ", schedulingInfo=" + schedulingInfo +
                // deploymentStrategy was previously omitted from toString; added for completeness
                ", deploymentStrategy=" + deploymentStrategy +
                ", slaMin=" + slaMin +
                ", slaMax=" + slaMax +
                ", cronSpec='" + cronSpec + '\'' +
                ", cronPolicy=" + cronPolicy +
                ", isReadyForJobMaster=" + isReadyForJobMaster +
                ", migrationConfig=" + migrationConfig +
                '}';
    }
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
/**
 * Immutable ownership/contact metadata for a job: owner name, team, description,
 * contact email and source repository.
 */
public class JobOwner {

    private final String name;
    private final String teamName;
    private final String description;
    private final String contactEmail;
    private final String repo;

    /**
     * Deserialization constructor; all fields are stored as given (no validation).
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public JobOwner(@JsonProperty("name") String name, @JsonProperty("teamName") String teamName,
                    @JsonProperty("description") String description, @JsonProperty("contactEmail") String contactEmail,
                    @JsonProperty("repo") String repo) {
        this.name = name;
        this.teamName = teamName;
        this.description = description;
        this.contactEmail = contactEmail;
        this.repo = repo;
    }

    /** @return the owner's name. */
    public String getName() {
        return this.name;
    }

    /** @return the owning team's name. */
    public String getTeamName() {
        return this.teamName;
    }

    /** @return a free-form description of the job. */
    public String getDescription() {
        return this.description;
    }

    /** @return the contact email address. */
    public String getContactEmail() {
        return this.contactEmail;
    }

    /** @return the source repository reference. */
    public String getRepo() {
        return this.repo;
    }
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime;
/**
 * How long a Mantis job is expected to run.
 */
public enum MantisJobDurationType {
    /** Long-running job with no natural end. */
    Perpetual,
    /** Job expected to terminate on its own. */
    Transient
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime;
import java.util.HashMap;
import java.util.Map;
/**
 * Lifecycle states of a Mantis job/worker, with the legal transition graph and a
 * two-valued meta-state classification (Active vs Terminal).
 */
public enum MantisJobState {

    Accepted,
    Launched, // scheduled and sent to slave
    StartInitiated, // initial message from slave worker, about to start
    Started, // actually started running
    Failed, // OK to handle as a resubmit
    Completed, // terminal state, not necessarily successful
    Noop; // internal use only

    // Legal next-states for each state; terminal states map to an empty array.
    private static final Map<MantisJobState, MantisJobState[]> validChanges;
    // Coarse classification per state. NOTE(review): Noop has no entry, so
    // toMetaState(Noop) returns null — confirm callers handle that.
    private static final Map<MantisJobState, MetaState> metaStates;

    static {
        validChanges = new HashMap<>();
        validChanges.put(Accepted, new MantisJobState[] {Launched, Failed, Completed});
        validChanges.put(Launched, new MantisJobState[] {StartInitiated, Started, Failed, Completed});
        validChanges.put(StartInitiated, new MantisJobState[] {StartInitiated, Started, Completed, Failed});
        validChanges.put(Started, new MantisJobState[] {Started, Completed, Failed});
        validChanges.put(Failed, new MantisJobState[] {});
        validChanges.put(Completed, new MantisJobState[] {});
        // Bug fix: Noop previously had no entry, so Noop.isValidStateChgTo(...) threw
        // a NullPointerException. It is internal-only and permits no transitions.
        validChanges.put(Noop, new MantisJobState[] {});

        metaStates = new HashMap<>();
        metaStates.put(Accepted, MetaState.Active);
        metaStates.put(Launched, MetaState.Active);
        metaStates.put(StartInitiated, MetaState.Active);
        metaStates.put(Started, MetaState.Active);
        metaStates.put(Failed, MetaState.Terminal);
        metaStates.put(Completed, MetaState.Terminal);
    }

    /** Coarse classification of states: still progressing vs finished. */
    public enum MetaState {
        Active,
        Terminal
    }

    /** @return the meta-state for the given state, or null for Noop (no mapping). */
    public static MetaState toMetaState(MantisJobState state) {
        return metaStates.get(state);
    }

    /** @return true if transitioning from this state to {@code newState} is legal. */
    public boolean isValidStateChgTo(MantisJobState newState) {
        for (MantisJobState validState : validChanges.get(this))
            if (validState == newState)
                return true;
        return false;
    }

    /** @return true if this state is Failed or Completed. */
    public boolean isTerminalState() {
        return isTerminalState(this);
    }

    /** @return true if the given state is Failed or Completed. */
    public static boolean isTerminalState(MantisJobState state) {
        switch (state) {
        case Failed:
        case Completed:
            return true;
        default:
            return false;
        }
    }

    /** @return true only for Failed. */
    public static boolean isErrorState(MantisJobState started) {
        switch (started) {
        case Failed:
            return true;
        default:
            return false;
        }
    }

    /** @return true for Launched, StartInitiated or Started. */
    public static boolean isRunningState(MantisJobState state) {
        switch (state) {
        case Launched:
        case StartInitiated:
        case Started:
            return true;
        default:
            return false;
        }
    }

    /** @return true for states where the worker is already on a slave (StartInitiated, Started). */
    public static boolean isOnSlaveState(MantisJobState state) {
        switch (state) {
        case StartInitiated:
        case Started:
            return true;
        default:
            return false;
        }
    }

    /** @return true only for Started. */
    public static boolean isOnStartedState(MantisJobState state) {
        return state == MantisJobState.Started;
    }
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import io.mantisrx.shaded.com.google.common.annotations.VisibleForTesting;
import java.io.Serializable;
/**
 * Immutable resource specification for a worker: CPU, memory, network, disk and ports.
 */
public class MachineDefinition implements Serializable {

    private static final long serialVersionUID = 1L;
    // Network capacity used when the caller passes 0 (or uses the testing constructor).
    private static final double defaultMbps = 128.0;
    // Every worker is given at least one port.
    private static final int minPorts = 1;

    private final double cpuCores;
    private final double memoryMB;
    private final double networkMbps;
    private final double diskMB;
    private final int numPorts;

    /**
     * @param cpuCores    CPU cores; not validated here (MantisJobDefinition#validateSchedulingInfo rejects values <= 0)
     * @param memoryMB    memory in MB
     * @param networkMbps network bandwidth in Mbps; 0 means "use the 128 Mbps default".
     *                    NOTE(review): negative values pass through unchanged — confirm intended.
     * @param diskMB      disk in MB
     * @param numPorts    number of ports; clamped up to at least 1
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public MachineDefinition(@JsonProperty("cpuCores") double cpuCores,
                             @JsonProperty("memoryMB") double memoryMB,
                             @JsonProperty("networkMbps") double networkMbps,
                             @JsonProperty("diskMB") double diskMB,
                             @JsonProperty("numPorts") int numPorts) {
        this.cpuCores = cpuCores;
        this.memoryMB = memoryMB;
        this.networkMbps = networkMbps == 0 ? defaultMbps : networkMbps;
        this.diskMB = diskMB;
        this.numPorts = Math.max(minPorts, numPorts);
    }

    /** Testing convenience constructor: uses the default network bandwidth. */
    @VisibleForTesting
    public MachineDefinition(double cpuCores, double memoryMB, double diskMB, int numPorts) {
        this.cpuCores = cpuCores;
        this.memoryMB = memoryMB;
        this.diskMB = diskMB;
        this.numPorts = Math.max(minPorts, numPorts);
        // Consistency fix: previously a bare literal 128; same value as defaultMbps.
        this.networkMbps = defaultMbps;
    }

    public double getCpuCores() {
        return cpuCores;
    }

    public double getMemoryMB() {
        return memoryMB;
    }

    public double getNetworkMbps() {
        return networkMbps;
    }

    public double getDiskMB() {
        return diskMB;
    }

    public int getNumPorts() {
        return numPorts;
    }

    @Override
    public String toString() {
        return "MachineDefinition{" +
                "cpuCores=" + cpuCores +
                ", memoryMB=" + memoryMB +
                ", networkMbps=" + networkMbps +
                ", diskMB=" + diskMB +
                ", numPorts=" + numPorts +
                '}';
    }

    /** Hash over all five fields, consistent with {@link #equals(Object)}. */
    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        long temp;
        temp = Double.doubleToLongBits(cpuCores);
        result = prime * result + (int) (temp ^ (temp >>> 32));
        temp = Double.doubleToLongBits(diskMB);
        result = prime * result + (int) (temp ^ (temp >>> 32));
        temp = Double.doubleToLongBits(memoryMB);
        result = prime * result + (int) (temp ^ (temp >>> 32));
        temp = Double.doubleToLongBits(networkMbps);
        result = prime * result + (int) (temp ^ (temp >>> 32));
        result = prime * result + numPorts;
        return result;
    }

    /** Structural equality over all five fields. */
    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null || getClass() != obj.getClass())
            return false;
        MachineDefinition other = (MachineDefinition) obj;
        // Double.compare uses doubleToLongBits semantics (all NaNs equal, +0.0 != -0.0),
        // preserving the original bitwise comparison behavior.
        return Double.compare(cpuCores, other.cpuCores) == 0
                && Double.compare(diskMB, other.diskMB) == 0
                && Double.compare(memoryMB, other.memoryMB) == 0
                && Double.compare(networkMbps, other.networkMbps) == 0
                && numPorts == other.numPorts;
    }

    // checks if the current machine can match the requirements of the passed machine definition
    public boolean canFit(MachineDefinition o) {
        return this.cpuCores >= o.cpuCores &&
                this.memoryMB >= o.memoryMB &&
                this.networkMbps >= o.networkMbps &&
                this.diskMB >= o.diskMB &&
                this.numPorts >= o.numPorts;
    }
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime;
import java.util.List;
import java.util.concurrent.ConcurrentSkipListSet;
/**
 * Strategy for deciding which workers on disabled VMs should be migrated in each
 * iteration; concrete subclasses implement {@link #execute}.
 */
public abstract class MigrationStrategy {

    private final WorkerMigrationConfig config;

    public MigrationStrategy(final WorkerMigrationConfig config) {
        this.config = config;
    }

    /**
     * @return the migration config supplied at construction. Accessor added because
     *         the field was private with no getter, leaving subclasses unable to read
     *         the config they were constructed with.
     */
    protected WorkerMigrationConfig getConfig() {
        return config;
    }

    /**
     * @param workersOnDisabledVms set of WorkerNumber on disabled VM
     * @param numRunningWorkers total number of running workers for this job
     * @param totalNumWorkers total number of workers for this job
     * @param lastWorkerMigrationTimestamp last timestamp at which a worker was migrated for this job
     *
     * @return list of WorkerNumber to migrate in this iteration
     */
    abstract public List<Integer> execute(final ConcurrentSkipListSet<Integer> workersOnDisabledVms,
                                          final int numRunningWorkers,
                                          final int totalNumWorkers,
                                          final long lastWorkerMigrationTimestamp);
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.codec;
/**
 * Marker interface for types encodable by the deprecated {@link JsonCodec}; carries
 * no methods.
 */
public interface JsonType {
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.codec;
import io.mantisrx.common.codec.Codec;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.core.type.TypeReference;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectReader;
import io.mantisrx.shaded.com.fasterxml.jackson.dataformat.cbor.CBORFactory;
import io.mantisrx.shaded.com.fasterxml.jackson.module.afterburner.AfterburnerModule;
import java.io.IOException;
import java.util.List;
import java.util.Map;
/**
 * Factory methods for {@link Codec}s backed by a shared Jackson mapper that uses the
 * binary CBOR wire format (not JSON text).
 */
public class JacksonCodecs {

    // Shared, thread-safe mapper: CBOR encoding, lenient about unknown fields,
    // Afterburner module registered for faster (de)serialization.
    private final static ObjectMapper mapper;

    static {
        mapper = new ObjectMapper(new CBORFactory());
        mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        mapper.registerModule(new AfterburnerModule());
    }

    /**
     * Codec for a single POJO type; encode/decode failures are wrapped in
     * RuntimeException with the cause preserved.
     */
    public static <T> Codec<T> pojo(final Class<T> clazz) {
        return new Codec<T>() {
            @Override
            public byte[] encode(T value) {
                try {
                    return mapper.writeValueAsBytes(value);
                } catch (JsonProcessingException e) {
                    throw new RuntimeException("Failed to write bytes for value: " + value, e);
                }
            }

            @Override
            public T decode(byte[] bytes) {
                try {
                    return mapper.readValue(bytes, clazz);
                } catch (IOException e) {
                    throw new RuntimeException("Failed to convert to type: " + clazz.toString(), e);
                }
            }
        };
    }

    /**
     * Codec for lists.
     * NOTE(review): {@code T} is erased at runtime, so {@code decode} produces
     * Jackson's default element types (e.g. maps for POJO elements) rather than
     * instances of {@code T} — safe only for simple element types; confirm call sites.
     */
    public static <T> Codec<List<T>> list() {
        return new Codec<List<T>>() {
            @Override
            public byte[] encode(List<T> value) {
                try {
                    return mapper.writeValueAsBytes(value);
                } catch (JsonProcessingException e) {
                    throw new RuntimeException("Failed to write list to bytes", e);
                }
            }

            @Override
            public List<T> decode(byte[] bytes) {
                try {
                    return mapper.readValue(bytes, new TypeReference<List<T>>() {});
                } catch (IOException e) {
                    throw new RuntimeException("Failed to convert bytes to list", e);
                }
            }
        };
    }

    /** Codec for String-to-Object maps, using a cached reader for decoding. */
    public static Codec<Map<String, Object>> mapStringObject() {
        return new Codec<Map<String, Object>>() {
            private final ObjectReader reader = mapper.readerFor(Map.class);

            @Override
            public byte[] encode(Map<String, Object> map) {
                try {
                    return mapper.writeValueAsBytes(map);
                } catch (IOException e) {
                    throw new RuntimeException("Failed to write bytes for map: " + map, e);
                }
            }

            @Override
            public Map<String, Object> decode(byte[] bytes) {
                try {
                    return reader.readValue(bytes);
                } catch (IOException e) {
                    throw new RuntimeException("Failed to convert bytes to map", e);
                }
            }
        };
    }
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.codec;
import io.mantisrx.common.codec.Codec;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
/**
 * JSON codec for {@link JsonType} values backed by a shared Jackson mapper.
 *
 * @deprecated use {@link JacksonCodecs} instead.
 */
public class JsonCodec<T extends JsonType> implements Codec<T> {

    // Shared, thread-safe mapper. Configured once here: the previous code re-applied
    // this configuration to the shared static mapper from every constructor call — a
    // redundant (and racy) mutation of global state.
    private static final ObjectMapper mapper = new ObjectMapper();

    static {
        mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY, true);
        mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    }

    private final Class<T> clazz;

    /**
     * Use {@link JacksonCodecs}
     *
     * @param clazz target type used when decoding
     */
    @Deprecated
    public JsonCodec(Class<T> clazz) {
        this.clazz = clazz;
    }

    /** Serializes the value to JSON bytes; Jackson failures are rethrown as RuntimeException. */
    @Override
    public byte[] encode(T value) {
        try {
            return mapper.writeValueAsBytes(value);
        } catch (JsonProcessingException e) {
            throw new RuntimeException(e);
        }
    }

    /** Deserializes JSON bytes into an instance of the target type. */
    @Override
    public T decode(byte[] bytes) {
        try {
            return mapper.readValue(bytes, clazz);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.descriptor;
import io.mantisrx.runtime.JobConstraints;
import io.mantisrx.runtime.MachineDefinition;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnore;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.io.Serializable;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Function;
import lombok.EqualsAndHashCode;
import lombok.ToString;
@EqualsAndHashCode
@ToString
public class SchedulingInfo implements Serializable {
private static final long serialVersionUID = 1L;
private Map<Integer, StageSchedulingInfo> stages = new HashMap<>();
    /**
     * Deserialization constructor.
     *
     * @param stages map of stage number to its scheduling info; stage 0, when present,
     *               is the job-master stage (see {@link #addJobMasterStage})
     */
    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public SchedulingInfo(
            @JsonProperty("stages") Map<Integer, StageSchedulingInfo> stages) {
        // NOTE(review): replaces the field's initial HashMap with the caller's map
        // (shared, possibly null) — confirm callers never pass null or mutate it.
        this.stages = stages;
    }

    /** Copies the builder's accumulated stages into this instance's own map. */
    @JsonIgnore
    SchedulingInfo(Builder builder) {
        stages.putAll(builder.builderStages);
    }

    /** @return the live stage map (mutable; callers share this instance's state). */
    public Map<Integer, StageSchedulingInfo> getStages() {
        return stages;
    }

    /** Registers scheduling info for the job-master pseudo-stage (stage number 0). */
    public void addJobMasterStage(StageSchedulingInfo schedulingInfo) {
        stages.put(0, schedulingInfo);
    }

    /** @return scheduling info for the given stage number, or null if absent. */
    public StageSchedulingInfo forStage(int stageNum) {
        return stages.get(stageNum);
    }
public static class Builder {
        private final Map<Integer, StageSchedulingInfo> builderStages = new HashMap<>();
        // Next stage number to assign; regular stages are numbered from 1
        // (0 is reserved for the job master).
        private Integer currentStage = 1;
        // NOTE(review): recorded by numberOfStages(int) but not read anywhere in this
        // class — confirm whether it is still needed.
        private int numberOfStages;

        /** Adds scheduling info for the next stage (stages are numbered from 1 in add order). */
        public Builder addStage(StageSchedulingInfo stageSchedulingInfo) {
            builderStages.put(currentStage, stageSchedulingInfo);
            currentStage++;
            return this;
        }

        /** Registers scheduling info for the job-master pseudo-stage (stage number 0). */
        public void addJobMasterStage(StageSchedulingInfo schedulingInfo) {
            builderStages.put(0, schedulingInfo);
        }

        /** Records the expected stage count (currently unused within this class). */
        public Builder numberOfStages(int numberOfStages) {
            this.numberOfStages = numberOfStages;
            return this;
        }

        /** Adds a single-worker stage with hard/soft placement constraints. */
        public Builder singleWorkerStageWithConstraints(
                MachineDefinition machineDefinition,
                List<JobConstraints> hardConstraints,
                List<JobConstraints> softConstraints) {
            return this.addStage(
                    StageSchedulingInfo.builder()
                            .numberOfInstances(1)
                            .machineDefinition(machineDefinition)
                            .hardConstraints(hardConstraints)
                            .softConstraints(softConstraints)
                            .build());
        }

        /** Adds a single-worker stage with constraints and container attributes. */
        public Builder singleWorkerStageWithConstraints(
                MachineDefinition machineDefinition,
                List<JobConstraints> hardConstraints,
                List<JobConstraints> softConstraints,
                Map<String, String> containerAttributes) {
            return this.addStage(
                    StageSchedulingInfo.builder()
                            .numberOfInstances(1)
                            .machineDefinition(machineDefinition)
                            .hardConstraints(hardConstraints)
                            .softConstraints(softConstraints)
                            .containerAttributes(containerAttributes)
                            .build());
        }

        /** Adds an unconstrained single-worker stage. */
        public Builder singleWorkerStage(MachineDefinition machineDefinition) {
            return this.addStage(
                    StageSchedulingInfo.builder()
                            .numberOfInstances(1)
                            .machineDefinition(machineDefinition)
                            .build());
        }

        /** Adds an unconstrained single-worker stage with container attributes. */
        public Builder singleWorkerStage(MachineDefinition machineDefinition, Map<String, String> containerAttributes) {
            return this.addStage(
                    StageSchedulingInfo.builder()
                            .numberOfInstances(1)
                            .machineDefinition(machineDefinition)
                            .containerAttributes(containerAttributes)
                            .build());
        }

        /**
         * Adds a scalable multi-worker stage. The scaling policy is re-created so its
         * stage field matches the stage number being assigned here.
         */
        public Builder multiWorkerScalableStageWithConstraints(int numberOfWorkers, MachineDefinition machineDefinition,
                                                               List<JobConstraints> hardConstraints, List<JobConstraints> softConstraints,
                                                               StageScalingPolicy scalingPolicy) {
            StageScalingPolicy ssp = new StageScalingPolicy(currentStage, scalingPolicy.getMin(), scalingPolicy.getMax(),
                    scalingPolicy.getIncrement(), scalingPolicy.getDecrement(), scalingPolicy.getCoolDownSecs(), scalingPolicy.getStrategies());
            return this.addStage(
                    StageSchedulingInfo.builder()
                            .numberOfInstances(numberOfWorkers)
                            .machineDefinition(machineDefinition)
                            .hardConstraints(hardConstraints)
                            .softConstraints(softConstraints)
                            .scalingPolicy(ssp)
                            .scalable(ssp.isEnabled())
                            .build());
        }

        /**
         * Adds a scalable multi-worker stage with container attributes; see the
         * overload above for the scaling-policy rebinding.
         */
        public Builder multiWorkerScalableStageWithConstraints(int numberOfWorkers, MachineDefinition machineDefinition,
                                                               List<JobConstraints> hardConstraints, List<JobConstraints> softConstraints,
                                                               StageScalingPolicy scalingPolicy, Map<String, String> containerAttributes) {
            StageScalingPolicy ssp = new StageScalingPolicy(currentStage, scalingPolicy.getMin(), scalingPolicy.getMax(),
                    scalingPolicy.getIncrement(), scalingPolicy.getDecrement(), scalingPolicy.getCoolDownSecs(), scalingPolicy.getStrategies());
            return this.addStage(
                    StageSchedulingInfo.builder()
                            .numberOfInstances(numberOfWorkers)
                            .machineDefinition(machineDefinition)
                            .hardConstraints(hardConstraints)
                            .softConstraints(softConstraints)
                            .scalingPolicy(ssp)
                            .scalable(ssp.isEnabled())
                            .containerAttributes(containerAttributes)
                            .build());
        }
public Builder multiWorkerStageWithConstraints(int numberOfWorkers, MachineDefinition machineDefinition,
List<JobConstraints> hardConstraints, List<JobConstraints> softConstraints) {
return this.addStage(
StageSchedulingInfo.builder()
.numberOfInstances(numberOfWorkers)
.machineDefinition(machineDefinition)
.hardConstraints(hardConstraints)
.softConstraints(softConstraints)
.build());
}
public Builder multiWorkerStageWithConstraints(int numberOfWorkers, MachineDefinition machineDefinition,
List<JobConstraints> hardConstraints, List<JobConstraints> softConstraints, Map<String, String> containerAttributes) {
return this.addStage(
StageSchedulingInfo.builder()
.numberOfInstances(numberOfWorkers)
.machineDefinition(machineDefinition)
.hardConstraints(hardConstraints)
.softConstraints(softConstraints)
.containerAttributes(containerAttributes)
.build());
}
public Builder multiWorkerStage(int numberOfWorkers, MachineDefinition machineDefinition) {
return multiWorkerStage(numberOfWorkers, machineDefinition, false);
}
public Builder multiWorkerStage(
int numberOfWorkers, MachineDefinition machineDefinition, Map<String, String> containerAttributes) {
return multiWorkerStage(numberOfWorkers, machineDefinition, false, containerAttributes);
}
public Builder multiWorkerStage(int numberOfWorkers, MachineDefinition machineDefinition, boolean scalable) {
return this.addStage(
StageSchedulingInfo.builder()
.numberOfInstances(numberOfWorkers)
.machineDefinition(machineDefinition)
.scalable(scalable)
.build());
}
public Builder multiWorkerStage(
int numberOfWorkers, MachineDefinition machineDefinition, boolean scalable,
Map<String, String> containerAttributes) {
return this.addStage(
StageSchedulingInfo.builder()
.numberOfInstances(numberOfWorkers)
.machineDefinition(machineDefinition)
.scalable(scalable)
.containerAttributes(containerAttributes)
.build());
}
/**
* Setup current builder instance to use clone the stages from given stage info map and apply instance
* inheritance to each stage if the stage has inherit-config enabled or global force inheritance flag.
* Note: to add more stages to this builder, the number of stages needs to be adjusted accordingly along with
* calling other addStage methods.
* @param givenStages Source stages to be cloned from.
* @param getInstanceCountForStage Function to get inherited instance count for each stage.
* @param inheritEnabled Function to get whether a given stage has inherit-enabled.
* @param forceInheritance Global flag to force inheritance on all stages.
* @return Current builder instance.
*/
public Builder createWithInstanceInheritance(
Map<Integer, StageSchedulingInfo> givenStages,
Function<Integer, Optional<Integer>> getInstanceCountForStage,
Function<Integer, Boolean> inheritEnabled,
boolean forceInheritance) {
this.numberOfStages(givenStages.size());
givenStages.keySet().stream().sorted().forEach(k -> {
Optional<Integer> prevCntO = getInstanceCountForStage.apply(k);
StageSchedulingInfo resStage = givenStages.get(k);
if (prevCntO.isPresent() && (forceInheritance || inheritEnabled.apply(k))) {
resStage = givenStages.get(k).toBuilder()
.numberOfInstances(prevCntO.get())
.build();
}
// handle JobMaster stage
if (k == 0) { this.addJobMasterStage(resStage); }
else { this.addStage(resStage); }
});
return this;
}
public SchedulingInfo build() {
if (numberOfStages == 0) {
throw new IllegalArgumentException("Number of stages is 0, must be specified using builder.");
}
if (numberOfStages != builderStages.size()) {
throw new IllegalArgumentException("Missing scheduling information, number of stages: " + numberOfStages
+ " configured stages: " + builderStages.size());
}
return new SchedulingInfo(this);
}
}
}
| 7,796 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/runtime | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/runtime/descriptor/DeploymentStrategy.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.descriptor;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonInclude;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonInclude.Include;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Map;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.Singular;
import lombok.ToString;
@Builder
@EqualsAndHashCode
@ToString
public class DeploymentStrategy {
    // Per-stage deployment overrides, keyed by stage number. May be null when the
    // instance was created via the JSON constructor with no map supplied.
    @Singular(ignoreNullCollections = true, value = "stage")
    @Getter
    private final Map<Integer, StageDeploymentStrategy> stageDeploymentStrategyMap;

    /**
     * If this field is not empty, it's used to indicate this is a resource cluster stack deployment and will be hosted
     * on the given resource cluster.
     */
    @Getter
    @JsonInclude(Include.NON_NULL)
    private final String resourceClusterId;

    public DeploymentStrategy(
        @JsonProperty("stageDeploymentStrategyMap") Map<Integer, StageDeploymentStrategy> stageDeploymentStrategyMap,
        @JsonProperty("resourceClusterId") String resourceClusterId) {
        this.stageDeploymentStrategyMap = stageDeploymentStrategyMap;
        this.resourceClusterId = resourceClusterId;
    }

    /**
     * Returns the deployment strategy configured for the given stage, or {@code null}
     * when the stage has no strategy or no per-stage strategies were configured at all.
     */
    public StageDeploymentStrategy forStage(int stageNum) {
        // Null-safe (matching the requireInheritInstanceCheck methods) and a single
        // lookup: containsKey+get was equivalent since a mapped null also yields null.
        return stageDeploymentStrategyMap == null ? null : stageDeploymentStrategyMap.get(stageNum);
    }

    /** Returns true when any configured stage opts into instance-count inheritance. */
    public boolean requireInheritInstanceCheck() {
        return this.stageDeploymentStrategyMap != null && this.stageDeploymentStrategyMap.values().stream().anyMatch(StageDeploymentStrategy::isInheritInstanceCount);
    }

    /** Returns true when the given stage is configured and opts into instance-count inheritance. */
    public boolean requireInheritInstanceCheck(int stageNum) {
        return this.stageDeploymentStrategyMap
            != null && this.stageDeploymentStrategyMap.containsKey(stageNum) && this.stageDeploymentStrategyMap.get(stageNum).isInheritInstanceCount();
    }
}
| 7,797 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/runtime | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/runtime/descriptor/StageDeploymentStrategy.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.descriptor;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
// Per-stage deployment options attached to a job's DeploymentStrategy.
@Builder
@ToString()
@Getter
@EqualsAndHashCode
public class StageDeploymentStrategy {
    // When true, a redeployed stage keeps the instance count of the currently
    // running job rather than the count in the new scheduling info (see
    // SchedulingInfo.Builder#createWithInstanceInheritance) — presumed from
    // usage; confirm against the deployment path.
    private final boolean inheritInstanceCount;

    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public StageDeploymentStrategy(
        @JsonProperty("inheritInstanceCount") boolean inheritInstanceCount) {
        this.inheritInstanceCount = inheritInstanceCount;
    }
}
| 7,798 |
0 | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/runtime | Create_ds/mantis/mantis-common/src/main/java/io/mantisrx/runtime/descriptor/StageScalingPolicy.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.runtime.descriptor;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonCreator;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import io.mantisrx.shaded.com.fasterxml.jackson.annotation.JsonProperty;
import java.io.Serializable;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
/**
 * Autoscaling configuration for a single job stage: instance-count bounds, scale
 * step sizes, a cool-down period, and the set of {@link Strategy} entries (one per
 * {@link ScalingReason}) that drive scaling decisions. Instances are immutable.
 * Note: equals/hashCode are hand-written below and must stay in sync with the
 * fields; the class is Serializable with a fixed serialVersionUID.
 */
public class StageScalingPolicy implements Serializable {

    private static final long serialVersionUID = 1L;

    private final int stage;
    private final int min;
    private final int max;
    // Derived in the constructor, not caller-supplied: true only when there is
    // room to scale (min != max) and at least one strategy is configured.
    private final boolean enabled;
    private final int increment;
    private final int decrement;
    private final long coolDownSecs;
    private final Map<ScalingReason, Strategy> strategies;

    @JsonCreator
    @JsonIgnoreProperties(ignoreUnknown = true)
    public StageScalingPolicy(@JsonProperty("stage") int stage,
                              @JsonProperty("min") int min, @JsonProperty("max") int max,
                              @JsonProperty("increment") int increment, @JsonProperty("decrement") int decrement,
                              @JsonProperty("coolDownSecs") long coolDownSecs,
                              @JsonProperty("strategies") Map<ScalingReason, Strategy> strategies) {
        this.stage = stage;
        this.min = min;
        // Normalize inconsistent input: max is clamped up to at least min.
        this.max = Math.max(max, min);
        enabled = min != max && strategies != null && !strategies.isEmpty();
        // Step sizes are normalized to at least 1 so a scale action always moves.
        this.increment = Math.max(increment, 1);
        this.decrement = Math.max(decrement, 1);
        this.coolDownSecs = coolDownSecs;
        // Defensive copy; field is never null, so internal use needs no null checks.
        this.strategies = strategies == null ? new HashMap<ScalingReason, Strategy>() : new HashMap<>(strategies);
    }

    public int getStage() {
        return stage;
    }

    public int getMin() {
        return min;
    }

    public int getMax() {
        return max;
    }

    /** True only when min != max and at least one strategy was supplied (derived). */
    public boolean isEnabled() {
        return enabled;
    }

    public int getIncrement() {
        return increment;
    }

    public int getDecrement() {
        return decrement;
    }

    public long getCoolDownSecs() {
        return coolDownSecs;
    }

    /** Returns an unmodifiable view of the internal strategies copy; never null. */
    public Map<ScalingReason, Strategy> getStrategies() {
        return Collections.unmodifiableMap(strategies);
    }

    @Override
    public String toString() {
        return "StageScalingPolicy{" +
                "stage=" + stage +
                ", min=" + min +
                ", max=" + max +
                ", enabled=" + enabled +
                ", increment=" + increment +
                ", decrement=" + decrement +
                ", coolDownSecs=" + coolDownSecs +
                ", strategies=" + strategies +
                '}';
    }

    // Hand-written hashCode; 1231/1237 are the conventional Boolean.hashCode
    // constants. Keep consistent with equals() below when fields change.
    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + (int) (coolDownSecs ^ (coolDownSecs >>> 32));
        result = prime * result + decrement;
        result = prime * result + (enabled ? 1231 : 1237);
        result = prime * result + increment;
        result = prime * result + max;
        result = prime * result + min;
        result = prime * result + stage;
        result = prime * result + ((strategies == null) ? 0 : strategies.hashCode());
        return result;
    }

    // Field-by-field equality over all fields, including the derived "enabled".
    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        StageScalingPolicy other = (StageScalingPolicy) obj;
        if (coolDownSecs != other.coolDownSecs)
            return false;
        if (decrement != other.decrement)
            return false;
        if (enabled != other.enabled)
            return false;
        if (increment != other.increment)
            return false;
        if (max != other.max)
            return false;
        if (min != other.min)
            return false;
        if (stage != other.stage)
            return false;
        if (strategies == null) {
            if (other.strategies != null)
                return false;
        } else if (!strategies.equals(other.strategies))
            return false;
        return true;
    }

    /** Signals a scaling decision can react to; one Strategy may be configured per reason. */
    public enum ScalingReason {
        CPU,
        Memory,
        Network,
        DataDrop,
        KafkaLag,
        UserDefined,
        KafkaProcessed,
        Clutch,
        ClutchExperimental,
        ClutchRps,
        RPS,
        JVMMemory,
        SourceJobDrop
    }

    // Rolling trigger window: presumably "count" out of the last "of" observations
    // must breach a threshold before scaling fires — confirm with the autoscaler.
    @Getter
    @ToString
    @EqualsAndHashCode
    public static class RollingCount implements Serializable {

        private static final long serialVersionUID = 1L;
        private final int count;
        private final int of;

        @JsonCreator
        public RollingCount(@JsonProperty("count") int count, @JsonProperty("of") int of) {
            this.count = count;
            this.of = of;
        }
    }

    /** Thresholds and trigger window for a single {@link ScalingReason}. */
    @ToString
    @Getter
    @EqualsAndHashCode
    public static class Strategy implements Serializable {

        private static final long serialVersionUID = 1L;
        private final ScalingReason reason;
        private final double scaleDownBelowPct;
        private final double scaleUpAbovePct;
        private final RollingCount rollingCount;

        @JsonCreator
        @JsonIgnoreProperties(ignoreUnknown = true)
        public Strategy(@JsonProperty("reason") ScalingReason reason,
                        @JsonProperty("scaleDownBelowPct") double scaleDownBelowPct,
                        @JsonProperty("scaleUpAbovePct") double scaleUpAbovePct,
                        @JsonProperty("rollingCount") RollingCount rollingCount) {
            this.reason = reason;
            this.scaleDownBelowPct = scaleDownBelowPct;
            // Normalize: the scale-up threshold can never sit below the scale-down one.
            this.scaleUpAbovePct = Math.max(scaleDownBelowPct, scaleUpAbovePct);
            // Default 1-of-1 rolling count means every single breach triggers.
            this.rollingCount = rollingCount == null ? new RollingCount(1, 1) : rollingCount;
        }
    }
}
| 7,799 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.