index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius/instrumentation/AccessMonitorUtil.java | package com.netflix.archaius.instrumentation;
import com.netflix.archaius.api.PropertyDetails;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
/** Tracks property usage data and flushes the data periodically to a sink. */
/**
 * Tracks property usage data and periodically flushes a snapshot of that data to a sink.
 *
 * <p>Usage is recorded via {@link #registerUsage(PropertyDetails)}; a background daemon
 * thread pushes the accumulated data to the configured consumer on a fixed delay.
 */
public class AccessMonitorUtil implements AutoCloseable {
    private static final Logger LOG = LoggerFactory.getLogger(AccessMonitorUtil.class);

    // Map from property id to property usage data collected since the last flush.
    private final ConcurrentHashMap<String, PropertyUsageData> propertyUsageMap;

    // Map from stack trace to how many times that stack trace appeared.
    private final ConcurrentHashMap<String, Integer> stackTrace;

    // Monotonic suffix so each flusher thread gets a unique name.
    private static final AtomicInteger counter = new AtomicInteger();

    private final ScheduledExecutorService executor;
    private final Consumer<PropertiesInstrumentationData> dataFlushConsumer;
    private final boolean recordStackTrace;

    /** Fluent builder; flushing is scheduled as soon as {@link #build()} returns. */
    public static class Builder {
        private Consumer<PropertiesInstrumentationData> dataFlushConsumer = null;
        private boolean recordStackTrace = false;
        private int initialFlushDelaySeconds = 30;
        private int flushPeriodSeconds = 120;

        /** Sink that receives the usage snapshot on each flush; when null, data is captured but never flushed. */
        public Builder setDataFlushConsumer(Consumer<PropertiesInstrumentationData> dataFlushConsumer) {
            this.dataFlushConsumer = dataFlushConsumer;
            return this;
        }

        /** When true, records a stack trace on every usage registration (very expensive; debugging only). */
        public Builder setRecordStackTrace(boolean recordStackTrace) {
            this.recordStackTrace = recordStackTrace;
            return this;
        }

        public Builder setInitialFlushDelaySeconds(int initialFlushDelaySeconds) {
            this.initialFlushDelaySeconds = initialFlushDelaySeconds;
            return this;
        }

        public Builder setFlushPeriodSeconds(int flushPeriodSeconds) {
            this.flushPeriodSeconds = flushPeriodSeconds;
            return this;
        }

        public AccessMonitorUtil build() {
            AccessMonitorUtil accessMonitorUtil = new AccessMonitorUtil(dataFlushConsumer, recordStackTrace);
            accessMonitorUtil.startFlushing(initialFlushDelaySeconds, flushPeriodSeconds);
            return accessMonitorUtil;
        }
    }

    public static Builder builder() {
        return new Builder();
    }

    private AccessMonitorUtil(
            Consumer<PropertiesInstrumentationData> dataFlushConsumer,
            boolean recordStackTrace) {
        this.propertyUsageMap = new ConcurrentHashMap<>();
        this.stackTrace = new ConcurrentHashMap<>();
        this.dataFlushConsumer = dataFlushConsumer;
        this.recordStackTrace = recordStackTrace;
        // Single daemon thread so a pending flush never prevents JVM shutdown.
        this.executor = Executors.newSingleThreadScheduledExecutor(
                runnable -> {
                    Thread thread = Executors.defaultThreadFactory().newThread(runnable);
                    thread.setDaemon(true);
                    thread.setName(String.format("Archaius-Instrumentation-Flusher-%d", counter.incrementAndGet()));
                    return thread;
                });
    }

    private void startFlushing(int initialDelay, int period) {
        if (!flushingEnabled()) {
            LOG.info("Property usage data is being captured, but not flushed as there is no consumer specified.");
        } else {
            executor.scheduleWithFixedDelay(this::flushUsageData, initialDelay, period, TimeUnit.SECONDS);
        }
    }

    /** Pushes the current usage snapshot to the consumer and resets the usage map. */
    private void flushUsageData() {
        try {
            if (flushingEnabled()) {
                dataFlushConsumer.accept(new PropertiesInstrumentationData(getAndClearUsageMap()));
            }
        } catch (Exception e) {
            // Never let an exception kill the scheduled task; log and retry on the next period.
            LOG.warn("Failed to flush property instrumentation data", e);
        }
    }

    /** Merge the results of given accessMonitorUtil into this one. */
    public void merge(AccessMonitorUtil accessMonitorUtil) {
        // First writer wins for usage data (we keep at most one event per id per flush)...
        for (Map.Entry<String, PropertyUsageData> entry : accessMonitorUtil.propertyUsageMap.entrySet()) {
            propertyUsageMap.putIfAbsent(entry.getKey(), entry.getValue());
        }
        // ...while stack trace counts are additive.
        for (Map.Entry<String, Integer> entry : accessMonitorUtil.stackTrace.entrySet()) {
            stackTrace.merge(entry.getKey(), entry.getValue(), Integer::sum);
        }
    }

    /** Records a usage of the given property. */
    public void registerUsage(PropertyDetails propertyDetails) {
        // Initially, we limit the number of events we keep to one event per property id per flush.
        // computeIfAbsent (rather than putIfAbsent) avoids allocating the event and list on the
        // hot path once an entry for the id already exists.
        propertyUsageMap.computeIfAbsent(
                propertyDetails.getId(),
                id -> new PropertyUsageData(createEventList(new PropertyUsageEvent(System.currentTimeMillis()))));

        // Very crude and will have a very noticeable performance impact, but is
        // particularly useful for finding out call sites that iterate over all
        // properties.
        if (recordStackTrace) {
            String trace = Arrays.toString(Thread.currentThread().getStackTrace());
            stackTrace.merge(trace, 1, Integer::sum);
        }
    }

    private List<PropertyUsageEvent> createEventList(PropertyUsageEvent event) {
        List<PropertyUsageEvent> list = new ArrayList<>();
        list.add(event);
        return list;
    }

    private Map<String, PropertyUsageData> getAndClearUsageMap() {
        // Synchronized so that a concurrent snapshot-then-clear cannot interleave and drop data.
        synchronized (propertyUsageMap) {
            Map<String, PropertyUsageData> ret = getUsageMapImmutable();
            propertyUsageMap.clear();
            return ret;
        }
    }

    /** Returns an immutable snapshot of the current usage map. */
    public Map<String, PropertyUsageData> getUsageMapImmutable() {
        return Collections.unmodifiableMap(new HashMap<>(propertyUsageMap));
    }

    /** Returns an immutable snapshot of the recorded stack trace counts. */
    public Map<String, Integer> getStackTracesImmutable() {
        return Collections.unmodifiableMap(new HashMap<>(stackTrace));
    }

    /** True when a flush consumer was configured. */
    public boolean flushingEnabled() {
        return dataFlushConsumer != null;
    }

    @Override
    public void close() {
        // Stop the periodic task, then flush whatever was collected since the last run.
        executor.shutdown();
        flushUsageData();
    }
}
| 9,300 |
0 | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius/instrumentation/PropertyUsageData.java | package com.netflix.archaius.instrumentation;
import java.util.List;
/** Container for all usages of a specific property in a flush cycle. */
/** Container for all usages of a specific property in a flush cycle. */
public class PropertyUsageData {
    // final: the event list reference never changes after construction.
    private final List<PropertyUsageEvent> propertyUsageEvents;

    public PropertyUsageData(List<PropertyUsageEvent> propertyUsageEvents) {
        this.propertyUsageEvents = propertyUsageEvents;
    }

    /** Returns the (mutable, caller-owned) list of usage events recorded this cycle. */
    public List<PropertyUsageEvent> getPropertyUsageEvents() {
        return propertyUsageEvents;
    }
}
| 9,301 |
0 | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius/instrumentation/PropertyUsageEvent.java | package com.netflix.archaius.instrumentation;
/** Container for data about a single property usage event. */
/** Container for data about a single property usage event. */
public class PropertyUsageEvent {
    // Epoch-millis timestamp captured when the property was accessed.
    private final long usageTimeMillis;

    public PropertyUsageEvent(long timeMillis) {
        this.usageTimeMillis = timeMillis;
    }

    /** Returns the time of the usage in epoch milliseconds. */
    public long getUsageTimeMillis() {
        return usageTimeMillis;
    }
}
| 9,302 |
0 | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius/instrumentation/PropertiesInstrumentationData.java | package com.netflix.archaius.instrumentation;
import java.util.Map;
/** Instrumentation data snapshot for usages captured since the last flush. */
/** Instrumentation data snapshot for usages captured since the last flush. */
public class PropertiesInstrumentationData {
    // Keyed by property id; each value holds the usage events recorded for that id.
    private final Map<String, PropertyUsageData> idToUsageDataMap;

    public PropertiesInstrumentationData(Map<String, PropertyUsageData> usageById) {
        this.idToUsageDataMap = usageById;
    }

    /** Returns the id-to-usage-data map backing this snapshot. */
    public Map<String, PropertyUsageData> getIdToUsageDataMap() {
        return idToUsageDataMap;
    }
}
| 9,303 |
0 | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius/cascade/ConcatCascadeStrategy.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.cascade;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import com.netflix.archaius.api.CascadeStrategy;
import com.netflix.archaius.api.StrInterpolator;
/**
* Given a list of parameters generate all combinations by progressively
* concatinating the next parameter
*
* ${name}-${param1}
* ${name}-${param1}-${param2}
* ${name}-${param1}-${param2}-${param3}
*
* @author elandau
*
*/
/**
 * Given a list of parameters generate all combinations by progressively
 * concatenating the next parameter:
 *
 * <pre>
 * ${name}-${param1}
 * ${name}-${param1}-${param2}
 * ${name}-${param1}-${param2}-${param3}
 * </pre>
 *
 * @author elandau
 */
public class ConcatCascadeStrategy implements CascadeStrategy {
    private static final String DEFAULT_SEPARATOR = "-";

    private final List<String> parameters;
    private final String separator;

    /** Convenience factory using the default '-' separator. */
    public static ConcatCascadeStrategy from(String ... parameters) {
        return new ConcatCascadeStrategy(Arrays.asList(parameters));
    }

    public ConcatCascadeStrategy(List<String> parameters) {
        // Chain to the main constructor instead of duplicating the copy logic.
        this(DEFAULT_SEPARATOR, parameters);
    }

    public ConcatCascadeStrategy(String[] parameters) {
        this(Arrays.asList(parameters));
    }

    public ConcatCascadeStrategy(String separator, List<String> parameters) {
        this.separator = separator;
        // Defensive copy so later changes to the caller's list don't affect this strategy.
        this.parameters = new ArrayList<>(parameters);
    }

    public ConcatCascadeStrategy(String separator, String[] parameters) {
        this(separator, Arrays.asList(parameters));
    }

    /**
     * Returns {@code name} followed by each progressively longer concatenation,
     * with each concatenation resolved through the interpolator.
     */
    @Override
    public List<String> generate(String name, StrInterpolator interpolator, StrInterpolator.Lookup lookup) {
        List<String> result = new ArrayList<>(parameters.size() + 1);
        result.add(name);

        // Build the progressively longer name once instead of re-concatenating strings.
        StringBuilder current = new StringBuilder(name);
        for (String param : parameters) {
            current.append(separator).append(param);
            result.add(interpolator.create(lookup).resolve(current.toString()));
        }
        return result;
    }
}
| 9,304 |
0 | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius/cascade/InterpolatingCascadeStrategy.java | package com.netflix.archaius.cascade;
import java.util.ArrayList;
import java.util.List;
import com.netflix.archaius.api.CascadeStrategy;
import com.netflix.archaius.api.StrInterpolator;
import com.netflix.archaius.api.StrInterpolator.Lookup;
/**
 * Cascade strategy that expands each permutation pattern (a {@link String#format}
 * template taking the resource name) and resolves it through the interpolator.
 */
public abstract class InterpolatingCascadeStrategy implements CascadeStrategy {
    @Override
    public final List<String> generate(String resource, StrInterpolator interpolator, Lookup lookup) {
        List<String> resolved = new ArrayList<>();
        for (String pattern : getPermutations()) {
            String candidate = String.format(pattern, resource);
            resolved.add(interpolator.create(lookup).resolve(candidate));
        }
        return resolved;
    }

    /** Format patterns to expand, each containing a single %s placeholder for the resource. */
    protected abstract List<String> getPermutations();
}
| 9,305 |
0 | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius/cascade/NoCascadeStrategy.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.cascade;
import java.util.ArrayList;
import java.util.List;
import com.netflix.archaius.api.CascadeStrategy;
import com.netflix.archaius.api.StrInterpolator;
/**
* Default 'noop' cascade strategy returns only the original resource name
*
* @author elandau
*
*/
/**
 * Default 'noop' cascade strategy returns only the original resource name
 *
 * @author elandau
 *
 */
public class NoCascadeStrategy implements CascadeStrategy {
    public static final CascadeStrategy INSTANCE = new NoCascadeStrategy();

    @Override
    public List<String> generate(String name, StrInterpolator interpolator, StrInterpolator.Lookup config) {
        // No permutations: a mutable single-element list holding the name itself.
        List<String> result = new ArrayList<>(1);
        result.add(name);
        return result;
    }
}
| 9,306 |
0 | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius/visitor/PrintStreamVisitor.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.visitor;
import java.io.PrintStream;
import com.netflix.archaius.api.Config;
import com.netflix.archaius.api.config.CompositeConfig;
/**
 * Composite-config visitor that pretty-prints the config hierarchy to a
 * {@link PrintStream}, indenting two spaces per nesting level.
 */
public class PrintStreamVisitor implements CompositeConfig.CompositeVisitor<Void> {
    /** Shared visitor writing to {@link System#out}. */
    public static final PrintStreamVisitor OUT = new PrintStreamVisitor(System.out);

    private final PrintStream stream;

    // Current indentation; must grow/shrink by exactly two characters per level
    // (the unwind in visitChild strips two characters).
    private String prefix = "";

    public PrintStreamVisitor(PrintStream stream) {
        this.stream = stream;
    }

    public PrintStreamVisitor() {
        this(System.out);
    }

    @Override
    public Void visitKey(String key, Object value) {
        stream.println(prefix + key + " = " + value);
        return null;
    }

    @Override
    public Void visitChild(String name, Config child) {
        stream.println(prefix + "Config: " + name);
        // Bug fix: append two spaces to match the two characters removed below;
        // appending a single space made the substring(…, length()-2) unwind underflow.
        prefix += "  ";
        child.accept(this);
        prefix = prefix.substring(0, prefix.length() - 2);
        return null;
    }
}
| 9,307 |
0 | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius/visitor/PropertyOverrideVisitor.java | package com.netflix.archaius.visitor;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Stack;
import com.netflix.archaius.api.Config;
import com.netflix.archaius.api.config.CompositeConfig;
/**
* Produce an ordered LinkedHashMap with all instances of a property in the configuration
* hierarchy with the key being the 'path' to the property.
* @author elandau
*
*/
/**
 * Produce an ordered LinkedHashMap with all instances of a property in the configuration
 * hierarchy with the key being the 'path' to the property.
 * @author elandau
 *
 */
public class PropertyOverrideVisitor implements CompositeConfig.CompositeVisitor<LinkedHashMap<String, String>> {
    // NOTE(review): appears to be a copy-paste leftover from PrintStreamVisitor and is not
    // used by this class; made final to prevent reassignment — confirm before removing.
    public static final PrintStreamVisitor OUT = new PrintStreamVisitor(System.out);

    private static final String SEPARATOR = "/";

    // Path of child-config names from the root down to the node currently being visited.
    private final Stack<String> stack = new Stack<>();

    // The property key being searched for.
    private final String key;

    // Accumulates path -> value for every config that defines the key, in resolution order.
    private final LinkedHashMap<String, String> hierarchy = new LinkedHashMap<>();

    public PropertyOverrideVisitor(String key) {
        this.key = key;
    }

    @Override
    public LinkedHashMap<String, String> visitKey(String key, Object value) {
        // Individual keys are irrelevant here; only child configs are inspected.
        return hierarchy;
    }

    @Override
    public LinkedHashMap<String, String> visitChild(String name, Config child) {
        stack.push(name);
        if (child instanceof CompositeConfig) {
            // Recurse into composites; only leaf configs are probed for the key.
            child.accept(this);
        }
        else {
            Object value = child.getRawProperty(key);
            if (value != null) {
                // Stack iterates in insertion (root-to-leaf) order, giving a readable path.
                hierarchy.put(String.join(SEPARATOR, stack), value.toString());
            }
        }
        stack.pop();
        return hierarchy;
    }
} | 9,308 |
0 | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius/visitor/SLF4JConfigVisitor.java | package com.netflix.archaius.visitor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.archaius.api.Config;
import com.netflix.archaius.api.config.CompositeConfig;
/**
 * Composite-config visitor that logs the config hierarchy at DEBUG level,
 * indenting one step per nesting level.
 */
public class SLF4JConfigVisitor implements CompositeConfig.CompositeVisitor<Void> {
    private static final Logger LOG = LoggerFactory.getLogger(SLF4JConfigVisitor.class);

    // Constant indent step; made static final (it was an instance field named like a constant).
    private static final String INDENT_STR = " ";

    // Current indentation; grows by one INDENT_STR per nesting level.
    private String currentIndent = "";

    @Override
    public Void visitKey(String key, Object value) {
        // Parameterized logging avoids string concatenation when DEBUG is disabled.
        LOG.debug("{}{} = {}", currentIndent, key, value);
        return null;
    }

    @Override
    public Void visitChild(String name, Config child) {
        LOG.debug("{}Config: {}", currentIndent, name);
        currentIndent += INDENT_STR;
        child.accept(this);
        currentIndent = currentIndent.substring(0, currentIndent.length() - INDENT_STR.length());
        return null;
    }
}
| 9,309 |
0 | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius/visitor/FlattenedNamesVisitor.java | package com.netflix.archaius.visitor;
import java.util.ArrayList;
import java.util.List;
import com.netflix.archaius.api.Config;
import com.netflix.archaius.api.config.CompositeConfig;
/**
* Produce a flattened list of the configuration hierarchy in the order in which properties
* will be resolved. Note that the list won't show the actual hierarchy and may contain
* duplicate names if the same name is used in different child nodes.
*/
/**
 * Produce a flattened list of the configuration hierarchy in the order in which properties
 * will be resolved. Note that the list won't show the actual hierarchy and may contain
 * duplicate names if the same name is used in different child nodes.
 */
public class FlattenedNamesVisitor implements CompositeConfig.CompositeVisitor<List<String>> {
    // Child-config names collected in resolution order.
    private final List<String> names = new ArrayList<>();

    // This will never be called
    @Override
    public List<String> visitKey(String key, Object value) {
        return names;
    }

    @Override
    public List<String> visitChild(String name, Config child) {
        names.add(name);
        // Recurse only into composites; leaf configs contribute just their name.
        if (child instanceof CompositeConfig) {
            child.accept(this);
        }
        return names;
    }
}
| 9,310 |
0 | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius/interpolate/CommonsStrInterpolator.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.interpolate;
import org.apache.commons.lang3.text.StrLookup;
import org.apache.commons.lang3.text.StrSubstitutor;
import com.netflix.archaius.api.StrInterpolator;
/**
 * StrInterpolator backed by commons-lang3's StrSubstitutor. Resolves ${key}
 * placeholders (with ':' as the default-value delimiter, e.g. ${key:default})
 * against the supplied Lookup, including nested ${...} inside variable names.
 */
public final class CommonsStrInterpolator implements StrInterpolator {
// Stateless singleton; safe to share since each create() builds a fresh substitutor.
public static final CommonsStrInterpolator INSTANCE = new CommonsStrInterpolator();
private CommonsStrInterpolator() {
}
// Returns a Context whose resolve() replaces ${...} placeholders using 'lookup'.
@Override
public Context create(final Lookup lookup) {
// Adapt the archaius Lookup to the commons-lang3 StrLookup abstract class.
final StrSubstitutor sub = new StrSubstitutor(
new StrLookup<String>() {
@Override
public String lookup(String key) {
return lookup.lookup(key);
}
}, "${", "}", '$').setValueDelimiter(":");
// Allow placeholders nested within variable names, e.g. ${prefix.${env}}.
sub.setEnableSubstitutionInVariables(true);
return new Context() {
@Override
public String resolve(String value) {
return sub.replace(value);
}
};
}
}
| 9,311 |
0 | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius/interpolate/ConfigStrLookup.java | package com.netflix.archaius.interpolate;
import com.netflix.archaius.api.Config;
import com.netflix.archaius.api.StrInterpolator.Lookup;
/**
* Interpolator lookup using a Config as the source
* @author elandau
*
*/
/**
 * Interpolator lookup using a Config as the source
 * @author elandau
 *
 */
public class ConfigStrLookup implements Lookup {
    // final: the backing config never changes after construction.
    private final Config config;

    public ConfigStrLookup(Config config) {
        this.config = config;
    }

    /**
     * Resolves {@code key} from the backing config.
     * @return the property's string form, or null if the config has no raw value for the key
     */
    @Override
    public String lookup(String key) {
        Object value = config.getRawProperty(key);
        if (value == null) {
            return null;
        }
        return value.toString();
    }

    /** Factory returning the interface type for callers that only need a Lookup. */
    public static Lookup from(Config config) {
        return new ConfigStrLookup(config);
    }
}
| 9,312 |
0 | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius/readers/PropertiesConfigReader.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.readers;
import com.netflix.archaius.api.Config;
import com.netflix.archaius.api.ConfigReader;
import com.netflix.archaius.api.StrInterpolator;
import com.netflix.archaius.api.config.CompositeConfig;
import com.netflix.archaius.api.exceptions.ConfigException;
import com.netflix.archaius.config.DefaultCompositeConfig;
import com.netflix.archaius.config.DefaultCompositeConfig.Builder;
import com.netflix.archaius.config.MapConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.net.URLDecoder;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
/**
 * ConfigReader for '.properties' resources. Resolves a resource name against the
 * classpath, system classpath and filesystem, and follows '@next'-style include
 * chains inside loaded files.
 */
public class PropertiesConfigReader implements ConfigReader {
    private static final Logger LOG = LoggerFactory.getLogger(PropertiesConfigReader.class);

    // Property keys whose value is a comma-separated list of additional resources to load.
    private static final String[] INCLUDE_KEYS = { "@next", "netflixconfiguration.properties.nextLoad" };

    private static final String SUFFIX = ".properties";

    /**
     * Loads all resources matching {@code resourceName} into a composite config,
     * one child config per matching URL.
     * @throws ConfigException if no matching resource exists
     */
    @Override
    public Config load(ClassLoader loader, String resourceName, StrInterpolator strInterpolator, StrInterpolator.Lookup lookup) throws ConfigException {
        Builder builder = DefaultCompositeConfig.builder();
        Collection<URL> resources = getResources(loader, resourceName);
        if (resources.size() > 1) {
            LOG.warn("Multiple resource files found for {}. {}." +
                    " All resources will be loaded with override order undefined.",
                    resourceName, resources);
        }
        for (URL url : resources) {
            builder.withConfig(url.toString(), load(loader, url, strInterpolator, lookup));
        }
        CompositeConfig config = builder.build();
        if (config.getConfigNames().isEmpty()) {
            throw new ConfigException("No resources found for '" + resourceName + SUFFIX + "'");
        }
        return config;
    }

    /** Loads a single properties URL, following any include chains it references. */
    @Override
    public Config load(ClassLoader loader, URL url, StrInterpolator strInterpolator, StrInterpolator.Lookup lookup) throws ConfigException {
        Properties props = new Properties();
        internalLoad(props, new HashSet<String>(), loader, url, strInterpolator, lookup);
        return MapConfig.from(props);
    }

    /**
     * Loads {@code url} into {@code props}, then recursively loads any resources named by
     * the include keys. {@code seenUrls} guards against circular include chains.
     */
    private void internalLoad(Properties props, Set<String> seenUrls, ClassLoader loader, URL url, StrInterpolator strInterpolator, StrInterpolator.Lookup lookup) {
        LOG.debug("Attempting to load : {}", url.toExternalForm());
        // Guard against circular dependencies
        if (!seenUrls.contains(url.toExternalForm())) {
            seenUrls.add(url.toExternalForm());
            try {
                // Load properties into the single Properties object overriding any property
                // that may already exist
                Map<String, String> p = new URLConfigReader(url).call().getToAdd();
                LOG.debug("Loaded : {}", url.toExternalForm());
                props.putAll(p);

                // Recursively load any files referenced by one of several 'include' properties
                // in the file. The property value contains a list of URL's to load, where the
                // last loaded file wins for any individual property collisions.
                for (String nextLoadPropName : INCLUDE_KEYS) {
                    String nextLoadValue = (String)props.remove(nextLoadPropName);
                    if (nextLoadValue != null) {
                        for (String urlString : nextLoadValue.split(",")) {
                            for (URL nextUrl : getResources(loader, strInterpolator.create(lookup).resolve(urlString))) {
                                internalLoad(props, seenUrls, loader, nextUrl, strInterpolator, lookup);
                            }
                        }
                    }
                }
            } catch (IOException e) {
                // Best-effort: a missing include is logged, not fatal.
                LOG.debug("Unable to load configuration file {}. {}", url, e.getMessage());
            }
        }
        else {
            LOG.debug("Circular dependency trying to load url : {}", url.toExternalForm());
        }
    }

    @Override
    public boolean canLoad(ClassLoader loader, String name) {
        // Bug fix: getResources() never returns null (it returns a possibly-empty set),
        // so the previous '!= null' check made this method always return true.
        return !getResources(loader, name).isEmpty();
    }

    @Override
    public boolean canLoad(ClassLoader loader, URL uri) {
        return uri.getPath().endsWith(SUFFIX);
    }

    /**
     * Finds all URLs for {@code resourceName} (suffixing '.properties' if absent) from the
     * given loader, the system class loader, and finally the filesystem. Failures at each
     * stage are logged and ignored so later stages still run.
     */
    private static Collection<URL> getResources(ClassLoader loader, String resourceName) {
        LinkedHashSet<URL> resources = new LinkedHashSet<URL>();
        if (!resourceName.endsWith(SUFFIX)) {
            resourceName += SUFFIX;
        }
        // attempt to load from the context classpath
        if (loader == null) {
            loader = Thread.currentThread().getContextClassLoader();
        }
        if (loader != null) {
            try {
                resources.addAll(Collections.list(loader.getResources(resourceName)));
            } catch (IOException e) {
                LOG.debug("Failed to load resources for {}", resourceName, e);
            }
        }
        try {
            resources.addAll(Collections.list(ClassLoader.getSystemResources(resourceName)));
        } catch (IOException e) {
            LOG.debug("Failed to load resources for {}", resourceName, e);
        }
        try {
            resourceName = URLDecoder.decode(resourceName, "UTF-8");
            File file = new File(resourceName);
            if (file.exists()) {
                resources.add(file.toURI().toURL());
            }
        } catch (Exception e) {
            LOG.debug("Failed to load resources for {}", resourceName, e);
        }
        return resources;
    }
}
| 9,313 |
0 | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius/readers/JDCConfigReader.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.readers;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Callable;
import javax.sql.DataSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Reads properties from an RDBMS via JDBC: runs the configured query and returns a map
 * of key column value to value column value.
 */
public class JDCConfigReader implements Callable<Map<String, Object>> {
    private static final Logger log = LoggerFactory.getLogger(JDCConfigReader.class);

    // The datasource to connect to the database.
    private final DataSource datasource;

    // The JDBC query to obtain properties stored in an RDBMS.
    // example:
    // "Select distinct property_key, property_value from SITE_PROPERTIES"
    private final String query;

    // The column containing the keys.
    private final String keyColumnName;

    // The column containing the values.
    private final String valueColumnName;

    /**
     * Constructor
     *
     * @param datasource
     *            The DataSource object for the JDBC; e.g.
     *            <code>new OracleDataSource(databaseName, schema);</code>
     * @param query
     *            The query statement to fetch the properties; e.g.
     *            <code>"Select distinct property_key, property_value from SITE_PROPERTIES"</code>
     * @param keyColumnName
     *            The column name which stores the property keys; e.g.
     *            <code>property_key</code>
     * @param valueColumnName
     *            The column name which stores the property values; e.g.
     *            <code>property_value</code>
     */
    public JDCConfigReader(DataSource datasource, String query, String keyColumnName, String valueColumnName) {
        this.datasource = datasource;
        this.query = query;
        this.keyColumnName = keyColumnName;
        this.valueColumnName = valueColumnName;
    }

    /**
     * Executes the query and collects one map entry per row.
     *
     * @return map of key column value to value column value
     * @throws SQLException if the query fails
     */
    @Override
    public Map<String, Object> call() throws Exception {
        Map<String, Object> map = new HashMap<String, Object>();
        Connection conn = null;
        PreparedStatement pstmt = null;
        ResultSet rs = null;
        try {
            conn = getConnection();
            // query is already a String; the former query.toString() was redundant.
            pstmt = conn.prepareStatement(query);
            rs = pstmt.executeQuery();
            while (rs.next()) {
                String key = (String) rs.getObject(keyColumnName);
                Object value = rs.getObject(valueColumnName);
                map.put(key, value);
            }
        } finally {
            // Resources are closed via the quiet helper; SQLException propagates to the caller.
            close(conn, pstmt, rs);
        }
        return map;
    }

    /**
     * Returns the used <code>DataSource</code> object.
     *
     * @return the data source
     * @since 1.4
     */
    public DataSource getDatasource() {
        return datasource;
    }

    /**
     * Returns a <code>Connection</code> object. This method is called when ever
     * the database is to be accessed. This implementation returns a connection
     * from the current <code>DataSource</code>.
     *
     * @return the <code>Connection</code> object to be used
     * @throws SQLException
     *             if an error occurs
     * @since 1.4
     */
    protected Connection getConnection() throws SQLException {
        return getDatasource().getConnection();
    }

    /**
     * Close a <code>Connection</code> and, <code>Statement</code>. Avoid
     * closing if null and hide any SQLExceptions that occur.
     *
     * @param conn
     *            The database connection to close
     * @param stmt
     *            The statement to close
     * @param rs
     *            The result set to close
     */
    private void close(Connection conn, Statement stmt, ResultSet rs) {
        try {
            if (rs != null) {
                rs.close();
            }
        } catch (SQLException e) {
            log.error("An error occurred on closing the ResultSet", e);
        }
        try {
            if (stmt != null) {
                stmt.close();
            }
        } catch (SQLException e) {
            log.error("An error occurred on closing the statement", e);
        }
        try {
            if (conn != null) {
                conn.close();
            }
        } catch (SQLException e) {
            log.error("An error occurred on closing the connection", e);
        }
    }
}
| 9,314 |
0 | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-core/src/main/java/com/netflix/archaius/readers/URLConfigReader.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.readers;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.URL;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.concurrent.Callable;
import com.netflix.archaius.config.polling.PollingResponse;
/**
 * Polls one or more URLs for '.properties'-style content and returns the merged
 * key/value pairs as a {@link PollingResponse}. Later URLs override earlier ones.
 */
public class URLConfigReader implements Callable<PollingResponse> {
    private final URL[] configUrls;

    /**
     * Create an instance with a list URLs to be used.
     *
     * @param urls list of URLs to be used
     */
    public URLConfigReader(String... urls) {
        configUrls = createUrls(urls);
    }

    /**
     * Create an instance with a list URLs to be used.
     *
     * @param urls list of URLs to be used
     */
    public URLConfigReader(URL... urls) {
        configUrls = urls;
    }

    /**
     * Parses the given strings into URLs.
     *
     * @throws IllegalArgumentException if the array is null or empty
     * @throws RuntimeException wrapping a MalformedURLException for an invalid URL string
     */
    private static URL[] createUrls(String... urlStrings) {
        if (urlStrings == null || urlStrings.length == 0) {
            throw new IllegalArgumentException("urlStrings is null or empty");
        }
        URL[] urls = new URL[urlStrings.length];
        try {
            for (int i = 0; i < urls.length; i++) {
                urls[i] = new URL(urlStrings[i]);
            }
        } catch (java.net.MalformedURLException e) {
            // Narrowed from 'catch (Throwable)': this is the only checked exception the
            // URL constructor can throw; programming errors now propagate unchanged.
            throw new RuntimeException(e);
        }
        return urls;
    }

    /**
     * Fetches and parses each URL in order, merging all entries into one map
     * (later URLs win on key collisions).
     */
    @Override
    public PollingResponse call() throws IOException {
        final Map<String, String> map = new HashMap<String, String>();
        for (URL url: configUrls) {
            Properties props = new Properties();
            // try-with-resources replaces the nested manual close; closing the reader
            // also closes the underlying stream.
            try (InputStream fin = url.openStream();
                 InputStreamReader reader = new InputStreamReader(fin, "UTF-8")) {
                props.load(reader);
            }
            for (Entry<Object, Object> entry: props.entrySet()) {
                map.put((String) entry.getKey(), entry.getValue().toString());
            }
        }
        return new PollingResponse() {
            @Override
            public Map<String, String> getToAdd() {
                return map;
            }

            @Override
            public Collection<String> getToRemove() {
                return Collections.emptyList();
            }

            @Override
            public boolean hasData() {
                // This reader always reports a (possibly empty) full snapshot.
                return true;
            }
        };
    }

    /** Returns an unmodifiable view of the configured URLs. */
    public List<URL> getConfigUrls() {
        return Collections.unmodifiableList(Arrays.asList(configUrls));
    }

    @Override
    public String toString() {
        // Fixed copy-paste bug: previously reported itself as "FileConfigurationSource [fileUrls=...]".
        return "URLConfigReader [configUrls=" + Arrays.toString(configUrls)
                + "]";
    }
}
| 9,315 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/Layers.java | package com.netflix.archaius;
import com.netflix.archaius.api.Layer;
public final class Layers {
public static Layer TEST = Layer.of("test", 100);
public static Layer RUNTIME = Layer.of("runtime", 200);
public static Layer SYSTEM = Layer.of("system", 300);
public static Layer ENVIRONMENT = Layer.of("environment", 400);
public static Layer REMOTE = Layer.of("remote", 500);
public static Layer APPLICATION_OVERRIDE = Layer.of("application-override", 600);
public static Layer APPLICATION = Layer.of("application", 700);
public static Layer LIBRARY = Layer.of("library", 800);
public static Layer DEFAULT = Layer.of("default", 900);
private Layers() {
}
} | 9,316 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/PropertyRepository.java | package com.netflix.archaius.api;
import java.lang.reflect.Type;
import java.util.List;
import java.util.Map;
import java.util.Set;
/** Factory for {@link Property} handles bound to a dynamic configuration backend. */
public interface PropertyRepository {
    /**
     * Fetch a property of a specific type. A {@link Property} object is returned regardless of
     * whether a key for it exists in the backing configuration. The {@link Property} is attached
     * to a dynamic configuration system and will have its value automatically updated
     * whenever the backing configuration is updated. Fallback properties and default values
     * may be specified through the {@link Property} API.
     * <p>
     * This method does not handle polymorphic return types such as collections. Use {@link #get(String, Type)} or one
     * of the specialized utility methods in the interface for that case.
     *
     * @param key Property name
     * @param type The type for the property value. This *can* be an array type, but not a primitive array
     *             (ie, you can use {@code Integer[].class} but not {@code int[].class})
     * @return a live {@link Property} handle for the key, never null
     */
    <T> Property<T> get(String key, Class<T> type);

    /**
     * Fetch a property of a specific type. A {@link Property} object is returned regardless of
     * whether a key for it exists in the backing configuration. The {@link Property} is attached
     * to a dynamic configuration system and will have its value automatically updated
     * whenever the backing configuration is updated. Fallback properties and default values
     * may be specified through the {@link Property} API.
     * <p>
     * Use this method to request polymorphic return types such as collections. See the utility methods in
     * {@link ArchaiusType} to get types for lists, sets and maps, or call the utility methods in this interface directly.
     *
     * @see ArchaiusType#forListOf(Class)
     * @see ArchaiusType#forSetOf(Class)
     * @see ArchaiusType#forMapOf(Class, Class)
     * @param key Property name
     * @param type Type of property value.
     * @return a live {@link Property} handle for the key, never null
     */
    <T> Property<T> get(String key, Type type);

    /**
     * Fetch a property with a {@link List} value. This is just an utility wrapper around {@link #get(String, Type)}.
     * See that method's documentation for more details.
     *
     * @param key Property name
     * @param listElementType element type of the list (primitives are boxed automatically)
     */
    default <V> Property<List<V>> getList(String key, Class<V> listElementType) {
        return get(key, ArchaiusType.forListOf(listElementType));
    }

    /**
     * Fetch a property with a {@link Set} value. This is just an utility wrapper around {@link #get(String, Type)}.
     * See that method's documentation for more details.
     *
     * @param key Property name
     * @param setElementType element type of the set (primitives are boxed automatically)
     */
    default <V> Property<Set<V>> getSet(String key, Class<V> setElementType) {
        return get(key, ArchaiusType.forSetOf(setElementType));
    }

    /**
     * Fetch a property with a {@link Map} value. This is just an utility wrapper around {@link #get(String, Type)}.
     * See that method's documentation for more details.
     *
     * @param key Property name
     * @param mapKeyType key type of the map (primitives are boxed automatically)
     * @param mapValueType value type of the map (primitives are boxed automatically)
     */
    default <K, V> Property<Map<K, V>> getMap(String key, Class<K> mapKeyType, Class<V> mapValueType) {
        return get(key, ArchaiusType.forMapOf(mapKeyType, mapValueType));
    }
}
| 9,317 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/PropertySource.java | package com.netflix.archaius.api;
import java.util.Optional;
import java.util.function.BiConsumer;
/**
* Contract for a raw source of properties
*/
public interface PropertySource {
    /**
     * Get the raw property value. No interpolation or other modification is done to the property.
     *
     * @param key property name to look up
     * @return the raw value, or {@link Optional#empty()} if absent (the default implementation
     *         always returns empty)
     */
    default Optional<Object> getProperty(String key) { return Optional.empty(); }

    /**
     * Get the raw property value, but do not record any usage data.
     * The default implementation simply delegates to {@link #getProperty(String)}.
     *
     * @param key property name to look up
     * @return the raw value, or {@link Optional#empty()} if absent
     */
    default Optional<Object> getPropertyUninstrumented(String key) {
        return getProperty(key);
    }

    /**
     * Mechanism for consuming all properties of the PropertySource.
     * The default implementation does nothing.
     *
     * @param consumer callback receiving each (key, value) pair
     */
    default void forEachProperty(BiConsumer<String, Object> consumer) {}

    /**
     * Mechanism for consuming all properties of the PropertySource that also avoids any usage tracking.
     * The default implementation delegates to {@link #forEachProperty(BiConsumer)}.
     *
     * @param consumer callback receiving each (key, value) pair
     */
    default void forEachPropertyUninstrumented(BiConsumer<String, Object> consumer) {
        forEachProperty(consumer);
    }

    /**
     * @return Name used to identify the source such as a filename.
     */
    default String getName() { return "unnamed"; }

    /**
     * @return True if empty or false otherwise.
     */
    boolean isEmpty();
}
| 9,318 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/ArchaiusType.java | package com.netflix.archaius.api;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
/**
* An implementation of {@link ParameterizedType} that can represent the collection types that Archaius can
* handle with the default property value decoders, plus static utility methods for list, set and map types.
*
* @see PropertyRepository#getList(String, Class)
* @see PropertyRepository#getSet(String, Class)
* @see PropertyRepository#getMap(String, Class, Class)
* @see Config#get(Type, String)
* @see Config#get(Type, String, Object)
*/
public class ArchaiusType implements ParameterizedType {
    /** Return a parameterizedType to represent a {@code List<listValuesType>} */
    public static ParameterizedType forListOf(Class<?> listValuesType) {
        Class<?> maybeWrappedType = PRIMITIVE_WRAPPERS.getOrDefault(listValuesType, listValuesType);
        return new ArchaiusType(List.class, new Class<?>[] { maybeWrappedType });
    }

    /** Return a parameterizedType to represent a {@code Set<setValuesType>} */
    public static ParameterizedType forSetOf(Class<?> setValuesType) {
        Class<?> maybeWrappedType = PRIMITIVE_WRAPPERS.getOrDefault(setValuesType, setValuesType);
        return new ArchaiusType(Set.class, new Class<?>[] { maybeWrappedType });
    }

    /** Return a parameterizedType to represent a {@code Map<mapKeysType, mapValuesType>} */
    public static ParameterizedType forMapOf(Class<?> mapKeysType, Class<?> mapValuesType) {
        Class<?> maybeWrappedKeyType = PRIMITIVE_WRAPPERS.getOrDefault(mapKeysType, mapKeysType);
        Class<?> maybeWrappedValuesType = PRIMITIVE_WRAPPERS.getOrDefault(mapValuesType, mapValuesType);
        return new ArchaiusType(Map.class, new Class<?>[] {maybeWrappedKeyType, maybeWrappedValuesType});
    }

    // Primitive -> boxed wrapper lookup, used to normalize primitive element types
    // (generics cannot be parameterized by primitives).
    private final static Map<Class<?> /*primitive*/, Class<?> /*wrapper*/> PRIMITIVE_WRAPPERS;
    static {
        Map<Class<?>, Class<?>> wrappers = new HashMap<>();
        wrappers.put(Integer.TYPE, Integer.class);
        wrappers.put(Long.TYPE, Long.class);
        wrappers.put(Double.TYPE, Double.class);
        wrappers.put(Float.TYPE, Float.class);
        wrappers.put(Boolean.TYPE, Boolean.class);
        wrappers.put(Character.TYPE, Character.class);
        wrappers.put(Byte.TYPE, Byte.class);
        wrappers.put(Short.TYPE, Short.class);
        wrappers.put(Void.TYPE, Void.class);
        PRIMITIVE_WRAPPERS = Collections.unmodifiableMap(wrappers);
    }

    private final Class<?> rawType;
    private final Class<?>[] typeArguments;

    private ArchaiusType(Class<?> rawType, Class<?>[] typeArguments) {
        this.rawType = Objects.requireNonNull(rawType);
        // Defensive copy so later mutation of the caller's array cannot alter this instance.
        this.typeArguments = Objects.requireNonNull(typeArguments).clone();
        if (rawType.isArray()
                || rawType.isPrimitive()
                || rawType.getTypeParameters().length != typeArguments.length) {
            throw new IllegalArgumentException("The provided rawType and arguments don't look like a supported parameterized type");
        }
    }

    @Override
    public Type[] getActualTypeArguments() {
        // Return a copy: handing out the internal array would let callers mutate this
        // (otherwise immutable) type's state.
        return typeArguments.clone();
    }

    @Override
    public Type getRawType() {
        return rawType;
    }

    @Override
    public Type getOwnerType() {
        // Supported raw types (List, Set, Map) are all top-level classes.
        return null;
    }

    @Override
    public String toString() {
        String typeArgumentNames = Arrays.stream(typeArguments).map(Class::getSimpleName).collect(Collectors.joining(","));
        return String.format("parameterizedType for %s<%s>", rawType.getSimpleName(), typeArgumentNames);
    }
}
| 9,319 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/Property.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.api;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
/**
* API for composeable property access with optional chaining with default value support
* as well as change notification.
*
* A {@link PropertyRepository} implementation normally implements some level of caching
* to reduce the overhead of interpolating and converting values.
*
* {@code
* class MyService {
* private final Property<String> prop;
*
* MyService(PropertyRepository repository) {
* prop = repository.get("foo.prop", String.class).orElse("defaultValue");
* }
*
* public void doSomething() {
* String currentValue = prop.get();
* }
* }
* }
*
* @param <T>
*/
public interface Property<T> extends Supplier<T> {
    /**
     * Token returned when calling onChange through which change notification can be
     * unsubscribed.
     */
    interface Subscription {
        void unsubscribe();
    }

    /**
     * Return the most recent value of the property.
     *
     * @return Most recent value for the property
     */
    @Override
    T get();

    /**
     * Add a listener that will be called whenever the property value changes.
     *
     * @param listener callback invoked with the new value on each change
     * @deprecated use {@link #subscribe(Consumer)} instead
     */
    @Deprecated
    default void addListener(PropertyListener<T> listener) {
        // Wraps the legacy listener in a Consumer and routes through the modern API.
        onChange(new Consumer<T>() {
            @Override
            public void accept(T t) {
                listener.accept(t);
            }
        });
    }

    /**
     * Remove a listener previously registered by calling addListener.
     * NOTE: the default implementation is a no-op, so listeners added through the
     * default {@link #addListener(PropertyListener)} cannot be removed unless an
     * implementation overrides this method.
     *
     * @param listener listener to remove
     */
    @Deprecated
    default void removeListener(PropertyListener<T> listener) {}

    /**
     * @deprecated Use {@link Property#subscribe(Consumer)}
     * @param consumer callback invoked with the new value on each change
     */
    @Deprecated
    default Subscription onChange(Consumer<T> consumer) {
        return subscribe(consumer);
    }

    /**
     * Subscribe for notification whenever the property value changes.
     * {@link Property#onChange(Consumer)} should be called last when chaining properties
     * since the notification only applies to the state of the chained property
     * up until this point. Changes to subsequent Property objects returned from {@link Property#orElse}
     * or {@link Property#map(Function)} will not trigger calls to this consumer.
     * @param consumer callback invoked with the new value on each change
     * @return Subscription that may be unsubscribed to no longer get change notifications
     */
    default Subscription subscribe(Consumer<T> consumer) {
        // Bridge to the deprecated listener API so legacy implementations keep working.
        PropertyListener<T> listener = new PropertyListener<T>() {
            @Override
            public void onChange(T value) {
                consumer.accept(value);
            }
        };
        addListener(listener);
        return () -> removeListener(listener);
    }

    /**
     * Create a new Property object that will return the specified defaultValue if
     * this object's property is not found.
     * @param defaultValue value to fall back to when the property is absent
     * @return Newly constructed Property object
     * @throws UnsupportedOperationException in the default implementation
     */
    default Property<T> orElse(T defaultValue) {
        throw new UnsupportedOperationException();
    }

    /**
     * Create a new Property object that will fetch the property backed by the provided
     * key. The return value of the supplier will be cached until the configuration has changed
     *
     * @param key fallback property name
     * @return Newly constructed Property object
     * @throws UnsupportedOperationException in the default implementation
     */
    default Property<T> orElseGet(String key) {
        throw new UnsupportedOperationException();
    }

    /**
     * Create a new Property object that will map the current object's property value
     * to a new type. The return value of the mapper will be cached until the
     * configuration has changed.
     *
     * Note that no orElseGet() calls may be made on a mapped property
     *
     * @param mapper function converting the current value to the new type
     * @return Newly constructed Property object
     * @throws UnsupportedOperationException in the default implementation
     */
    default <S> Property<S> map(Function<T, S> mapper) {
        throw new UnsupportedOperationException();
    }

    /**
     * @return Key or path to the property
     */
    String getKey();
}
| 9,320 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/TypeConverter.java | package com.netflix.archaius.api;
import java.lang.reflect.Type;
import java.util.Optional;
/**
* Encapsulates conversion of a single string value to a type
* @param <T>
*/
public interface TypeConverter<T> {
    /**
     * High level container from which to resolve a Type to a TypeConverter. A repository normally contains
     * several {@link TypeConverter.Factory}s
     */
    interface Registry {
        /**
         * @param type the target type to convert to
         * @return a converter for the type, or {@link Optional#empty()} if none is available
         */
        Optional<TypeConverter<?>> get(Type type);
    }

    /**
     * Factory used to resolve a type to a TypeConverter. Multiple factories may be used to support different
     * types, including generics.
     */
    interface Factory {
        /**
         * @param type the target type to convert to
         * @param registry registry through which converters for nested/component types can be resolved
         * @return a converter for the type, or {@link Optional#empty()} if this factory does not handle it
         */
        Optional<TypeConverter<?>> get(Type type, Registry registry);
    }

    /**
     * Convert a string to the requested type
     * @param value string form of the value to convert
     * @return the converted instance
     */
    T convert(String value);
}
| 9,321 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/Decoder.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.api;
import java.lang.reflect.Type;
/**
 * API for decoding properties to arbitrary types.
 *
 * @author spencergibb
 */
public interface Decoder {
    /**
     * Decode a string into an instance of the given class.
     *
     * @param type target class
     * @param encoded string form of the value
     * @return the decoded instance
     * @deprecated prefer {@link #decode(Type, String)} which can also handle parameterized types
     */
    @Deprecated
    <T> T decode(Class<T> type, String encoded);

    /**
     * Decode a string into an instance of the given {@link Type}. The default implementation
     * only supports plain {@link Class} types and delegates to {@link #decode(Class, String)};
     * decoders that understand parameterized types should override it.
     *
     * @param type target type
     * @param value string form of the value
     * @return the decoded instance
     * @throws UnsupportedOperationException if {@code type} is not a plain Class
     */
    @SuppressWarnings("unchecked") // Class<?> -> Class<T> is inherently unchecked; T is chosen by the caller
    default <T> T decode(Type type, String value) {
        if (type instanceof Class) {
            return decode((Class<T>) type, value);
        } else {
            throw new UnsupportedOperationException("This decoder " + getClass() + " does not support Type");
        }
    }
}
| 9,322 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/StrInterpolatorFactory.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.api;
/**
 * SPI for specifying the {@link StrInterpolator} type used by ConfigManager.
 * This factory exists since the root config doesn't exist yet when the
 * interpolator type is added to the app-config builder (formerly
 * {@code DefaultAppConfig.Builder}).
 *
 * @author elandau
 *
 */
public interface StrInterpolatorFactory {
    /**
     * @param rootConfig the fully assembled root configuration used to resolve replacements
     * @return a new interpolator bound to the given root config
     */
    StrInterpolator create(Config rootConfig);
}
| 9,323 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/CascadeStrategy.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.api;
import java.util.List;
/**
* Strategy for determining a set of cascading resource names. The strategy will resolve
* a single resource name into an ordered list of alternative names.
*
* For example, a strategy may specify that additional configuration files may be loaded
* based on environment and datacenter. The strategy will return the list,
*
* basename
* basename-${environment}
* basename-${datacenter}
* basename-$[environment}-${datacenter}
*
* @author elandau
*/
public interface CascadeStrategy {
    /**
     * Resolve a resource name to multiple alternative names.
     *
     * @param resource The resource name
     * @param interpolator Interpolator for variable replacements
     * @param lookup Source of values for variables referenced during interpolation
     *
     * @return List of all names including the original name
     */
    List<String> generate(String resource, StrInterpolator interpolator, StrInterpolator.Lookup lookup);
}
| 9,324 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/ConfigLoader.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.api;
import java.io.File;
import java.net.URL;
import java.util.Properties;
import com.netflix.archaius.api.config.CompositeConfig;
import com.netflix.archaius.api.exceptions.ConfigException;
/**
* SPI for loading configurations. The ConfigLoader provides a DSL
* @author elandau
*
*/
public interface ConfigLoader {
    /**
     * DSL for loading a configuration. Obtain an instance via {@link ConfigLoader#newLoader()},
     * chain the {@code with*} methods to configure it, then call one of the {@code load} methods.
     *
     * @author elandau
     *
     */
    public static interface Loader {
        /**
         * Cascading policy to use the loading based on a resource name. All loaded
         * files will be merged into a single Config.
         * @param strategy strategy producing the ordered list of cascade resource names
         * @return this loader for chaining
         */
        Loader withCascadeStrategy(CascadeStrategy strategy);

        /**
         * Class loader to use
         * @param loader class loader through which resources are located
         * @return this loader for chaining
         */
        Loader withClassLoader(ClassLoader loader);

        /**
         * @deprecated Requiring the existence of a configuration file seems excessive
         */
        @Deprecated
        Loader withFailOnFirst(boolean flag);

        /**
         * Externally provided property overrides that are applied once
         * all cascaded files have been loaded
         *
         * @param props override properties
         * @return this loader for chaining
         */
        Loader withOverrides(Properties props);

        /**
         * Externally provided property overrides that are applied once
         * all cascaded files have been loaded
         *
         * @param config override config
         * @return this loader for chaining
         */
        Loader withOverrides(Config config);

        /**
         * Load configuration by cascade resource name.
         *
         * @param resourceName base resource name expanded via the cascade strategy
         * @return CompositeConfig contains a full hierarchy of cascaded files
         */
        CompositeConfig load(String resourceName) throws ConfigException;

        /**
         * Load configuration from a specific URL
         * @param url location to load from
         * @return the loaded configuration
         */
        Config load(URL url) throws ConfigException;

        /**
         * Load configuration from a specific file
         * @param file file to load from
         * @return the loaded configuration
         * @throws ConfigException if the file cannot be loaded
         */
        Config load(File file) throws ConfigException;
    }

    /**
     * @return a fresh {@link Loader} with default settings
     */
    Loader newLoader();
}
| 9,325 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/ConfigReader.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.api;
import java.net.URL;
import com.netflix.archaius.api.exceptions.ConfigException;
/**
* Contract for a configuration file loader. A ConfigManager will likely be configured with
* multiple configuration loaders, each responsible for loading a specific configuration
* format and loading from a specific location.
*
* TODO: Consider splitting load(resource) into a separate abstraction
*
* @author elandau
*
*/
public interface ConfigReader {
    /**
     * Load configuration from a simple resource name. A concrete ConfigLoader will need to add
     * location and type information to this resource.
     *
     * For example, an WebAppConfigurationLoader will attempt load to the configuration from
     * resourceName : 'application-prod'
     *
     *  /WEB-INF/confg/application-prod.properties
     *
     * @param loader class loader through which the resource is located
     * @param resourceName bare resource name without location or extension
     * @param strInterpolator interpolator for variable replacement within values
     * @param lookup source of values for interpolated variables
     * @return the loaded configuration
     */
    Config load(ClassLoader loader, String resourceName, StrInterpolator strInterpolator, StrInterpolator.Lookup lookup) throws ConfigException;

    /**
     * Load a specific URL. The URL is assumed to be fully formed. The concrete ConfigLoader will
     * only need to check that the extension is supported (ex .properties)
     *
     * @param loader class loader used for any nested resource resolution
     * @param url fully formed URL to load
     * @param strInterpolator interpolator for variable replacement within values
     * @param lookup source of values for interpolated variables
     * @return the loaded configuration
     */
    Config load(ClassLoader loader, URL url, StrInterpolator strInterpolator, StrInterpolator.Lookup lookup) throws ConfigException;

    /**
     * Determine if this reader can load the provided resource name
     *
     * @param loader class loader through which the resource would be located
     * @param resourceName bare resource name to test
     * @return true if this reader can load the resource
     */
    boolean canLoad(ClassLoader loader, String resourceName);

    /**
     * Determine if this reader can load the provided url
     * @param loader class loader used for any nested resource resolution
     * @param url URL to test (parameter renamed from the original typo "uril")
     * @return true if this reader can load the URL
     */
    boolean canLoad(ClassLoader loader, URL url);
}
| 9,326 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/IoCContainer.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.api;
/**
* Interface used by ConfigBinder to integrate with a DI framework that
* allows for named injection. This integration enables binding a string
* value for a type to a DI bound instance.
*
* @author elandau
*
*/
public interface IoCContainer {
    /**
     * @param name the binding name (qualifier) under which the instance was registered
     * @param type the requested type
     * @return Return the instance for type T bound to 'name'
     */
    public <T> T getInstance(String name, Class<T> type);
}
| 9,327 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/Config.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.api;
import java.lang.reflect.Type;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.util.Iterator;
import java.util.List;
import java.util.Optional;
/**
* Core API for reading a configuration. The API is read only.
*/
@SuppressWarnings("JavadocDeclaration") // TODO: Fix up all the javadocs and remove this suppression
public interface Config extends PropertySource {
    /**
     * Interface for a visitor visiting all key, value pairs.
     * <p>
     * Visitors should not have consequences based on specific key-value pairs and in general
     * should be used primarily for logging purposes.
     * <p>
     * Notably, instrumentation is by default disabled on visitors, meaning that if there are
     * visitors that result in consequences based on specific key-value pairs, it is possible
     * that they are still registered as unused and cleaned up, resulting in an unintended
     * code behavior change.
     *
     * @param <T> the visitor's accumulated result type
     */
    interface Visitor<T> {
        T visitKey(String key, Object value);
    }

    /**
     * Register a listener that will receive a call for each property that is added, removed
     * or updated. It is recommended that the callbacks be invoked only after a full refresh
     * of the properties to ensure they are in a consistent state.
     *
     * @param listener listener to register
     */
    void addListener(ConfigListener listener);

    /**
     * Remove a previously registered listener.
     * @param listener listener to remove
     */
    void removeListener(ConfigListener listener);

    /**
     * Return the raw, un-interpolated, object associated with a key.
     * @param key property name to look up
     */
    Object getRawProperty(String key);

    /**
     * Returns the raw object associated with a key, but without reporting on its usage. Only relevant for configs that
     * support property instrumentation.
     * @param key property name to look up
     */
    default Object getRawPropertyUninstrumented(String key) { return getRawProperty(key); }

    // PropertySource bridge: wrap the (possibly null) raw value in an Optional.
    @Override
    default Optional<Object> getProperty(String key) { return Optional.ofNullable(getRawProperty(key)); }

    @Override
    default Optional<Object> getPropertyUninstrumented(String key) {
        return Optional.ofNullable(getRawPropertyUninstrumented(key));
    }

    /**
     * Record that a property was used, for instrumentation purposes. The default
     * implementation signals that instrumentation is unsupported.
     */
    default void recordUsage(PropertyDetails propertyDetails) {
        throw new UnsupportedOperationException("Property usage instrumentation not supported for this config type.");
    }

    /** Returns whether a config is recording usage on the standard property endpoints. */
    default boolean instrumentationEnabled() {
        return false;
    }

    /**
     * Parse the property as a long.
     * @param key property name to look up
     */
    Long getLong(String key);

    /**
     * Parse the property as a long but return a default if no property defined or the
     * property cannot be parsed successfully.
     * @param key property name to look up
     * @param defaultValue value returned when the key is missing or unparseable
     * @return the parsed value or the default
     */
    Long getLong(String key, Long defaultValue);

    // Typed getters for the remaining scalar types. Each pair follows the same contract as
    // getLong above: the single-argument form for a missing/unparseable key behaves in an
    // implementation-defined way (typically throws), while the two-argument form falls back
    // to defaultValue.
    String getString(String key);
    String getString(String key, String defaultValue);
    Double getDouble(String key);
    Double getDouble(String key, Double defaultValue);
    Integer getInteger(String key);
    Integer getInteger(String key, Integer defaultValue);
    Boolean getBoolean(String key);
    Boolean getBoolean(String key, Boolean defaultValue);
    Short getShort(String key);
    Short getShort(String key, Short defaultValue);
    BigInteger getBigInteger(String key);
    BigInteger getBigInteger(String key, BigInteger defaultValue);
    BigDecimal getBigDecimal(String key);
    BigDecimal getBigDecimal(String key, BigDecimal defaultValue);
    Float getFloat(String key);
    Float getFloat(String key, Float defaultValue);
    Byte getByte(String key);
    Byte getByte(String key, Byte defaultValue);

    /**
     * Get the property as a list. Depending on the underlying implementation the list
     * may be derived from a comma delimited string or from an actual list structure.
     * @param key property name to look up
     * @return the property value as a list
     */
    List<?> getList(String key);

    <T> List<T> getList(String key, Class<T> type);

    List<?> getList(String key, List<?> defaultValue);

    /**
     * Get the property from the Decoder. All basic data types as well any type
     * will a valueOf or String constructor will be supported.
     * @param type target class
     * @param key property name to look up
     * @return the decoded value
     */
    <T> T get(Class<T> type, String key);

    /**
     * Get the property from the Decoder. All basic data types as well any type
     * will a valueOf or String constructor will be supported.
     * @param type target class
     * @param key property name to look up
     * @return the decoded value or the default
     */
    <T> T get(Class<T> type, String key, T defaultValue);

    /**
     * Get the property from the Decoder. Use this method for polymorphic types such as collections.
     * <p>
     * Use the utility methods in {@link ArchaiusType} to get the types for lists, sets and maps.
     *
     * @see ArchaiusType#forListOf(Class)
     * @see ArchaiusType#forSetOf(Class)
     * @see ArchaiusType#forMapOf(Class, Class)
     */
    <T> T get(Type type, String key);

    /**
     * Get the property from the Decoder. Use this method for polymorphic types such as collections.
     * <p>
     * Use the utility methods in {@link ArchaiusType} to get the types for lists, sets and maps.
     *
     * @see ArchaiusType#forListOf(Class)
     * @see ArchaiusType#forSetOf(Class)
     * @see ArchaiusType#forMapOf(Class, Class)
     */
    <T> T get(Type type, String key, T defaultValue);

    /**
     * @param key property name to look up
     * @return True if the key is contained within this or any of its child configurations
     */
    boolean containsKey(String key);

    /**
     * @return An unmodifiable Iterator over all property names owned by this config
     * @deprecated Use {@link #keys()} instead.
     */
    @Deprecated
    Iterator<String> getKeys();

    /**
     * Returns an unmodifiable Iterable of all property names owned by this config.
     * <p>
     * The default in this interface simply returns a thunk call to {@link #getKeys()}. Implementations are
     * encouraged to provide their own version. The simplest approach, if the implementation has a {@link java.util.Map}
     * or similar as its backing store, is to return an equivalent to
     * <code>Collections.unmodifiableSet(map.keySet())</code>.
     */
    default Iterable<String> keys() {
        return this::getKeys;
    }

    /**
     * @return Return an iterator over all prefixed property names owned by this config
     */
    Iterator<String> getKeys(String prefix);

    /**
     * @param prefix key prefix selecting the subset
     * @return Return a subset of the configuration prefixed by a key. A prefixed view is NOT independent of its parent
     * config. In particular, setting the decoder or the string interpolator is not supported and causes unspecified
     * behavior.
     * @see #getPrivateView()
     */
    Config getPrefixedView(String prefix);

    /**
     * @return A "private view" of this config. The returned object can have its own {@link Decoder},
     * {@link StrInterpolator}, and {@link ConfigListener}s that will NOT be shared with the original config. Updates to
     * the underlying config's entries WILL be visible and will generate events on any registered listener.
     */
    Config getPrivateView();

    /**
     * Set the interpolator to be used. The interpolator is normally created from the top level
     * configuration object and is passed down to any children as they are added. Setting the interpolator on a child
     * config is not supported and causes unspecified behavior.
     * @see #getPrivateView()
     * @param interpolator interpolator to install
     */
    void setStrInterpolator(StrInterpolator interpolator);

    StrInterpolator getStrInterpolator();

    /**
     * Set the Decoder used by get() to parse any type. The decoder is normally created from the top level
     * configuration object and is passed down to children as they are added. Setting the decoder on a child
     * config is not supported and causes unspecified behavior.
     * @see #getPrivateView()
     * @param decoder decoder to install
     */
    void setDecoder(Decoder decoder);

    Decoder getDecoder();

    /**
     * Visitor pattern
     * @param visitor visitor applied to every (key, value) pair
     */
    <T> T accept(Visitor<T> visitor);

    /**
     * Interpolate variable references in the given string against this config.
     * The default implementation is unsupported.
     */
    default String resolve(String value) {
        throw new UnsupportedOperationException();
    }

    /**
     * Interpolate then decode the given string to the requested type.
     * The default implementation is unsupported.
     */
    default <T> T resolve(String value, Class<T> type) {
        throw new UnsupportedOperationException();
    }
}
| 9,328 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/PropertyDetails.java | package com.netflix.archaius.api;
import java.util.Objects;
/**
* Container class for any information about the property at usage time that is relevant for instrumentation purposes.
*/
public class PropertyDetails {
    private final String key;
    private final String id;
    private final Object value;

    public PropertyDetails(String key, String id, Object value) {
        this.key = key;
        this.id = id;
        this.value = value;
    }

    /** @return the property key as requested at usage time */
    public String getKey() {
        return key;
    }

    /** @return the instrumentation id associated with this property */
    public String getId() {
        return id;
    }

    /** @return the value observed at usage time; may be null */
    public Object getValue() {
        return value;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof PropertyDetails)) {
            return false;
        }
        PropertyDetails pd = (PropertyDetails) o;
        return Objects.equals(key, pd.key)
            && Objects.equals(id, pd.id)
            && Objects.equals(value, pd.value);
    }

    // equals() was previously overridden without hashCode(), violating the
    // Object contract (equal instances must produce equal hash codes) and
    // breaking use of PropertyDetails as a key in hash-based collections.
    @Override
    public int hashCode() {
        return Objects.hash(key, id, value);
    }

    @Override
    public String toString() {
        return "[key: " + key + ", id: " + id + ", value: " + value + "]";
    }
}
| 9,329 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/PropertyFactory.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.api;
/**
 * Factory of {@link Property} objects keyed by property name.
 *
 * @see Property
 * @deprecated Deprecated in favor of using PropertyRepository
 */
@Deprecated
public interface PropertyFactory extends PropertyRepository {
    /**
     * Create a container for the named property through which the property
     * can be parsed as any supported type.
     *
     * @param propName name of the property
     * @return container bound to the property name
     */
    PropertyContainer getProperty(String propName);
}
| 9,330 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/ConfigListener.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.api;
/**
* Listener for property updates. Due to the cascading nature of property value resolution
* there's not much value in specifying the value or differentiating between set, add and
* delete. Instead the listener is expected to fetch the property value from the config.
*
* @author elandau
*/
public interface ConfigListener {
/**
* Notification that a configuration was added. This will normally only be called
* for CompositeConfig derived implementations.
* @param config
*/
public void onConfigAdded(Config config);
/**
* Notification that a configuration was removed. This will normally only be called
* for CompositeConfig derived implementations.
* @param config
*/
public void onConfigRemoved(Config config);
/**
* Notify the listener that the entire configuration of a child has changed. This is
* normally in response to a snapshot update to a dynamic configuration. A listener will
* likely respond to this by invalidating the entire property registration cache as it
* is more efficient than trying to determine the delta.
* @param config
*/
public void onConfigUpdated(Config config);
/**
* Notify of an error in the configuration listener. The error indicates that the
* DyanmicConfig was not able to update its configuration. The DynamicConfig
* should maintain the last known good state
*
* @param error
*/
public void onError(Throwable error, Config config);
}
| 9,331 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/PropertyListener.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.api;
import java.util.function.Consumer;
/**
 * Callback invoked when the value of a single property key changes.
 *
 * @param <T> parsed type of the property value
 */
public interface PropertyListener<T> extends Consumer<T> {
    /**
     * Invoked with the new value whenever the property changes. A null value
     * indicates the property was deleted.
     *
     * @param value the updated value for the property
     */
    @Deprecated
    void onChange(T value);

    @Override
    default void accept(T value) {
        onChange(value);
    }

    /**
     * Invoked when a property update failed to parse.
     *
     * @param error the parse failure
     * @deprecated This method isn't actually used by anyone. Parse errors will be handled in Config
     */
    @Deprecated
    default void onParseError(Throwable error) {
    }
}
| 9,332 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/StrInterpolator.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.api;
/**
 * Interpolates replacement variables embedded in a string.
 *
 * For example, given the property {@code foo=abc}:
 *
 * <pre>
 * {@code
 * interpolator.create(lookup).resolve("123-${foo}") -> 123-abc
 * }
 * </pre>
 *
 * @author elandau
 */
public interface StrInterpolator {
    /**
     * Source of raw strings for replacement keys. Implementations must not
     * perform replacements themselves; when a returned string itself contains
     * replacements, the interpolator extracts the inner key and calls back
     * into this lookup for its value.
     *
     * @author elandau
     */
    interface Lookup {
        String lookup(String key);
    }

    /**
     * Per-resolution state.
     *
     * @author elandau
     */
    interface Context {
        /**
         * Resolve all replaceable variables in the given string using the
         * lookup this context was created with. Implementations must handle
         * nested replacements and throw on infinite recursion.
         *
         * @param value string possibly containing replaceable variables
         * @return the fully resolved string
         */
        String resolve(String value);
    }

    /**
     * Create a fresh context through which a value may be resolved. A new
     * context should be created for each string being resolved since it
     * tracks state used to detect circular references.
     *
     * <pre>
     * {@code
     * interpolator.create(lookup).resolve("prefix-${foo}");
     * }
     * </pre>
     *
     * @param lookup source of raw values for replacement keys
     * @author elandau
     */
    Context create(Lookup lookup);
}
| 9,333 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/PropertyContainer.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.api;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.util.function.Function;
/**
 * Container for a single property that can be parsed as any type.
 * A PropertyContainer is attached to the Config source from which it
 * was created and receives notification of value changes. Implementations
 * of Property are non-blocking and optimize updating property values
 * in the background so as not to incur any overhead during hot call
 * paths.
 */
@Deprecated
public interface PropertyContainer {
    /**
     * Parse the property as a string.
     * @param defaultValue value returned while the property is not set
     */
    Property<String> asString(String defaultValue);

    /**
     * Parse the property as an int.
     * @param defaultValue value returned while the property is not set
     */
    Property<Integer> asInteger(Integer defaultValue);

    /**
     * Parse the property as a Long.
     * @param defaultValue value returned while the property is not set
     */
    Property<Long> asLong(Long defaultValue);

    /**
     * Parse the property as a double.
     * @param defaultValue value returned while the property is not set
     */
    Property<Double> asDouble(Double defaultValue);

    /**
     * Parse the property as a float.
     * @param defaultValue value returned while the property is not set
     */
    Property<Float> asFloat(Float defaultValue);

    /**
     * Parse the property as a short.
     * @param defaultValue value returned while the property is not set
     */
    Property<Short> asShort(Short defaultValue);

    /**
     * Parse the property as a byte.
     * @param defaultValue value returned while the property is not set
     */
    Property<Byte> asByte(Byte defaultValue);

    /**
     * Parse the property as a boolean.
     * @param defaultValue value returned while the property is not set
     */
    Property<Boolean> asBoolean(Boolean defaultValue);

    /**
     * Parse the property as a BigDecimal.
     * @param defaultValue value returned while the property is not set
     */
    Property<BigDecimal> asBigDecimal(BigDecimal defaultValue);

    /**
     * Parse the property as a BigInteger.
     * @param defaultValue value returned while the property is not set
     */
    Property<BigInteger> asBigInteger(BigInteger defaultValue);

    /**
     * Custom parsing based on the provided type. An implementation of ObservableProperty
     * should be optimized to call one of the known parsing methods based on type.
     */
    <T> Property<T> asType(Class<T> type, T defaultValue);

    /**
     * Custom parsing via a caller-supplied conversion function.
     * @param type function converting the raw string value to T
     * @param defaultValue raw (unparsed) string default
     */
    <T> Property<T> asType(Function<String, T> type, String defaultValue);
}
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/Layer.java |
package com.netflix.archaius.api;
/**
* Key used to group and order {@link Config}s into layers (@see Layers).
* Layers are ordered by natural order such that lower order values have
* precedence over a higher value. Within a layer configurations are prioritized
* such that the last config wins.
*/
public final class Layer {
private final String name;
private final int order;
/**
* Construct an Layer key.
*
* @param name
* @param order
* @return
*/
public static Layer of(String name, int order) {
return new Layer(name, order);
}
private Layer(String name, int order) {
this.name = name;
this.order = order;
}
public Layer withOrder(int order) {
return new Layer(name, order);
}
public int getOrder() {
return order;
}
public String getName() {
return name;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((name == null) ? 0 : name.hashCode());
result = prime * result + order;
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
Layer other = (Layer) obj;
if (name == null) {
if (other.name != null)
return false;
} else if (!name.equals(other.name))
return false;
if (order != other.order)
return false;
return true;
}
@Override
public String toString() {
return "Key [layerName=" + name + ", layerOrder=" + order + "]";
}
} | 9,335 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/inject/RuntimeLayer.java | package com.netflix.archaius.api.inject;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import javax.inject.Qualifier;
/**
 * JSR-330 binding qualifier identifying the runtime configuration layer.
 */
@Qualifier
@Target({ElementType.TYPE, ElementType.FIELD, ElementType.PARAMETER, ElementType.METHOD})
@Retention(RetentionPolicy.RUNTIME)
public @interface RuntimeLayer {
}
| 9,336 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/inject/DefaultLayer.java | package com.netflix.archaius.api.inject;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import javax.inject.Qualifier;
/**
 * JSR-330 binding qualifier identifying the defaults configuration layer.
 */
@Qualifier
@Target({ElementType.TYPE, ElementType.FIELD, ElementType.PARAMETER, ElementType.METHOD})
@Retention(RetentionPolicy.RUNTIME)
public @interface DefaultLayer {
}
| 9,337 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/inject/LibrariesLayer.java | package com.netflix.archaius.api.inject;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import javax.inject.Qualifier;
/**
 * JSR-330 binding qualifier identifying the libraries configuration layer.
 */
@Qualifier
@Target({ElementType.TYPE, ElementType.FIELD, ElementType.PARAMETER, ElementType.METHOD})
@Retention(RetentionPolicy.RUNTIME)
public @interface LibrariesLayer {
}
| 9,338 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/inject/RemoteLayer.java | package com.netflix.archaius.api.inject;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import javax.inject.Qualifier;
/**
 * JSR-330 binding qualifier identifying the remote configuration layer.
 */
@Qualifier
@Target({ElementType.TYPE, ElementType.FIELD, ElementType.PARAMETER, ElementType.METHOD})
@Retention(RetentionPolicy.RUNTIME)
public @interface RemoteLayer {
}
| 9,339 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/config/LayeredConfig.java | package com.netflix.archaius.api.config;
import com.netflix.archaius.api.Config;
import com.netflix.archaius.api.Layer;
import com.netflix.archaius.api.exceptions.ConfigException;
import java.util.Collection;
import java.util.Optional;
/**
* Composite Config where the override order is driven by Layer keys.
*/
public interface LayeredConfig extends Config {
static interface LayeredVisitor<T> extends Visitor<T> {
/**
* Visit a Config at the specified layer. visitConfig is called in override order
*
* @param layer
* @param child
* @return
*/
T visitConfig(Layer layer, Config config);
}
/**
* Add a Config at the specified Layer.
*
* <p>
* This will trigger an onConfigUpdated event.
*
* @param layer
* @param child
*/
void addConfig(Layer layer, Config config);
void addConfig(Layer layer, Config config, int position);
Optional<Config> removeConfig(Layer layer, String name);
/**
* Return all property sources at a layer
* @param layer
* @return Immutable list of all property sources at the specified layer.
*/
Collection<Config> getConfigsAtLayer(Layer layer);
} | 9,340 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/config/CompositeConfig.java | package com.netflix.archaius.api.config;
import java.util.Collection;
import java.util.LinkedHashMap;
import com.netflix.archaius.api.Config;
import com.netflix.archaius.api.exceptions.ConfigException;
/**
 * Config composed of multiple child configurations; it tracks no properties of
 * its own. Children are not merged but treated as overrides, so a property
 * defined in one child supersedes the same property in children of lower
 * precedence. Implementations of this interface should specify and implement
 * the override ordering.
 */
public interface CompositeConfig extends Config {
    interface CompositeVisitor<T> extends Visitor<T> {
        /**
         * Visit one named child of the configuration.
         *
         * @param name name under which the child was registered
         * @param child the child configuration
         */
        T visitChild(String name, Config child);
    }

    /**
     * Register a named child configuration. The newly added child takes
     * precedence over all previously added children; duplicate configurations
     * are not allowed.
     * <p>
     * Triggers an onConfigAdded event.
     *
     * @param name unique name for the child
     * @param child configuration to add
     * @throws ConfigException
     */
    boolean addConfig(String name, Config child) throws ConfigException;

    /**
     * Replace the child configuration registered under the specified name.
     *
     * Triggers an onConfigUpdated event.
     *
     * @param name name of the child to replace
     * @param child replacement configuration
     * @throws ConfigException
     */
    void replaceConfig(String name, Config child) throws ConfigException;

    /**
     * Register several named children at once. The newly added children take
     * precedence over all previously added ones; duplicate configurations are
     * not allowed.
     * <p>
     * Triggers an onConfigAdded event.
     *
     * @param configs a map of [configName, config]
     * @throws ConfigException
     */
    void addConfigs(LinkedHashMap<String, Config> configs) throws ConfigException;

    /**
     * Replace every child whose name appears in the map.
     *
     * Triggers an onConfigUpdated event.
     *
     * @param configs a map of [configName, config]
     * @throws ConfigException
     */
    void replaceConfigs(LinkedHashMap<String, Config> configs) throws ConfigException;

    /**
     * Remove the child registered under the given name.
     *
     * Triggers an onConfigRemoved event.
     *
     * @param name name of the child to remove
     * @return the removed configuration
     */
    Config removeConfig(String name);

    /**
     * Look up a child configuration by name.
     *
     * @param name the config name to look up
     * @return the Config that matches the name, null otherwise
     */
    Config getConfig(String name);

    /**
     * @return a collection of all registered config names
     */
    Collection<String> getConfigNames();
}
| 9,341 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/config/PollingStrategy.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.api.config;
import java.util.concurrent.Future;
/**
 * Strategy controlling how the polling of a configuration source is scheduled.
 *
 * @author elandau
 */
public interface PollingStrategy {
    /**
     * Schedule the given polling task according to this strategy.
     *
     * @param run the polling task
     * @return future tracking the scheduled work
     */
    Future<?> execute(Runnable run);

    /** Stop any scheduled polling. */
    void shutdown();
}
| 9,342 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/config/SettableConfig.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.api.config;
import java.util.Properties;
import com.netflix.archaius.api.Config;
/**
 * SPI for a config whose properties may be set from code.
 *
 * @author elandau
 *
 */
public interface SettableConfig extends Config {
    /**
     * Copy all properties from the 'config' argument into this config.
     * @param config source config whose properties are copied
     */
    void setProperties(Config config);
    /**
     * Set a bunch of properties at once.
     * @param properties properties to copy into this config
     */
    void setProperties(Properties properties);
    /**
     * Set a single property.
     * @param propName name of the property to set
     * @param propValue new value for the property
     */
    <T> void setProperty(String propName, T propValue);
    /**
     * Clear a property. Note that when this config is part of a CompositeConfig
     * only the property tracked by the settable config will be cleared; a value
     * for the propName may still exist in a different child config.
     * @param propName name of the property to clear
     */
    void clearProperty(String propName);
}
| 9,343 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/exceptions/ConfigException.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.api.exceptions;
/**
 * Checked exception thrown for configuration loading and manipulation failures.
 */
public class ConfigException extends Exception {
    // Exception is Serializable; declare an explicit serialVersionUID so the
    // serialized form does not silently change across recompiles.
    private static final long serialVersionUID = 1L;

    /**
     * @param message description of the failure
     */
    public ConfigException(String message) {
        super(message);
    }

    /**
     * @param message description of the failure
     * @param t underlying cause
     */
    public ConfigException(String message, Throwable t) {
        super(message, t);
    }
}
| 9,344 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/annotations/Configuration.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.api.annotations;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
/**
 * Marks a type as a configuration mapping target. Values are auto-assigned
 * to its setters (and optionally fields) based on {@link #prefix()} via the
 * set {@link ConfigurationProvider}.
 */
@Documented
@Retention(java.lang.annotation.RetentionPolicy.RUNTIME)
@Target({ElementType.TYPE})
public @interface Configuration
{
    /**
     * @return prefix prepended to member-derived property names when looking up values
     */
    String prefix() default "";

    /**
     * @return field names to use for replacement
     */
    String[] params() default {};

    /**
     * @return user displayable description of this configuration
     */
    String documentation() default "";

    /**
     * @return true to allow mapping configuration to fields
     */
    boolean allowFields() default false;

    /**
     * @return true to allow mapping configuration to setters
     */
    boolean allowSetters() default true;

    /**
     * @return Method to call after configuration is bound
     */
    String postConfigure() default "";

    /**
     * @return If true then properties cannot change once set otherwise methods will be
     * bound to dynamic properties via PropertyFactory.
     */
    boolean immutable() default false;
}
| 9,345 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/annotations/PropertyName.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.api.annotations;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
/**
 * Associates an explicit property name with the annotated field or method,
 * presumably overriding any name derived from the member itself
 * (NOTE(review): confirm against the configuration mapping implementation).
 */
@Documented
@Retention(java.lang.annotation.RetentionPolicy.RUNTIME)
@Target({ElementType.FIELD, ElementType.METHOD})
public @interface PropertyName {
    /** @return the property name to use */
    String name();
}
| 9,346 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/annotations/Embedded.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.api.annotations;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
/**
 * Marks a field or method as an embedded (nested) configuration element with
 * the given name (NOTE(review): semantics inferred from the name; confirm
 * against the configuration mapping implementation).
 */
@Documented
@Retention(java.lang.annotation.RetentionPolicy.RUNTIME)
@Target({ElementType.FIELD, ElementType.METHOD})
public @interface Embedded {
    /** @return name of the embedded element */
    String name();
}
| 9,347 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/annotations/ConfigurationSource.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.api.annotations;
import com.netflix.archaius.api.CascadeStrategy;
import com.netflix.archaius.api.StrInterpolator;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import java.util.List;
/**
 * Identifier for a configuration source as well as a customizable policy for
 * loading cascaded (or different name variations) of the source.
 *
 * <pre>{@code
 * @ConfigurationSource(value="foo")
 * class Foo {
 *
 * }
 * }</pre>
 *
 * @author elandau
 */
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
public @interface ConfigurationSource {
    /**
     * List of named sources to load. This could be a simple name, like 'foo', that is
     * resolved by the property loaders, or one including a type, e.g.
     * 'properties:foo.properties'.
     *
     * @return the source names to load
     */
    String[] value();

    /**
     * Policy for creating variations of the configuration source names to be loaded.
     */
    Class<? extends CascadeStrategy> cascading() default NullCascadeStrategy.class;

    /**
     * Default sentinel strategy: generates no name variations. Note that it
     * returns null rather than an empty list; callers presumably treat null
     * as "no cascading" — confirm before changing.
     */
    static class NullCascadeStrategy implements CascadeStrategy {
        @Override
        public List<String> generate(String resource, StrInterpolator interpolator, StrInterpolator.Lookup lookup) {
            return null;
        }
    }
}
| 9,348 |
0 | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api | Create_ds/archaius/archaius2-api/src/main/java/com/netflix/archaius/api/annotations/DefaultValue.java | /**
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.archaius.api.annotations;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
/**
 * Supplies a default value, as an unparsed string, for the annotated
 * configuration field or method (presumably used when no property value is
 * present — confirm against the configuration mapping implementation).
 */
@Documented
@Retention(java.lang.annotation.RetentionPolicy.RUNTIME)
@Target({ElementType.FIELD, ElementType.METHOD})
public @interface DefaultValue {
    /** @return the default value as an unparsed string */
    String value();
}
| 9,349 |
0 | Create_ds/archaius/archaius2-archaius1-bridge/src/test/java/com/netflix/archaius | Create_ds/archaius/archaius2-archaius1-bridge/src/test/java/com/netflix/archaius/bridge/AbstractConfigurationBridgeFailureTest.java | package com.netflix.archaius.bridge;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.netflix.archaius.guice.ArchaiusModule;
import com.netflix.config.ConfigurationManager;
import java.io.PrintWriter;
import java.io.StringWriter;
/**
 * Verifies that touching the static ConfigurationManager from a Guice module's
 * static initializer — i.e. before the archaius bridge has been wired — fails
 * injector creation with an error that names the offending class.
 * Kept as a doc-only review: the behavior under test depends on exact static
 * initialization ordering, so the code is left untouched.
 */
public class AbstractConfigurationBridgeFailureTest {
    /** Installs the archaius1/archaius2 static bridge plus the core archaius2 module. */
    public static class TestModule extends AbstractModule {
        @Override
        protected void configure() {
            install(new StaticArchaiusBridgeModule());
            install(new ArchaiusModule());
        }
    }

    /**
     * Module that (incorrectly) reads ConfigurationManager during class
     * initialization — the exact anti-pattern this test asserts is reported.
     */
    public static class BadModule extends AbstractModule {
        // Static field initializer runs when Guice loads the class, before the
        // bridge is initialized.
        public static String value = ConfigurationManager.getConfigInstance().getString("foo", "default");

        @Override
        protected void configure() {
        }
    }

    @Before
    public void before() throws NoSuchFieldException, SecurityException, IllegalArgumentException, IllegalAccessException {
        // Reset bridge statics so state from earlier tests cannot leak in.
        StaticAbstractConfiguration.reset();
        StaticDeploymentContext.reset();
    }

    @Test
    public void testStaticInModule() {
        try {
            Guice.createInjector(
                new TestModule(),
                new BadModule());
            Assert.fail();
        } catch (Exception e) {
            // Expect the failure's stack trace to name the offending module
            // class and include the remediation hint emitted by the bridge.
            StringWriter sw = new StringWriter();
            e.printStackTrace(new PrintWriter(sw));
            String stack = sw.toString();
            Assert.assertTrue(stack.contains("com.netflix.archaius.bridge.AbstractConfigurationBridgeFailureTest$BadModule"));
            Assert.assertTrue(stack.contains("**** Remove static reference"));
        }
    }
}
| 9,350 |
0 | Create_ds/archaius/archaius2-archaius1-bridge/src/test/java/com/netflix/archaius | Create_ds/archaius/archaius2-archaius1-bridge/src/test/java/com/netflix/archaius/bridge/BaseBridgeTest.java | package com.netflix.archaius.bridge;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.junit.Assert;
import org.junit.Ignore;
import org.junit.Test;
import com.netflix.archaius.api.Config;
import com.netflix.config.AggregatedConfiguration;
import com.netflix.config.ConfigurationManager;
import java.io.IOException;
import javax.inject.Inject;
import javax.inject.Singleton;
@Ignore
/**
 * Shared test cases for legacy archaius1 configuration loading. Annotated
 * {@code @Ignore} so the cases only execute through concrete subclasses.
 */
@Ignore
public class BaseBridgeTest {
    /** Injectable client that snapshots a property value at construction time. */
    @Singleton
    public static class SomeClient {
        final String fooValue;

        @Inject
        public SomeClient(Config config) {
            fooValue = ConfigurationManager.getConfigInstance().getString("app.override.foo", null);
        }
    }

    @Test
    public void confirmLegacyOverrideOrderResources() throws IOException, ConfigurationException {
        AggregatedConfiguration aggregated = (AggregatedConfiguration) ConfigurationManager.getConfigInstance();

        ConfigurationManager.loadPropertiesFromConfiguration(
                new PropertiesConfiguration("AbstractConfigurationBridgeTest_libA_legacy.properties"));
        Assert.assertTrue(aggregated.getBoolean("libA.legacy.loaded", false));
        Assert.assertEquals("libA", aggregated.getString("lib.legacy.override", null));
        Assert.assertTrue(aggregated.getBoolean("libA.legacy.loaded"));

        // Loading libB afterwards must not displace the override value libA set first.
        ConfigurationManager.loadPropertiesFromConfiguration(
                new PropertiesConfiguration("AbstractConfigurationBridgeTest_libB_legacy.properties"));
        Assert.assertTrue(aggregated.getBoolean("libB.legacy.loaded", false));
        Assert.assertEquals("libA", aggregated.getString("lib.legacy.override", null));
        Assert.assertTrue(aggregated.getBoolean("libB.legacy.loaded"));
    }
}
| 9,351 |
0 | Create_ds/archaius/archaius2-archaius1-bridge/src/test/java/com/netflix/archaius | Create_ds/archaius/archaius2-archaius1-bridge/src/test/java/com/netflix/archaius/bridge/AbstractConfigurationBridgeTest.java | package com.netflix.archaius.bridge;
import org.apache.commons.configuration.AbstractConfiguration;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.MapConfiguration;
import org.apache.commons.configuration.event.ConfigurationListener;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import org.mockito.Mockito;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Key;
import com.netflix.archaius.api.Config;
import com.netflix.archaius.api.annotations.ConfigurationSource;
import com.netflix.archaius.api.config.SettableConfig;
import com.netflix.archaius.api.inject.RuntimeLayer;
import com.netflix.archaius.guice.ArchaiusModule;
import com.netflix.config.AggregatedConfiguration;
import com.netflix.config.ConfigurationManager;
import com.netflix.config.DeploymentContext;
import com.netflix.config.DeploymentContext.ContextKey;
import java.io.IOException;
import java.util.Properties;
/**
 * End-to-end tests for the archaius1/archaius2 static bridge: a single shared
 * injector installs the bridge plus an application-override layer, and the
 * tests verify that values and change notifications flow between the legacy
 * ConfigurationManager API and the injected archaius2 Config.
 *
 * NOTE(review): several tests below share static ConfigurationManager state
 * (their javadoc says they cannot run alongside other tests), so test
 * isolation here is fragile — verify ordering assumptions before reorganizing.
 */
public class AbstractConfigurationBridgeTest extends BaseBridgeTest {
    /** Installs the static bridge and archaius2 with the supplied application overrides. */
    public static class TestModule extends AbstractModule {
        private Properties properties;

        TestModule(Properties props) {
            this.properties = props;
        }

        TestModule() {
            this.properties = new Properties();
        }

        @Override
        protected void configure() {
            install(new StaticArchaiusBridgeModule());
            install(new ArchaiusModule().withApplicationOverrides(properties)) ;
            bind(SomeClient.class).asEagerSingleton();
        }
    }

    @Rule
    public TestName testName = new TestName();

    // Shared across all tests; populated once in the @BeforeClass method.
    private static SettableConfig settable;
    private static Injector injector;
    private static AbstractConfiguration commonsConfig;
    private static Config config;

    @BeforeClass
    public static void before() throws NoSuchFieldException, SecurityException, IllegalArgumentException, IllegalAccessException {
        final Properties props = new Properties();
        props.setProperty("app.override.foo", "bar");
        props.setProperty(ContextKey.environment.getKey(), "test");
        injector = Guice.createInjector(
            new TestModule(props),
            new AbstractModule() {
                @Override
                protected void configure() {
                    bind(SomeClient.class).asEagerSingleton();
                }
            });
        config = injector.getInstance(Config.class);
        settable = injector.getInstance(Key.get(SettableConfig.class, RuntimeLayer.class));
        // The bridge must have installed itself as THE ConfigurationManager instance.
        Assert.assertTrue(ConfigurationManager.isConfigurationInstalled());
        commonsConfig = ConfigurationManager.getConfigInstance();
        Assert.assertEquals(StaticAbstractConfiguration.class, commonsConfig.getClass());
    }

    @Test
    public void testBasicWiring() {
        SomeClient client = injector.getInstance(SomeClient.class);
        Assert.assertNotNull(ConfigurationManager.getConfigInstance());
        // SomeClient read this value through the legacy API at construction time.
        Assert.assertEquals("bar", client.fooValue);
    }

    // Marker classes whose @ConfigurationSource triggers a library-properties load
    // when the class is instantiated by the injector.
    @ConfigurationSource(value={"AbstractConfigurationBridgeTest_libA"})
    static class LibA {
    }

    @ConfigurationSource(value={"AbstractConfigurationBridgeTest_libB"})
    static class LibB {
    }

    @Test
    public void lastLoadedLibraryWins() throws IOException {
        // Despite the method name, the assertions show the FIRST loaded library's
        // value for "lib.override" is retained after libB loads.
        Config config = injector.getInstance(Config.class);
        injector.getInstance(LibA.class);
        Assert.assertTrue(config.getBoolean("libA.loaded", false));
        Assert.assertEquals("libA", config.getString("lib.override", null));
        injector.getInstance(LibB.class);
        Assert.assertTrue(config.getBoolean("libB.loaded", false));
        Assert.assertEquals("libA", config.getString("lib.override", null));
    }

    @Test
    public void basicBridgeTest() throws IOException {
        // Deployment context should reflect the "test" environment set in before().
        DeploymentContext context1 = ConfigurationManager.getDeploymentContext();
        Assert.assertNotNull(context1);
        Assert.assertEquals("test", context1.getDeploymentEnvironment());
        AbstractConfiguration config1 = ConfigurationManager.getConfigInstance();
        DeploymentContext contextDi = injector.getInstance(DeploymentContext.class);
        Assert.assertNotSame(contextDi, context1);
        // Properties loaded through the legacy API are visible on the legacy view...
        ConfigurationManager.loadCascadedPropertiesFromResources("AbstractConfigurationBridgeTest_libA");
        Assert.assertTrue(config1.getBoolean("libA.loaded", false));
        Assert.assertEquals("libA", config1.getString("lib.override", null));
        Config config2 = injector.getInstance(Config.class);
        SettableConfig settable = injector.getInstance(Key.get(SettableConfig.class, RuntimeLayer.class));
        // Changing @environment through the archaius2 runtime layer must be seen
        // by both the static and the injected deployment contexts.
        settable.setProperty("@environment", "foo");
        DeploymentContext context2 = ConfigurationManager.getDeploymentContext();
        Assert.assertEquals("foo", ConfigurationManager.getDeploymentContext().getDeploymentEnvironment());
        Assert.assertEquals("foo", context2.getDeploymentEnvironment());
        Assert.assertNotSame(contextDi, context1);
        Assert.assertEquals("foo", context1.getDeploymentEnvironment());
        // ...and on the archaius2 view as well.
        Assert.assertTrue(config2.getBoolean("libA.loaded", false));
        Assert.assertEquals("libA", config2.getString("lib.override", null));
        ConfigurationManager.loadCascadedPropertiesFromResources("AbstractConfigurationBridgeTest_libB");
        Assert.assertTrue(config1.getBoolean("libB.loaded", false));
        Assert.assertEquals("libA", config1.getString("lib.override", null));
        Assert.assertTrue(config2.getBoolean("libB.loaded", false));
        Assert.assertEquals("libA", config2.getString("lib.override", null));
    }

    /**
     * This test was written to confirm the legacy API behavior. It cannot be run
     * with the other tests since the static state of ConfigurationManager cannot
     * be reset between tests.
     * @throws IOException
     */
    @Test
    public void testBridgePropertiesFromLegacyToNew() throws IOException {
        AbstractConfiguration config1 = ConfigurationManager.getConfigInstance();
        Config config2 = injector.getInstance(Config.class);
        // Properties loaded via the legacy API must be readable from both views,
        // and the first-loaded library's override value must win.
        ConfigurationManager.loadCascadedPropertiesFromResources("AbstractConfigurationBridgeTest_libA");
        Assert.assertTrue(config1.getBoolean("libA.loaded", false));
        Assert.assertEquals("libA", config1.getString("lib.override", null));
        Assert.assertTrue(config2.getBoolean("libA.loaded", false));
        Assert.assertEquals("libA", config2.getString("lib.override", null));
        ConfigurationManager.loadCascadedPropertiesFromResources("AbstractConfigurationBridgeTest_libB");
        Assert.assertTrue(config1.getBoolean("libB.loaded", false));
        Assert.assertEquals("libA", config1.getString("lib.override", null));
        Assert.assertTrue(config2.getBoolean("libB.loaded", false));
        Assert.assertEquals("libA", config2.getString("lib.override", null));
    }

    /**
     * This test was written to confirm the legacy API behavior. It cannot be run
     * with the other tests since the static state of ConfigurationManager cannot
     * be reset between tests.
     * @throws IOException
     */
    @Test
    public void confirmLegacyOverrideOrder() throws IOException {
        AbstractConfiguration config = ConfigurationManager.getConfigInstance();
        ConfigurationManager.loadCascadedPropertiesFromResources("AbstractConfigurationBridgeTest_libA");
        Assert.assertTrue(config.getBoolean("libA.loaded", false));
        Assert.assertEquals("libA", config.getString("lib.override", null));
        ConfigurationManager.loadCascadedPropertiesFromResources("AbstractConfigurationBridgeTest_libB");
        Assert.assertTrue(config.getBoolean("libB.loaded", false));
        Assert.assertEquals("libA", config.getString("lib.override", null));
        // Re-loading libB a second time exercises idempotency of the load path.
        ConfigurationManager.loadCascadedPropertiesFromResources("AbstractConfigurationBridgeTest_libB");
    }

    /**
     * This test was written to confirm the legacy API behavior. It cannot be run
     * with the other tests since the static state of ConfigurationManager cannot
     * be reset between tests.
     * @throws IOException
     * @throws ConfigurationException
     */
    @Test
    public void confirmLegacyOverrideOrderResources() throws IOException, ConfigurationException {
        // Run the base-class assertions, then re-check through the archaius2 view.
        super.confirmLegacyOverrideOrderResources();
        Assert.assertEquals("libA", config.getString("lib.legacy.override"));
        Assert.assertTrue(config.getBoolean("libA.legacy.loaded"));
        Assert.assertTrue(config.getBoolean("libB.legacy.loaded"));
    }

    /**
     * This test was written to confirm the legacy API behavior. It cannot be run
     * with the other tests since the static state of ConfigurationManager cannot
     * be reset between tests.
     * @throws IOException
     */
    @Test
    public void confirmLegacyOverrideOrderAddConfig() throws IOException {
        AggregatedConfiguration aggregatedConfig = (AggregatedConfiguration) ConfigurationManager.getConfigInstance();
        Properties p1 = new Properties();
        p1.setProperty("lib.override", "libA");
        p1.setProperty("libA.loaded", "true");
        aggregatedConfig.addConfiguration(new MapConfiguration(p1));
        Assert.assertTrue(aggregatedConfig.getBoolean("libA.loaded", false));
        Assert.assertEquals("libA", aggregatedConfig.getString("lib.override", null));
        // The second configuration added must NOT override keys from the first.
        Properties p2 = new Properties();
        p2.setProperty("lib.override", "libB");
        p2.setProperty("libB.loaded", "true");
        aggregatedConfig.addConfiguration(new MapConfiguration(p2));
        Assert.assertTrue(aggregatedConfig.getBoolean("libB.loaded", false));
        Assert.assertEquals("libA", aggregatedConfig.getString("lib.override", null));
    }

    @Test
    public void testCommonConfigurationListener() {
        ConfigurationListener listener = Mockito.mock(ConfigurationListener.class);
        AbstractConfiguration config = ConfigurationManager.getConfigInstance();
        config.addConfigurationListener(listener);
        SettableConfig settable = injector.getInstance(Key.get(SettableConfig.class, RuntimeLayer.class));
        settable.setProperty("new_property", "new_value");
        // One property update is expected to produce two listener callbacks
        // (the bridge fires a before-update and an after-update event).
        Mockito.verify(listener, Mockito.times(2)).configurationChanged(Mockito.any());
    }

    @Test
    public void verifyValueInterpollation() {
        AbstractConfiguration config = ConfigurationManager.getConfigInstance();
        // ${ABC:true} falls back to "true" when ABC is undefined.
        config.setProperty("foo", "${ABC:true}");
        boolean value = config.getBoolean("foo");
        Assert.assertTrue(value);
    }

    @Test
    public void verifyMissingProperty() {
        AbstractConfiguration config = ConfigurationManager.getConfigInstance();
        // A missing key with an explicit null default must return null, not throw.
        Boolean value = config.getBoolean("foo", null);
        Assert.assertNull(value);
    }
}
| 9,352 |
0 | Create_ds/archaius/archaius2-archaius1-bridge/src/test/java/com/netflix/archaius | Create_ds/archaius/archaius2-archaius1-bridge/src/test/java/com/netflix/archaius/bridge/Archaius1BehaviorTest.java | package com.netflix.archaius.bridge;
// Concrete subclass that runs the inherited BaseBridgeTest case with no extra
// modules or overrides of its own (the body is intentionally empty).
// NOTE(review): BaseBridgeTest is annotated @Ignore; since @Ignore is not an
// @Inherited annotation, the inherited test is expected to execute here —
// confirm against the JUnit runner in use.
public class Archaius1BehaviorTest extends BaseBridgeTest {
}
| 9,353 |
0 | Create_ds/archaius/archaius2-archaius1-bridge/src/test/java/com/netflix/archaius | Create_ds/archaius/archaius2-archaius1-bridge/src/test/java/com/netflix/archaius/bridge/ConfigToCommonsTest.java | package com.netflix.archaius.bridge;
import java.util.NoSuchElementException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.junit.Assert;
import org.junit.Test;
import com.netflix.archaius.config.MapConfig;
/**
 * Unit tests for {@code ConfigToCommonsAdapter}, which exposes an archaius2
 * {@code Config} through the commons-configuration API.
 */
public class ConfigToCommonsTest {
    // Adapter under test, backed by an immutable archaius2 MapConfig.
    private ConfigToCommonsAdapter config = new ConfigToCommonsAdapter(MapConfig.builder()
        .put("boolean", true)
        .put("string", "set")
        .put("interpolated", "${string}")
        .build()
        );

    @Test
    public void testIsEmptyAPI() {
        Assert.assertFalse(config.isEmpty());
    }

    @Test
    public void confirmStringWorks() {
        Assert.assertEquals("set", config.getString("string"));
    }

    @Test
    public void confirmInterpolationWorks() {
        // ${string} must resolve against the backing config.
        Assert.assertEquals("set", config.getString("interpolated"));
    }

    @Test
    public void configNonStringWorks() {
        Assert.assertEquals(true, config.getBoolean("boolean"));
    }

    @Test(expected=NoSuchElementException.class)
    public void configNonExistentKeyWorks() {
        // The adapter is expected to throw for a missing key even though a
        // default is supplied; the assert only runs if no exception is thrown.
        Assert.assertNull(config.getString("nonexistent", null));
    }

    @Test(expected=UnsupportedOperationException.class)
    public void configIsImmutable() {
        config.setProperty("foo", "bar");
    }

    @Test
    public void test() {
        // Fixed: the local previously shadowed the 'config' field and the test
        // asserted nothing. With delimiter parsing disabled, a comma-separated
        // value must be kept as a single string instead of being split.
        PropertiesConfiguration propsConfig = new PropertiesConfiguration();
        propsConfig.setDelimiterParsingDisabled(true);
        propsConfig.setProperty("foo", "bar,bar1");
        Assert.assertEquals("bar,bar1", propsConfig.getString("foo"));
    }
}
| 9,354 |
0 | Create_ds/archaius/archaius2-archaius1-bridge/src/test/java/com/netflix/archaius | Create_ds/archaius/archaius2-archaius1-bridge/src/test/java/com/netflix/archaius/bridge/StaticBridgeAddConfigurationTest.java | package com.netflix.archaius.bridge;
import org.apache.commons.configuration.event.ConfigurationEvent;
import org.apache.commons.configuration.event.ConfigurationListener;
import org.junit.Test;
import org.mockito.Mockito;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.netflix.archaius.api.config.SettableConfig;
import com.netflix.archaius.api.inject.RuntimeLayer;
import com.netflix.archaius.guice.ArchaiusModule;
import com.netflix.config.ConfigurationManager;
import javax.inject.Inject;
/**
 * Verifies that a {@link ConfigurationListener} registered on the static
 * ConfigurationManager instance BEFORE the Guice bridge is initialized still
 * receives change notifications once the bridge is wired up, and stops
 * receiving them after the static bridges are reset.
 */
public class StaticBridgeAddConfigurationTest {
    // Static so the static helper below and the test observe the same mock.
    private static ConfigurationListener listener = Mockito.mock(ConfigurationListener.class);

    public static class Foo {
        // Simulates legacy code that registers a listener before
        // StaticArchaiusBridgeModule has injected the real delegate.
        public static void addListenerBeforeBridgeInitialization() {
            ConfigurationManager.getConfigInstance().addConfigurationListener(listener);
        }
    }

    @Inject
    @RuntimeLayer
    SettableConfig settableConfig;

    @Test
    public void listenerAddedToStaticBeforeStaticInjection() {
        Guice.createInjector(new ArchaiusModule(), new StaticArchaiusBridgeModule(), new AbstractModule() {
            @Override
            protected void configure() {
                // Register the listener during module configuration, then have
                // Guice inject settableConfig into this test instance.
                Foo.addListenerBeforeBridgeInitialization();
                this.requestInjection(StaticBridgeAddConfigurationTest.this);
            }
        });
        // No configuration change yet, so the listener must not have fired.
        Mockito.verify(listener, Mockito.never()).configurationChanged(Mockito.isA(ConfigurationEvent.class));
        // One property update produces two callbacks (before- and after-update events).
        settableConfig.setProperty("foo", "bar");
        Mockito.verify(listener, Mockito.times(2)).configurationChanged(Mockito.isA(ConfigurationEvent.class));
        // Listener no longer invoked after reset
        StaticArchaiusBridgeModule.resetStaticBridges();
        settableConfig.setProperty("bar", "baz");
        Mockito.verify(listener, Mockito.times(2)).configurationChanged(Mockito.isA(ConfigurationEvent.class));
    }
}
| 9,355 |
0 | Create_ds/archaius/archaius2-archaius1-bridge/src/test/java/com/netflix/archaius | Create_ds/archaius/archaius2-archaius1-bridge/src/test/java/com/netflix/archaius/bridge/DynamicPropertyTest.java | package com.netflix.archaius.bridge;
import org.apache.commons.configuration.AbstractConfiguration;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Key;
import com.netflix.archaius.api.Config;
import com.netflix.archaius.api.Property;
import com.netflix.archaius.api.PropertyFactory;
import com.netflix.archaius.api.config.SettableConfig;
import com.netflix.archaius.api.inject.RuntimeLayer;
import com.netflix.archaius.guice.ArchaiusModule;
import com.netflix.archaius.visitor.PrintStreamVisitor;
import com.netflix.config.ConfigurationManager;
import com.netflix.config.DynamicIntProperty;
import com.netflix.config.DynamicPropertyFactory;
import com.netflix.config.DynamicStringProperty;
/**
 * End-to-end checks that property values flow between the archaius2 APIs and
 * the legacy archaius1 static APIs once the static bridge is installed.
 */
public class DynamicPropertyTest {
    @Rule
    public TestName testName = new TestName();

    private Injector injector;

    @Before
    public void before() {
        injector = Guice.createInjector(new ArchaiusModule(), new StaticArchaiusBridgeModule());
    }

    @After
    public void after() {
        // Detach the static bridges so the next test gets a clean slate.
        StaticArchaiusBridgeModule.resetStaticBridges();
    }

    @Test
    public void settingOnArchaius2UpdateArchaius1() {
        String key = testName.getMethodName();
        Property<String> archaius2Prop = injector.getInstance(PropertyFactory.class).getProperty(key).asString("default");
        DynamicStringProperty archaius1Prop = DynamicPropertyFactory.getInstance().getStringProperty(key, "default");
        Assert.assertEquals("default", archaius1Prop.get());
        Assert.assertEquals("default", archaius2Prop.get());

        // Mutate through the archaius2 runtime layer; both views must observe it.
        SettableConfig runtime = injector.getInstance(Key.get(SettableConfig.class, RuntimeLayer.class));
        runtime.setProperty(key, "newvalue");
        Assert.assertEquals("newvalue", archaius2Prop.get());
        Assert.assertEquals("newvalue", archaius1Prop.get());
    }

    @Test
    public void testNonStringDynamicProperty() {
        Config config = injector.getInstance(Config.class);
        config.accept(new PrintStreamVisitor());
        // Set a non-String value through the legacy API...
        ConfigurationManager.getConfigInstance().setProperty("foo", 123);
        // ...and read it back as an int through both property factories.
        Property<Integer> archaius2Prop = injector.getInstance(PropertyFactory.class).getProperty("foo").asInteger(1);
        DynamicIntProperty archaius1Prop = DynamicPropertyFactory.getInstance().getIntProperty("foo", 2);
        Assert.assertEquals(123, (int) archaius2Prop.get());
        Assert.assertEquals(123, archaius1Prop.get());
    }

    @Test
    public void testInterpolation() {
        Config config = injector.getInstance(Config.class);
        config.forEachProperty((k, v) -> System.out.println(k + " = " + v));
        // A ${bar} reference stored via the legacy API must interpolate on read.
        ConfigurationManager.getConfigInstance().setProperty("foo", "${bar}");
        ConfigurationManager.getConfigInstance().setProperty("bar", "value");
        DynamicStringProperty prop = DynamicPropertyFactory.getInstance().getStringProperty("foo", "default");
        Assert.assertEquals("value", prop.get());
    }

    @Test
    public void testPropertyDeletion() {
        AbstractConfiguration legacyView = ConfigurationManager.getConfigInstance();
        Config archaius2View = injector.getInstance(Config.class);

        legacyView.setProperty("libA.loaded", "true");
        Assert.assertTrue(legacyView.getBoolean("libA.loaded", false));
        Assert.assertTrue(archaius2View.getBoolean("libA.loaded", false));

        // Clearing through the legacy view must be visible on both sides.
        legacyView.clearProperty("libA.loaded");
        Assert.assertFalse(legacyView.getBoolean("libA.loaded", false));
        Assert.assertFalse(archaius2View.getBoolean("libA.loaded", false));
    }
}
| 9,356 |
0 | Create_ds/archaius/archaius2-archaius1-bridge/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-archaius1-bridge/src/main/java/com/netflix/archaius/bridge/AbstractConfigurationBridge.java | package com.netflix.archaius.bridge;
import java.util.ArrayList;
import java.util.HashSet;
import org.apache.commons.configuration.AbstractConfiguration;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.HierarchicalConfiguration;
import com.netflix.archaius.api.Config;
import com.netflix.archaius.api.ConfigListener;
import com.netflix.archaius.api.config.CompositeConfig;
import com.netflix.archaius.api.config.SettableConfig;
import com.netflix.archaius.api.exceptions.ConfigException;
import com.netflix.archaius.api.inject.LibrariesLayer;
import com.netflix.archaius.api.inject.RuntimeLayer;
import com.netflix.archaius.commons.CommonsToConfig;
import com.netflix.archaius.config.DefaultConfigListener;
import com.netflix.archaius.exceptions.ConfigAlreadyExistsException;
import com.netflix.config.AggregatedConfiguration;
import com.netflix.config.DynamicPropertySupport;
import com.netflix.config.PropertyListener;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import javax.inject.Inject;
import javax.inject.Singleton;
/**
* @see StaticArchaiusBridgeModule
*/
@Singleton
class AbstractConfigurationBridge extends AbstractConfiguration implements AggregatedConfiguration, DynamicPropertySupport {
private final Config config;
private final SettableConfig settable;
private final CompositeConfig libraries;
private final AtomicInteger libNameCounter = new AtomicInteger();
{
AbstractConfiguration.setDefaultListDelimiter('\0');
}
@Inject
public AbstractConfigurationBridge(
final Config config,
@LibrariesLayer CompositeConfig libraries,
@RuntimeLayer SettableConfig settable) {
this.config = config;
this.settable = settable;
this.libraries = libraries;
this.config.addListener(new ConfigListener() {
@Override
public void onConfigAdded(Config child) {
onConfigUpdated(config);
}
@Override
public void onConfigRemoved(Config child) {
onConfigUpdated(config);
}
@Override
public void onConfigUpdated(Config config) {
fireEvent(HierarchicalConfiguration.EVENT_ADD_NODES, null, null, true);
fireEvent(HierarchicalConfiguration.EVENT_ADD_NODES, null, null, false);
}
@Override
public void onError(Throwable error, Config config) {
}
});
}
@Override
public boolean isEmpty() {
return config.isEmpty();
}
@Override
public boolean containsKey(String key) {
return config.containsKey(key);
}
@Override
public String getString(String key, String defaultValue) {
return config.getString(key, defaultValue);
}
@Override
public Object getProperty(String key) {
return config.getRawProperty(key); // Should interpolate
}
@Override
public Iterator<String> getKeys() {
return config.getKeys();
}
@Override
protected void addPropertyDirect(String key, Object value) {
settable.setProperty(key, value);
}
@Override
protected void clearPropertyDirect(String key) {
settable.clearProperty(key);
}
@Override
public void addConfiguration(AbstractConfiguration config) {
addConfiguration(config, "Config-" + libNameCounter.incrementAndGet());
}
@Override
public void addConfiguration(AbstractConfiguration config, String name) {
try {
libraries.addConfig(name, new CommonsToConfig(config));
}
catch (ConfigAlreadyExistsException e) {
// OK To ignore
}
catch (ConfigException e) {
throw new RuntimeException("Unable to add configuration " + name, e);
}
}
@Override
public Set<String> getConfigurationNames() {
return new HashSet<>(libraries.getConfigNames());
}
@Override
public List<String> getConfigurationNameList() {
return new ArrayList<>(libraries.getConfigNames());
}
@Override
public Configuration getConfiguration(String name) {
return new ConfigToCommonsAdapter(libraries.getConfig(name));
}
@Override
public int getNumberOfConfigurations() {
return libraries.getConfigNames().size();
}
@Override
public Configuration getConfiguration(int index) {
throw new UnsupportedOperationException();
}
@Override
public List<AbstractConfiguration> getConfigurations() {
throw new UnsupportedOperationException();
}
@Override
public Configuration removeConfiguration(String name) {
libraries.removeConfig(name);
return null;
}
@Override
public boolean removeConfiguration(Configuration config) {
throw new UnsupportedOperationException();
}
@Override
public Configuration removeConfigurationAt(int index) {
throw new UnsupportedOperationException();
}
@Override
public void addConfigurationListener(final PropertyListener expandedPropertyListener) {
config.addListener(new DefaultConfigListener() {
@Override
public void onConfigAdded(Config config) {
expandedPropertyListener.configSourceLoaded(config);
}
@Override
public void onConfigRemoved(Config config) {
expandedPropertyListener.configSourceLoaded(config);
}
@Override
public void onConfigUpdated(Config config) {
expandedPropertyListener.configSourceLoaded(config);
}
});
}
public Object resolve(String value) {
return config.resolve(value);
}
}
| 9,357 |
0 | Create_ds/archaius/archaius2-archaius1-bridge/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-archaius1-bridge/src/main/java/com/netflix/archaius/bridge/StaticAbstractConfiguration.java | package com.netflix.archaius.bridge;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.configuration.AbstractConfiguration;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.event.ConfigurationEvent;
import org.apache.commons.configuration.event.ConfigurationListener;
import com.netflix.config.AggregatedConfiguration;
import com.netflix.config.ConfigurationManager;
import com.netflix.config.DeploymentContext;
import com.netflix.config.DynamicPropertyFactory;
import com.netflix.config.DynamicPropertySupport;
import com.netflix.config.PropertyListener;
/**
* @see StaticArchaiusBridgeModule
*/
@Singleton
public class StaticAbstractConfiguration extends AbstractConfiguration implements AggregatedConfiguration, DynamicPropertySupport {
private static volatile AbstractConfigurationBridge delegate;
private static final StaticAbstractConfiguration INSTANCE = new StaticAbstractConfiguration();
@Inject
public synchronized static void initialize(DeploymentContext context, AbstractConfigurationBridge config) {
reset();
delegate = config;
AbstractConfiguration actualConfig = ConfigurationManager.getConfigInstance();
if (!actualConfig.equals(INSTANCE)) {
UnsupportedOperationException cause = new UnsupportedOperationException("**** Remove static reference to ConfigurationManager or FastProperty in this call stack ****");
cause.setStackTrace(ConfigurationManager.getStaticInitializationSource());
throw new IllegalStateException("Not using expected bridge!!! " + actualConfig.getClass() + " instead of " + StaticAbstractConfiguration.class, cause);
}
DynamicPropertyFactory.initWithConfigurationSource((AbstractConfiguration)INSTANCE);
// Bridge change notification from the new delegate to any listeners registered on
// this static class. Notifications will be removed if the StaticAbstractConfiguration
// is reset and will reattached to a new bridge should initialize be called again.
config.addConfigurationListener(INSTANCE.forwardingConfigurationListener);
config.addConfigurationListener(INSTANCE.forwardingPropertyListener);
}
public static AbstractConfiguration getInstance() {
return INSTANCE;
}
public synchronized static void reset() {
if (delegate != null) {
delegate.removeConfigurationListener(INSTANCE.forwardingConfigurationListener);
}
delegate = null;
}
private final PropertyListener forwardingPropertyListener;
private final ConfigurationListener forwardingConfigurationListener;
private final CopyOnWriteArrayList<PropertyListener> propertyListeners = new CopyOnWriteArrayList<>();
public StaticAbstractConfiguration() {
this.forwardingPropertyListener = new PropertyListener() {
@Override
public void configSourceLoaded(Object source) {
propertyListeners.forEach(listener -> listener.configSourceLoaded(source));
}
@Override
public void addProperty(Object source, String name, Object value, boolean beforeUpdate) {
propertyListeners.forEach(listener -> listener.addProperty(source, name, value, beforeUpdate));
}
@Override
public void setProperty(Object source, String name, Object value, boolean beforeUpdate) {
propertyListeners.forEach(listener -> listener.setProperty(source, name, value, beforeUpdate));
}
@Override
public void clearProperty(Object source, String name, Object value, boolean beforeUpdate) {
propertyListeners.forEach(listener -> listener.clearProperty(source, name, value, beforeUpdate));
}
@Override
public void clear(Object source, boolean beforeUpdate) {
propertyListeners.forEach(listener -> listener.clear(source, beforeUpdate));
}
};
this.forwardingConfigurationListener = new ConfigurationListener() {
@Override
public void configurationChanged(ConfigurationEvent event) {
StaticAbstractConfiguration.this.fireEvent(event.getType(), event.getPropertyName(), event.getPropertyValue(), event.isBeforeUpdate());
}
};
}
@Override
public boolean isEmpty() {
if (delegate == null) {
System.err.println("[isEmpty()] StaticAbstractConfiguration not initialized yet.");
return true;
}
return delegate.isEmpty();
}
@Override
public boolean containsKey(String key) {
if (delegate == null) {
System.err.println("[containsKey(" + key + ")] StaticAbstractConfiguration not initialized yet.");
return false;
}
return delegate.containsKey(key);
}
@Override
public String getString(String key, String defaultValue) {
if (delegate == null) {
System.out.println("[getString(" + key + ", " + defaultValue + ")] StaticAbstractConfiguration not initialized yet.");
return defaultValue;
}
return delegate.getString(key, defaultValue);
}
@Override
public Object getProperty(String key) {
if (delegate == null) {
System.out.println("[getProperty(" + key + ")] StaticAbstractConfiguration not initialized yet.");
return null;
}
return delegate.getProperty(key);
}
@Override
public Iterator<String> getKeys() {
if (delegate == null) {
throw new RuntimeException("[getKeys()] StaticAbstractConfiguration not initialized yet.");
}
return delegate.getKeys();
}
@Override
public void addConfiguration(AbstractConfiguration config) {
delegate.addConfiguration(config);
}
@Override
public void addConfiguration(AbstractConfiguration config, String name) {
delegate.addConfiguration(config, name);
}
@Override
public Set<String> getConfigurationNames() {
return delegate.getConfigurationNames();
}
@Override
public List<String> getConfigurationNameList() {
return delegate.getConfigurationNameList();
}
@Override
public Configuration getConfiguration(String name) {
return delegate.getConfiguration(name);
}
@Override
public int getNumberOfConfigurations() {
return delegate.getNumberOfConfigurations();
}
@Override
public Configuration getConfiguration(int index) {
return delegate.getConfiguration(index);
}
@Override
public List<AbstractConfiguration> getConfigurations() {
return delegate.getConfigurations();
}
@Override
public Configuration removeConfiguration(String name) {
return delegate.removeConfiguration(name);
}
@Override
public boolean removeConfiguration(Configuration config) {
return delegate.removeConfiguration(config);
}
@Override
public Configuration removeConfigurationAt(int index) {
return delegate.removeConfigurationAt(index);
}
@Override
protected void addPropertyDirect(String key, Object value) {
delegate.addPropertyDirect(key, value);
}
@Override
protected String interpolate(String base) {
return delegate.resolve(base).toString();
}
@Override
protected Object interpolate(Object value) {
return delegate.resolve(value.toString());
}
@Override
protected void clearPropertyDirect(String key) {
delegate.clearProperty(key);
}
public Collection<ConfigurationListener> getConfigurationListeners() {
List<ConfigurationListener> listeners = new ArrayList<>(super.getConfigurationListeners());
Optional.ofNullable(delegate).ifPresent(d -> listeners.addAll(d.getConfigurationListeners()));
return listeners;
}
@Override
public void addConfigurationListener(PropertyListener expandedPropertyListener) {
propertyListeners.add(expandedPropertyListener);
}
}
| 9,358 |
0 | Create_ds/archaius/archaius2-archaius1-bridge/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-archaius1-bridge/src/main/java/com/netflix/archaius/bridge/StaticDeploymentContext.java | package com.netflix.archaius.bridge;
import com.netflix.config.DeploymentContext;
import javax.inject.Inject;
public final class StaticDeploymentContext implements DeploymentContext {
private static final StaticDeploymentContext INSTANCE = new StaticDeploymentContext();
private static volatile DeploymentContext delegate;
@Inject
public static void initialize(DeploymentContext context) {
delegate = context;
}
public static void reset() {
delegate = null;
}
public static DeploymentContext getInstance() {
return INSTANCE;
}
@Override
public String getDeploymentEnvironment() {
return getValue(ContextKey.environment);
}
@Override
public void setDeploymentEnvironment(String env) {
delegate.setDeploymentEnvironment(env);
}
@Override
public String getDeploymentDatacenter() {
return getValue(ContextKey.datacenter);
}
@Override
public void setDeploymentDatacenter(String deployedAt) {
delegate.setDeploymentDatacenter(deployedAt);
}
@Override
public String getApplicationId() {
return getValue(ContextKey.appId);
}
@Override
public void setApplicationId(String appId) {
setValue(ContextKey.appId, appId);
}
@Override
public String getDeploymentServerId() {
return getValue(ContextKey.serverId);
}
@Override
public void setDeploymentServerId(String serverId) {
setValue(ContextKey.serverId, serverId);
}
@Override
public String getDeploymentStack() {
return getValue(ContextKey.stack);
}
@Override
public String getValue(ContextKey key) {
if (delegate == null) {
System.out.println("Configuration not yet initialized. Returning 'null' for " + key);
return null;
}
return delegate.getValue(key);
}
@Override
public void setValue(ContextKey key, String value) {
delegate.setValue(key, value);
}
@Override
public void setDeploymentStack(String stack) {
setValue(ContextKey.stack, stack);
}
@Override
public String getDeploymentRegion() {
return getValue(ContextKey.region);
}
@Override
public void setDeploymentRegion(String region) {
setValue(ContextKey.region, region);
}
}
| 9,359 |
0 | Create_ds/archaius/archaius2-archaius1-bridge/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-archaius1-bridge/src/main/java/com/netflix/archaius/bridge/ConfigBasedDeploymentContext.java | package com.netflix.archaius.bridge;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.netflix.archaius.api.Config;
import com.netflix.archaius.api.config.SettableConfig;
import com.netflix.archaius.api.inject.RuntimeLayer;
import com.netflix.config.DeploymentContext;
/**
 * DeploymentContext that reads the ContextKey properties from the top level
 * injected Config and writes updates into the runtime override layer.
 *
 * @author elandau
 */
@Singleton
public class ConfigBasedDeploymentContext implements DeploymentContext {
    /** Source of truth for reads; missing keys resolve to the empty string. */
    private final Config config;

    /** Mutable runtime layer that receives all writes. */
    private final SettableConfig override;

    @Inject
    public ConfigBasedDeploymentContext(Config config, @RuntimeLayer SettableConfig override) {
        this.config = config;
        this.override = override;
    }

    @Override
    public String getValue(ContextKey key) {
        return config.getString(key.getKey(), "");
    }

    @Override
    public void setValue(ContextKey key, String value) {
        override.setProperty(key.getKey(), value);
    }

    // The per-key accessors below all funnel through getValue()/setValue() so the
    // lookup and update behavior is defined in exactly one place.

    @Override
    public String getDeploymentEnvironment() {
        return getValue(ContextKey.environment);
    }

    @Override
    public void setDeploymentEnvironment(String env) {
        setValue(ContextKey.environment, env);
    }

    @Override
    public String getDeploymentDatacenter() {
        return getValue(ContextKey.datacenter);
    }

    @Override
    public void setDeploymentDatacenter(String deployedAt) {
        setValue(ContextKey.datacenter, deployedAt);
    }

    @Override
    public String getApplicationId() {
        return getValue(ContextKey.appId);
    }

    @Override
    public void setApplicationId(String appId) {
        setValue(ContextKey.appId, appId);
    }

    @Override
    public String getDeploymentServerId() {
        return getValue(ContextKey.serverId);
    }

    @Override
    public void setDeploymentServerId(String serverId) {
        setValue(ContextKey.serverId, serverId);
    }

    @Override
    public String getDeploymentStack() {
        return getValue(ContextKey.stack);
    }

    @Override
    public void setDeploymentStack(String stack) {
        setValue(ContextKey.stack, stack);
    }

    @Override
    public String getDeploymentRegion() {
        return getValue(ContextKey.region);
    }

    @Override
    public void setDeploymentRegion(String region) {
        setValue(ContextKey.region, region);
    }
}
| 9,360 |
0 | Create_ds/archaius/archaius2-archaius1-bridge/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-archaius1-bridge/src/main/java/com/netflix/archaius/bridge/ConfigToCommonsAdapter.java | package com.netflix.archaius.bridge;
import java.util.Iterator;
import org.apache.commons.configuration.AbstractConfiguration;
import com.netflix.archaius.api.Config;
/**
 * Adapter from an Archaius2 configuration to an Apache Commons Configuration.
 *
 * Note that since Archaius2 treats the Config as immutable setting properties
 * is not allowed.
 *
 * @author elandau
 */
class ConfigToCommonsAdapter extends AbstractConfiguration {
    // The wrapped archaius2 config; never reassigned, so keep it final.
    private final Config config;

    public ConfigToCommonsAdapter(Config config) {
        this.config = config;
    }

    @Override
    public boolean isEmpty() {
        return config.isEmpty();
    }

    @Override
    public boolean containsKey(String key) {
        return config.containsKey(key);
    }

    @Override
    public Object getProperty(String key) {
        // NOTE(review): commons-configuration allows getProperty() to return list
        // values for multi-valued keys; this adapter always returns the string
        // form — confirm no caller relies on list-valued properties.
        return config.getString(key);
    }

    @Override
    public Iterator<String> getKeys() {
        return config.getKeys();
    }

    @Override
    protected void addPropertyDirect(String key, Object value) {
        // Archaius2 Config is immutable, so the commons write hook must reject writes.
        throw new UnsupportedOperationException("Can't set key '" + key + "'. Config is immutable.");
    }
}
| 9,361 |
0 | Create_ds/archaius/archaius2-archaius1-bridge/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-archaius1-bridge/src/main/java/com/netflix/archaius/bridge/StaticArchaiusBridgeModule.java | package com.netflix.archaius.bridge;
import com.google.inject.AbstractModule;
import com.netflix.config.DeploymentContext;
/**
* Module with bindings to bridge the legacy static Archaius1 ConfigurationManager API with the new
* the Archaius2 Config guice bindings. Configuration loaded into either library will be visible
* to the other.
*
* To install,
* <pre>
* {@code
* Guice.createInjector(new ArchaiusModule(), new StaticArchaiusBridgeModule());
* }
* </pre>
*
* When running multiple unit tests make sure to add the following @Before method to your JUnit classes
*
* <pre>
* {@code
* @Before
* public void before() throws NoSuchFieldException, SecurityException, IllegalArgumentException, IllegalAccessException {
* StaticAbstractConfiguration.reset();
* StaticDeploymentContext.reset();
* }
* }
* </pre>
*/
public final class StaticArchaiusBridgeModule extends AbstractModule {
    static {
        // Point archaius1's static factories at the bridge implementations before
        // ConfigurationManager is first touched.
        System.setProperty("archaius.default.configuration.factory", StaticAbstractConfiguration.class.getName());
        System.setProperty("archaius.default.deploymentContext.factory", StaticDeploymentContext.class.getName());
    }

    @Override
    protected void configure() {
        requestStaticInjection(StaticDeploymentContext.class);
        requestStaticInjection(StaticAbstractConfiguration.class);
        bind(DeploymentContext.class).to(ConfigBasedDeploymentContext.class);
    }

    /** Clears all static bridge state; call between unit tests. */
    public static void resetStaticBridges() {
        StaticAbstractConfiguration.reset();
        StaticDeploymentContext.reset();
    }

    // All instances of this module are interchangeable, so equality is by type.
    // The null check is required by the Object.equals() contract: x.equals(null)
    // must return false, whereas the previous obj.getClass() call threw an NPE.
    @Override
    public boolean equals(Object obj) {
        return obj != null && StaticArchaiusBridgeModule.class.equals(obj.getClass());
    }

    @Override
    public int hashCode() {
        return StaticArchaiusBridgeModule.class.hashCode();
    }
}
| 9,362 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/test/java/com/netflix/titus/gateway/service/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/test/java/com/netflix/titus/gateway/service/v3/internal/LocalCacheQueryProcessorTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import com.netflix.archaius.api.config.SettableConfig;
import com.netflix.archaius.config.DefaultSettableConfig;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.event.JobManagerEvent;
import com.netflix.titus.api.jobmanager.model.job.event.JobUpdateEvent;
import com.netflix.titus.api.jobmanager.model.job.event.TaskUpdateEvent;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.api.model.callmetadata.CallMetadataConstants;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.archaius2.Archaius2Ext;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.grpc.protogen.JobChangeNotification;
import com.netflix.titus.grpc.protogen.JobDescriptor;
import com.netflix.titus.grpc.protogen.JobQuery;
import com.netflix.titus.grpc.protogen.JobQueryResult;
import com.netflix.titus.grpc.protogen.LogLocation;
import com.netflix.titus.grpc.protogen.ObserveJobsQuery;
import com.netflix.titus.grpc.protogen.Page;
import com.netflix.titus.grpc.protogen.TaskQuery;
import com.netflix.titus.grpc.protogen.TaskQueryResult;
import com.netflix.titus.grpc.protogen.TaskStatus;
import com.netflix.titus.runtime.connector.jobmanager.JobDataReplicator;
import com.netflix.titus.runtime.connector.jobmanager.snapshot.JobSnapshot;
import com.netflix.titus.runtime.connector.jobmanager.snapshot.JobSnapshotFactories;
import com.netflix.titus.runtime.endpoint.common.EmptyLogStorageInfo;
import com.netflix.titus.testkit.model.job.JobGenerator;
import com.netflix.titus.testkit.rx.ExtTestSubscriber;
import org.junit.Before;
import org.junit.Test;
import reactor.core.publisher.Sinks;
import reactor.core.scheduler.Schedulers;
import static com.jayway.awaitility.Awaitility.await;
import static com.netflix.titus.gateway.service.v3.internal.LocalCacheQueryProcessor.PARAMETER_USE_CACHE;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@code LocalCacheQueryProcessor}. The job data replicator is mocked,
 * with its event and checkpoint streams driven manually through Reactor sinks so each
 * test can emit snapshots/updates and assert the notifications the processor produces.
 */
public class LocalCacheQueryProcessorTest {
// Queries that select everything, paginated two items at a time.
private static final JobQuery JOB_QUERY_ALL_WITH_PAGE_SIZE_2 = JobQuery.newBuilder()
.setPage(Page.newBuilder().setPageSize(2).build())
.build();
private static final TaskQuery TASK_QUERY_ALL_WITH_PAGE_SIZE_2 = TaskQuery.newBuilder()
.setPage(Page.newBuilder().setPageSize(2).build())
.build();
private static final CallMetadata JUNIT_CALL_METADATA = CallMetadata.newBuilder().withCallerId("junit").build();
private final TitusRuntime titusRuntime = TitusRuntimes.internal();
// Settable configuration backing the GatewayConfiguration proxy, so individual
// tests can flip properties (e.g. the caller-id whitelist) at runtime.
private final SettableConfig configurationMap = new DefaultSettableConfig();
private final GatewayConfiguration configuration = Archaius2Ext.newConfiguration(GatewayConfiguration.class, configurationMap);
private final JobDataReplicator jobDataReplicator = mock(JobDataReplicator.class);
// System under test; Schedulers.immediate() keeps everything on the caller thread.
private final LocalCacheQueryProcessor processor = new LocalCacheQueryProcessor(
configuration,
jobDataReplicator,
EmptyLogStorageInfo.empty(),
Schedulers.immediate(),
titusRuntime
);
// Manually driven sinks standing in for the replicator's event and checkpoint streams.
private final Sinks.Many<Pair<JobSnapshot, JobManagerEvent<?>>> jobDataReplicatorSink = Sinks.many().multicast().directAllOrNothing();
private final Sinks.Many<Long> jobDataReplicatorCheckpointSink = Sinks.many().multicast().directAllOrNothing();
@Before
public void setUp() {
// Start each test from an empty snapshot; streams are backed by the local sinks.
when(jobDataReplicator.getCurrent()).thenReturn(JobSnapshotFactories.newDefaultEmptySnapshot(titusRuntime));
when(jobDataReplicator.events()).thenReturn(jobDataReplicatorSink.asFlux());
when(jobDataReplicator.observeLastCheckpointTimestamp()).thenReturn(jobDataReplicatorCheckpointSink.asFlux());
}
@Test
public void testCanUseCacheByDirectRequest() {
// Cache usage can be requested explicitly via the query parameter.
assertThat(processor.canUseCache(Collections.emptyMap(), "anything", CallMetadataConstants.UNDEFINED_CALL_METADATA)).isFalse();
assertThat(processor.canUseCache(Collections.singletonMap(PARAMETER_USE_CACHE, "true"), "anything", CallMetadataConstants.UNDEFINED_CALL_METADATA)).isTrue();
}
@Test
public void testCanUseCacheByCallerId() {
// Cache usage can also be enabled for callers matching the configured regexp.
assertThat(processor.canUseCache(Collections.emptyMap(), "anything", JUNIT_CALL_METADATA)).isFalse();
configurationMap.setProperty("titusGateway.queryFromCacheCallerId", "junit.*");
assertThat(processor.canUseCache(Collections.emptyMap(), "anything", JUNIT_CALL_METADATA)).isTrue();
}
@Test
public void testFindJob() {
Job<?> job1 = addToJobDataReplicator(newJobAndTasks("job1", 2)).getLeft();
Job<?> job2 = addToJobDataReplicator(newJobAndTasks("job2", 4)).getLeft();
for (Job<?> job : Arrays.asList(job1, job2)) {
com.netflix.titus.grpc.protogen.Job result = processor.findJob(job.getId()).orElse(null);
assertThat(result).isNotNull();
assertThat(result.getId()).isEqualTo(job.getId());
}
}
@Test
public void testFindJobs() {
Job<?> job1 = addToJobDataReplicator(newJobAndTasks("job1", 2)).getLeft();
Job<?> job2 = addToJobDataReplicator(newJobAndTasks("job2", 4)).getLeft();
Job<?> job3 = addToJobDataReplicator(newJobAndTasks("job3", 0)).getLeft();
// Expect two pages
JobQueryResult page1Result = processor.findJobs(JOB_QUERY_ALL_WITH_PAGE_SIZE_2);
assertThat(page1Result.getItemsList()).hasSize(2);
Page page2 = Page.newBuilder().setPageSize(2).setCursor(page1Result.getPagination().getCursor()).build();
JobQueryResult page2Result = processor.findJobs(JobQuery.newBuilder().setPage(page2).build());
assertThat(page2Result.getItemsList()).hasSize(1);
assertThat(page2Result.getPagination().getHasMore()).isFalse();
List<String> jobIds = CollectionsExt.merge(
page1Result.getItemsList().stream().map(com.netflix.titus.grpc.protogen.Job::getId).collect(Collectors.toList()),
page2Result.getItemsList().stream().map(com.netflix.titus.grpc.protogen.Job::getId).collect(Collectors.toList())
);
assertThat(jobIds).contains(job1.getId(), job2.getId(), job3.getId());
}
@Test
public void testFindJobsWithFieldsFilter() {
Job<?> job1 = addToJobDataReplicator(newJobAndTasks("job1", 2)).getLeft();
JobQueryResult page1Result = processor.findJobs(JobQuery.newBuilder()
.addFields("status")
.setPage(Page.newBuilder().setPageSize(2).build())
.build()
);
assertThat(page1Result.getItemsList()).hasSize(1);
com.netflix.titus.grpc.protogen.Job fetchedJob = page1Result.getItemsList().get(0);
// Check that expected fields are set
assertThat(fetchedJob.getId()).isEqualTo(job1.getId());
assertThat(fetchedJob.getStatus().getReasonMessage()).isEqualTo(job1.getStatus().getReasonMessage());
// Check that not requested fields are not set
assertThat(fetchedJob.getJobDescriptor()).isEqualTo(com.netflix.titus.grpc.protogen.JobDescriptor.getDefaultInstance());
}
@Test
public void testFindTasks() {
List<Task> tasks1 = addToJobDataReplicator(newJobAndTasks("job1", 2)).getRight();
List<Task> tasks2 = addToJobDataReplicator(newJobAndTasks("job2", 4)).getRight();
List<Task> tasks3 = addToJobDataReplicator(newJobAndTasks("job3", 0)).getRight();
// Expect three pages
TaskQueryResult page1Result = processor.findTasks(TASK_QUERY_ALL_WITH_PAGE_SIZE_2);
assertThat(page1Result.getItemsList()).hasSize(2);
Page page2 = Page.newBuilder().setPageSize(2).setCursor(page1Result.getPagination().getCursor()).build();
TaskQueryResult page2Result = processor.findTasks(TaskQuery.newBuilder().setPage(page2).build());
assertThat(page2Result.getItemsList()).hasSize(2);
Page page3 = Page.newBuilder().setPageSize(2).setCursor(page2Result.getPagination().getCursor()).build();
TaskQueryResult page3Result = processor.findTasks(TaskQuery.newBuilder().setPage(page3).build());
assertThat(page3Result.getItemsList()).hasSize(2);
assertThat(page3Result.getPagination().getHasMore()).isFalse();
List<String> taskIds = CollectionsExt.merge(
page1Result.getItemsList().stream().map(com.netflix.titus.grpc.protogen.Task::getId).collect(Collectors.toList()),
page2Result.getItemsList().stream().map(com.netflix.titus.grpc.protogen.Task::getId).collect(Collectors.toList()),
page3Result.getItemsList().stream().map(com.netflix.titus.grpc.protogen.Task::getId).collect(Collectors.toList())
);
List<String> expectedTaskIds = CollectionsExt.merge(
tasks1.stream().map(Task::getId).collect(Collectors.toList()),
tasks2.stream().map(Task::getId).collect(Collectors.toList()),
tasks3.stream().map(Task::getId).collect(Collectors.toList())
);
assertThat(taskIds).containsAll(expectedTaskIds);
}
@Test
public void testFindTasksWithFieldsFilter() {
List<Task> tasks1 = addToJobDataReplicator(newJobAndTasks("job1", 2)).getRight();
TaskQueryResult page1Result = processor.findTasks(TaskQuery.newBuilder()
.addFields("status")
.setPage(Page.newBuilder().setPageSize(2).build())
.build()
);
assertThat(page1Result.getItemsList()).hasSize(2);
Task originalTask = tasks1.get(0);
com.netflix.titus.grpc.protogen.Task fetchedTask = page1Result.getItemsList().stream()
.filter(t -> t.getId().equals(originalTask.getId()))
.findFirst().get();
// Check that expected fields are set
assertThat(fetchedTask.getId()).isEqualTo(originalTask.getId());
assertThat(fetchedTask.getStatus().getReasonMessage()).isEqualTo(originalTask.getStatus().getReasonMessage());
// Check that not requested fields are not set
assertThat(fetchedTask.getLogLocation()).isEqualTo(LogLocation.getDefaultInstance());
}
@Test
public void testFindTask() {
Task task1 = addToJobDataReplicator(newJobAndTasks("job1", 2)).getRight().get(0);
Task task2 = addToJobDataReplicator(newJobAndTasks("job2", 4)).getRight().get(0);
for (Task task : Arrays.asList(task1, task2)) {
com.netflix.titus.grpc.protogen.Task result = processor.findTask(task.getId()).orElse(null);
assertThat(result).isNotNull();
assertThat(result.getId()).isEqualTo(task.getId());
}
}
@Test
public void testObserveJobsEmitsEmptySnapshotIfNoJobsAreRunning() throws InterruptedException {
ExtTestSubscriber<JobChangeNotification> subscriber = new ExtTestSubscriber<>();
processor.observeJobs(ObserveJobsQuery.getDefaultInstance()).subscribe(subscriber);
// With no jobs, a snapshot marker alone must still produce a SNAPSHOTEND notification.
emitEvent(Pair.of(jobDataReplicator.getCurrent(), JobManagerEvent.snapshotMarker()));
JobChangeNotification receivedEvent = subscriber.takeNext(30, TimeUnit.SECONDS);
assertThat(receivedEvent.getNotificationCase()).isEqualTo(JobChangeNotification.NotificationCase.SNAPSHOTEND);
}
@Test
public void testObserveJobs() throws InterruptedException {
ExtTestSubscriber<JobChangeNotification> subscriber = new ExtTestSubscriber<>();
processor.observeJobs(ObserveJobsQuery.getDefaultInstance()).subscribe(subscriber);
Pair<Job<?>, List<Task>> jobAndTasks = addToJobDataReplicator(newJobAndTasks("job1", 2));
Job<?> job = jobAndTasks.getLeft();
Task task1 = jobAndTasks.getRight().get(0);
Task task2 = jobAndTasks.getRight().get(1);
// Job update event, which also triggers snapshot
JobUpdateEvent jobUpdateEvent = JobUpdateEvent.newJob(job, JUNIT_CALL_METADATA);
emitEvent(Pair.of(jobDataReplicator.getCurrent(), jobUpdateEvent))//;
;
expectJobUpdateEvent(subscriber, j -> assertThat(j.getId()).isEqualTo(job.getId()));
expectTaskUpdateEvent(subscriber, t -> assertThat(t.getId()).isIn(task1.getId(), task2.getId()));
expectTaskUpdateEvent(subscriber, t -> assertThat(t.getId()).isIn(task1.getId(), task2.getId()));
expectSnapshot(subscriber);
expectJobUpdateEvent(subscriber, j -> assertThat(j.getId()).isEqualTo(job.getId()));
// Task update event
TaskUpdateEvent taskUpdateEvent = TaskUpdateEvent.newTask(job, task1, JUNIT_CALL_METADATA);
emitEvent(Pair.of(jobDataReplicator.getCurrent(), taskUpdateEvent));
expectTaskUpdateEvent(subscriber, t -> assertThat(t.getId()).isEqualTo(task1.getId()));
// Job replicator re-sends events if there is nothing new to keep the stream active. Make sure that
// we filter the keep alive events.
TaskUpdateEvent taskUpdateEvent2 = TaskUpdateEvent.newTask(job, task2, JUNIT_CALL_METADATA);
emitEvent(Pair.of(jobDataReplicator.getCurrent(), JobManagerEvent.keepAliveEvent(-1)));
emitEvent(Pair.of(jobDataReplicator.getCurrent(), taskUpdateEvent2));
expectTaskUpdateEvent(subscriber, t -> assertThat(t.getId()).isEqualTo(task2.getId()));
// Now repeat taskUpdateEvent which this time should go through.
emitEvent(Pair.of(jobDataReplicator.getCurrent(), taskUpdateEvent));
expectTaskUpdateEvent(subscriber, t -> assertThat(t.getId()).isEqualTo(task1.getId()));
// Check that is correctly terminated
jobDataReplicatorSink.tryEmitError(new RuntimeException("simulated stream error"));
await().until(() -> subscriber.getError() != null);
assertThat(subscriber.getError()).isInstanceOf(RuntimeException.class);
}
@Test
public void testObserveJobsWithFieldsFiltering() throws InterruptedException {
Pair<Job<?>, List<Task>> jobAndTasks = addToJobDataReplicator(newJobAndTasks("job1", 1));
Job<?> job1 = jobAndTasks.getLeft();
Task task1 = jobAndTasks.getRight().get(0);
ExtTestSubscriber<JobChangeNotification> subscriber = new ExtTestSubscriber<>();
ObserveJobsQuery query = ObserveJobsQuery.newBuilder()
.addJobFields("status")
.addTaskFields("taskContext")
.build();
processor.observeJobs(query).subscribe(subscriber);
// Snapshot
emitEvent(Pair.of(jobDataReplicator.getCurrent(), JobManagerEvent.snapshotMarker()));
expectJobUpdateEvent(subscriber, j -> {
assertThat(j.getId()).isEqualTo(job1.getId());
assertThat(j.getStatus().getReasonMessage()).isEqualTo("<not_given>");
assertThat(j.getJobDescriptor()).isEqualTo(JobDescriptor.getDefaultInstance());
});
expectTaskUpdateEvent(subscriber, t -> {
assertThat(t.getId()).isEqualTo(task1.getId());
assertThat(t.getStatus()).isEqualTo(TaskStatus.getDefaultInstance());
assertThat(t.getTaskContextMap()).containsAllEntriesOf(task1.getTaskContext());
});
expectSnapshot(subscriber);
// Updates after snapshot marker
Pair<Job<?>, List<Task>> jobAndTasks2 = addToJobDataReplicator(newJobAndTasks("job2", 1));
Job<?> job2 = jobAndTasks2.getLeft();
Task task2 = jobAndTasks2.getRight().get(0);
JobUpdateEvent jobUpdateEvent = JobUpdateEvent.newJob(job2, JUNIT_CALL_METADATA);
TaskUpdateEvent taskUpdateEvent = TaskUpdateEvent.newTask(job2, task2, JUNIT_CALL_METADATA);
emitEvent(Pair.of(jobDataReplicator.getCurrent(), jobUpdateEvent));
emitEvent(Pair.of(jobDataReplicator.getCurrent(), taskUpdateEvent));
expectJobUpdateEvent(subscriber, j -> {
assertThat(j.getId()).isEqualTo(job2.getId());
assertThat(j.getStatus().getReasonMessage()).isEqualTo("<not_given>");
assertThat(j.getJobDescriptor()).isEqualTo(JobDescriptor.getDefaultInstance());
});
expectTaskUpdateEvent(subscriber, t -> {
assertThat(t.getId()).isEqualTo(task2.getId());
assertThat(t.getStatus()).isEqualTo(TaskStatus.getDefaultInstance());
assertThat(t.getTaskContextMap()).containsAllEntriesOf(task2.getTaskContext());
});
}
@Test
public void testObserveJob() throws InterruptedException {
ExtTestSubscriber<JobChangeNotification> subscriber = new ExtTestSubscriber<>();
processor.observeJob("job1").subscribe(subscriber);
Pair<Job<?>, List<Task>> jobAndTasks = addToJobDataReplicator(newJobAndTasks("job1", 2));
Job<?> job2 = addToJobDataReplicator(newJobAndTasks("job2", 2)).getLeft();
Job<?> job1 = jobAndTasks.getLeft();
Task task1 = jobAndTasks.getRight().get(0);
Task task2 = jobAndTasks.getRight().get(1);
// Job update event, which also triggers snapshot
emitEvent(Pair.of(jobDataReplicator.getCurrent(), JobUpdateEvent.newJob(job2, JUNIT_CALL_METADATA)));
emitEvent(Pair.of(jobDataReplicator.getCurrent(), JobUpdateEvent.newJob(job1, JUNIT_CALL_METADATA)));
expectJobUpdateEvent(subscriber, j -> assertThat(j.getId()).isEqualTo(job1.getId()));
expectTaskUpdateEvent(subscriber, t -> assertThat(t.getId()).isIn(task1.getId(), task2.getId()));
expectTaskUpdateEvent(subscriber, t -> assertThat(t.getId()).isIn(task1.getId(), task2.getId()));
expectSnapshot(subscriber);
expectJobUpdateEvent(subscriber, j -> assertThat(j.getId()).isEqualTo(job1.getId()));
// Task update event
TaskUpdateEvent taskUpdateEvent = TaskUpdateEvent.newTask(job1, task1, JUNIT_CALL_METADATA);
emitEvent(Pair.of(jobDataReplicator.getCurrent(), taskUpdateEvent));
expectTaskUpdateEvent(subscriber, t -> assertThat(t.getId()).isEqualTo(task1.getId()));
// Check that is correctly terminated
jobDataReplicatorSink.tryEmitError(new RuntimeException("simulated stream error"));
await().until(() -> subscriber.getError() != null);
assertThat(subscriber.getError()).isInstanceOf(RuntimeException.class);
}
// Asserts that the next emitted notification is the end-of-snapshot marker.
private void expectSnapshot(ExtTestSubscriber<JobChangeNotification> subscriber) throws InterruptedException {
JobChangeNotification receivedEvent = subscriber.takeNext(30, TimeUnit.SECONDS);
assertThat(receivedEvent).isNotNull();
assertThat(receivedEvent.getNotificationCase()).isEqualTo(JobChangeNotification.NotificationCase.SNAPSHOTEND);
}
// Asserts that the next emitted notification is a job update and runs the verifier on its payload.
private void expectJobUpdateEvent(ExtTestSubscriber<JobChangeNotification> subscriber, Consumer<com.netflix.titus.grpc.protogen.Job> verifier) throws InterruptedException {
JobChangeNotification receivedEvent = subscriber.takeNext(30, TimeUnit.SECONDS);
assertThat(receivedEvent).isNotNull();
assertThat(receivedEvent.getNotificationCase()).isEqualTo(JobChangeNotification.NotificationCase.JOBUPDATE);
verifier.accept(receivedEvent.getJobUpdate().getJob());
}
// Asserts that the next emitted notification is a task update and runs the verifier on its payload.
private void expectTaskUpdateEvent(ExtTestSubscriber<JobChangeNotification> subscriber, Consumer<com.netflix.titus.grpc.protogen.Task> verifier) throws InterruptedException {
JobChangeNotification receivedEvent = subscriber.takeNext(30, TimeUnit.SECONDS);
assertThat(receivedEvent).isNotNull();
assertThat(receivedEvent.getNotificationCase()).isEqualTo(JobChangeNotification.NotificationCase.TASKUPDATE);
verifier.accept(receivedEvent.getTaskUpdate().getTask());
}
@Test
public void testSync() {
long startTime = titusRuntime.getClock().wallTime() - 10;
ExtTestSubscriber<Task> subscriber = new ExtTestSubscriber<>();
processor.syncCache("test", Task.class).subscribe(subscriber);
assertThat(subscriber.isUnsubscribed()).isFalse();
// Move forward, but not ahead of the current time threshold
jobDataReplicatorCheckpointSink.emitNext(startTime + 1, Sinks.EmitFailureHandler.FAIL_FAST);
assertThat(subscriber.isUnsubscribed()).isFalse();
// Now pass the threshold
jobDataReplicatorCheckpointSink.emitNext(startTime + 60_000, Sinks.EmitFailureHandler.FAIL_FAST);
await().until(subscriber::isUnsubscribed);
}
// Folds the given job and its tasks into the mocked replicator's current snapshot.
private Pair<Job<?>, List<Task>> addToJobDataReplicator(Pair<Job<?>, List<Task>> jobAndTasks) {
JobSnapshot updated = jobDataReplicator.getCurrent().updateJob(jobAndTasks.getLeft()).orElse(jobDataReplicator.getCurrent());
for (Task task : jobAndTasks.getRight()) {
updated = updated.updateTask(task, false).orElse(updated);
}
when(jobDataReplicator.getCurrent()).thenReturn(updated);
return jobAndTasks;
}
// Generates a batch job with the given id plus taskCount generated tasks.
private static Pair<Job<?>, List<Task>> newJobAndTasks(String jobId, int taskCount) {
Job<BatchJobExt> job = JobFunctions.changeBatchJobSize(
JobGenerator.oneBatchJob().toBuilder().withId(jobId).build(),
taskCount
);
List<Task> tasks = (List) JobGenerator.batchTasks(job).getValues(taskCount);
return Pair.of(job, tasks);
}
// Pushes a (snapshot, event) pair into the replicator event stream, failing fast on backpressure.
private void emitEvent(Pair<JobSnapshot, JobManagerEvent<?>> e) {
jobDataReplicatorSink.emitNext(e, Sinks.EmitFailureHandler.FAIL_FAST);
}
}
0 | Create_ds/titus-control-plane/titus-server-gateway/src/test/java/com/netflix/titus/gateway/service/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/test/java/com/netflix/titus/gateway/service/v3/internal/ExtendedJobSanitizerTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import com.google.common.collect.ImmutableMap;
import com.netflix.titus.api.FeatureRolloutPlans;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.SecurityProfile;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.DisruptionBudget;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.SelfManagedDisruptionBudgetPolicy;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.api.jobmanager.model.job.sanitizer.JobAssertions;
import com.netflix.titus.api.jobmanager.model.job.sanitizer.JobConfiguration;
import com.netflix.titus.api.model.ResourceDimension;
import com.netflix.titus.api.service.TitusServiceException;
import com.netflix.titus.common.data.generator.DataGenerator;
import com.netflix.titus.common.model.sanitizer.EntitySanitizer;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.archaius2.Archaius2Ext;
import com.netflix.titus.runtime.jobmanager.JobManagerConfiguration;
import com.netflix.titus.testkit.model.eviction.DisruptionBudgetGenerator;
import com.netflix.titus.testkit.model.job.JobDescriptorGenerator;
import org.junit.Before;
import org.junit.Test;
import static com.netflix.titus.api.FeatureRolloutPlans.ENVIRONMENT_VARIABLE_NAMES_STRICT_VALIDATION_FEATURE;
import static com.netflix.titus.api.jobmanager.JobAttributes.JOB_CONTAINER_ATTRIBUTE_ACCOUNT_ID;
import static com.netflix.titus.api.jobmanager.JobAttributes.JOB_CONTAINER_ATTRIBUTE_SUBNETS;
import static com.netflix.titus.gateway.service.v3.internal.DisruptionBudgetSanitizer.BATCH_RUNTIME_LIMIT_FACTOR;
import static com.netflix.titus.gateway.service.v3.internal.ExtendedJobSanitizer.TITUS_NON_COMPLIANT_FEATURES;
import static java.util.Arrays.asList;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class ExtendedJobSanitizerTest {
private static final int MIN_DISK_SIZE = 10_000;
private static final List<String> DEFAULT_SECURITY_GROUPS = asList("sg-1", "sg-2");
private static final String DEFAULT_IAM_ROLE = "defaultIamRole";
private static final DisruptionBudget SAMPLE_DISRUPTION_BUDGET = DisruptionBudgetGenerator.budget(
DisruptionBudgetGenerator.percentageOfHealthyPolicy(80),
DisruptionBudgetGenerator.unlimitedRate(),
Collections.singletonList(DisruptionBudgetGenerator.officeHourTimeWindow())
);
private final TitusRuntime titusRuntime = TitusRuntimes.internal();
private final JobManagerConfiguration configuration = mock(JobManagerConfiguration.class);
private final DisruptionBudgetSanitizerConfiguration disruptionBudgetSanitizerConfiguration = Archaius2Ext.newConfiguration(
DisruptionBudgetSanitizerConfiguration.class,
"titusGateway.disruptionBudgetSanitizer.enabled", "true"
);
private final JobConfiguration jobConfiguration = mock(JobConfiguration.class);
private final EntitySanitizer entitySanitizer = mock(EntitySanitizer.class);
private final DisruptionBudgetSanitizer disruptionBudgetSanitizer = new DisruptionBudgetSanitizer(disruptionBudgetSanitizerConfiguration, titusRuntime);
private final JobAssertions jobAssertions = new JobAssertions(jobConfiguration, instance -> ResourceDimension.empty());
@Before
public void setUp() {
// By default no client is whitelisted for non-compliant behavior.
when(configuration.getNoncompliantClientWhiteList()).thenReturn("_none_");
}
@Test
public void testSecurityGroupsAndNoValidationFailures() {
// With the "do not add if missing" predicate off, default security groups are injected.
testSecurityGroupValidation(false, DEFAULT_SECURITY_GROUPS);
}
@Test
public void testSecurityGroupsWithValidationFailures() {
// With the "do not add if missing" predicate on, no defaults are added and the list stays empty.
testSecurityGroupValidation(true, Collections.emptyList());
}
private void testSecurityGroupValidation(boolean doNotAddIfMissing, List<String> expected) {
JobDescriptor<BatchJobExt> jobDescriptor = newJobDescriptorWithSecurityProfile(Collections.emptyList(), "myIamRole");
ExtendedJobSanitizer sanitizer = new ExtendedJobSanitizer(configuration, jobAssertions, entitySanitizer, disruptionBudgetSanitizer, jd -> doNotAddIfMissing, jd -> false, titusRuntime);
when(configuration.getDefaultSecurityGroups()).thenReturn(asList("sg-1", "sg-2"));
Optional<JobDescriptor<BatchJobExt>> sanitized = sanitizer.sanitize(jobDescriptor);
assertThat(sanitized).isPresent();
assertThat(sanitized.get().getContainer().getSecurityProfile().getSecurityGroups()).isEqualTo(expected);
}
@Test
public void testAccountIdSubnetsWithViolationCondition() {
String defaultAccountId = "1000";
String defaultSubnets = "subnet-1,subnet-2";
when(configuration.getDefaultContainerAccountId()).thenReturn(defaultAccountId);
when(configuration.getDefaultSubnets()).thenReturn(defaultSubnets);
// No accountId and subnets attributes in the job descriptor
testAccountIdSubnetsValidationViolationExpected(defaultAccountId, defaultSubnets, newBatchJob().getValue(), defaultAccountId, defaultSubnets);
testAccountIdSubnetsValidationViolationExpected(defaultAccountId, defaultSubnets,
newBatchJobDescriptorWithContainerAttributes(ImmutableMap.of(JOB_CONTAINER_ATTRIBUTE_ACCOUNT_ID, defaultAccountId)), defaultAccountId, defaultSubnets);
}
private void testAccountIdSubnetsValidationViolationExpected(String defaultAccountId, String defaultSubnets, JobDescriptor<BatchJobExt> jobDescriptor, String expectedAccountId, String expectedSubnets) {
Optional<JobDescriptor<BatchJobExt>> jobDescriptorOptional = testAccountIdSubnetsValidation(defaultAccountId, defaultSubnets, jobDescriptor);
assertThat(jobDescriptorOptional).isPresent();
Map<String, String> containerAttributes = jobDescriptorOptional.get().getContainer().getAttributes();
assertThat(containerAttributes.get(JOB_CONTAINER_ATTRIBUTE_ACCOUNT_ID)).isEqualTo(expectedAccountId);
assertThat(containerAttributes.get(JOB_CONTAINER_ATTRIBUTE_SUBNETS)).isEqualTo(expectedSubnets);
}
@Test
public void testAccountIdSubnetsWithNoViolationCondition() {
// No violation is expected for each condition below
// 1. No defaults are specified for the accountId and subnets
testAccountIdSubnetsValidationNoViolationExpected("", "", newBatchJob().getValue());
// 2 and 3. Default is defined for only one of the two attributes
testAccountIdSubnetsValidationNoViolationExpected("1000", "", newBatchJob().getValue());
testAccountIdSubnetsValidationNoViolationExpected("", "subnet-1", newBatchJob().getValue());
// 4. JobDescriptor only has accountId and it is different from the default accountId. No violation despite having no subnets defined
testAccountIdSubnetsValidationNoViolationExpected("1000", "subnet-1",
newBatchJobDescriptorWithContainerAttributes(ImmutableMap.of(JOB_CONTAINER_ATTRIBUTE_ACCOUNT_ID, "1001")));
// 5. JobDescriptor only has subnets and no accountId.
testAccountIdSubnetsValidationNoViolationExpected("1000", "subnet-1",
newBatchJobDescriptorWithContainerAttributes(ImmutableMap.of(JOB_CONTAINER_ATTRIBUTE_SUBNETS, "subnet-2")));
// 6. JobDescriptor contains accountId and subnets different from the defaults
testAccountIdSubnetsValidationNoViolationExpected("1000", "subnet-1", newBatchJobDescriptorWithContainerAttributes("1001", "subnet-2"));
// 7. JobDescriptor contains default values for both attributes
testAccountIdSubnetsValidationNoViolationExpected("1000", "subnet-1", newBatchJobDescriptorWithContainerAttributes("1000", "subnet-1"));
}
private void testAccountIdSubnetsValidationNoViolationExpected(String defaultAccountId, String defaultSubnets, JobDescriptor<BatchJobExt> jobDescriptor) {
assertThat(testAccountIdSubnetsValidation(defaultAccountId, defaultSubnets, jobDescriptor)).isEmpty();
}
private Optional<JobDescriptor<BatchJobExt>> testAccountIdSubnetsValidation(String defaultAccountId, String defaultSubnets, JobDescriptor<BatchJobExt> jobDescriptor) {
when(configuration.getDefaultContainerAccountId()).thenReturn(defaultAccountId);
when(configuration.getDefaultSubnets()).thenReturn(defaultSubnets);
ExtendedJobSanitizer sanitizer = new ExtendedJobSanitizer(configuration, jobAssertions, entitySanitizer, disruptionBudgetSanitizer, jd -> false, jd -> false, titusRuntime);
return sanitizer.sanitize(jobDescriptor);
}
private JobDescriptor<BatchJobExt> newBatchJobDescriptorWithContainerAttributes(String accountId, String subnets) {
return newBatchJobDescriptorWithContainerAttributes(ImmutableMap.of(JOB_CONTAINER_ATTRIBUTE_ACCOUNT_ID, accountId, JOB_CONTAINER_ATTRIBUTE_SUBNETS, subnets));
}
private JobDescriptor<BatchJobExt> newBatchJobDescriptorWithContainerAttributes(Map<String, String> containerAttributes) {
DataGenerator<JobDescriptor<BatchJobExt>> jobDescriptorDataGenerator = newBatchJob();
JobDescriptor<BatchJobExt> jobDescriptor;
if (!CollectionsExt.isNullOrEmpty(containerAttributes)) {
jobDescriptor = jobDescriptorDataGenerator.map(jd -> jd.but(d -> d.getContainer().toBuilder().withAttributes(containerAttributes))).getValue();
} else {
jobDescriptor = jobDescriptorDataGenerator.getValue();
}
return jobDescriptor;
}
@Test
public void testIamRoleAndNoValidationFailures() {
testIamRoleValidation(false, DEFAULT_IAM_ROLE);
}
@Test
public void testIamRoleWithValidationFailures() {
testIamRoleValidation(true, "");
}
private void testIamRoleValidation(boolean doNotAddIfMissing, String expected) {
JobDescriptor<BatchJobExt> jobDescriptor = newJobDescriptorWithSecurityProfile(DEFAULT_SECURITY_GROUPS, "");
ExtendedJobSanitizer sanitizer = new ExtendedJobSanitizer(configuration, jobAssertions, entitySanitizer, disruptionBudgetSanitizer, jd -> doNotAddIfMissing, jd -> false, titusRuntime);
when(configuration.getDefaultIamRole()).thenReturn(DEFAULT_IAM_ROLE);
Optional<JobDescriptor<BatchJobExt>> sanitized = sanitizer.sanitize(jobDescriptor);
assertThat(sanitized).isPresent();
assertThat(sanitized.get().getContainer().getSecurityProfile().getIamRole()).isEqualTo(expected);
}
@Test
public void testDiskSizeIsChangedToMin() {
JobDescriptor<BatchJobExt> jobDescriptor = newJobDescriptorWithDiskSize(100);
when(configuration.getMinDiskSizeMB()).thenReturn(MIN_DISK_SIZE);
when(entitySanitizer.sanitize(any())).thenReturn(Optional.of(jobDescriptor));
ExtendedJobSanitizer sanitizer = new ExtendedJobSanitizer(configuration, jobAssertions, entitySanitizer, disruptionBudgetSanitizer, jd -> false, jd -> false, titusRuntime);
Optional<JobDescriptor<BatchJobExt>> sanitizedJobDescriptorOpt = sanitizer.sanitize(jobDescriptor);
JobDescriptor<BatchJobExt> sanitizedJobDescriptor = sanitizedJobDescriptorOpt.get();
assertThat(sanitizedJobDescriptor).isNotNull();
assertThat(sanitizedJobDescriptor.getContainer().getContainerResources().getDiskMB()).isEqualTo(MIN_DISK_SIZE);
String nonCompliant = sanitizedJobDescriptor.getAttributes().get(TITUS_NON_COMPLIANT_FEATURES);
assertThat(nonCompliant).contains(FeatureRolloutPlans.MIN_DISK_SIZE_STRICT_VALIDATION_FEATURE);
}
@Test
public void testDiskSizeIsNotChanged() {
JobDescriptor<BatchJobExt> jobDescriptor = newJobDescriptorWithDiskSize(11_000);
when(configuration.getMinDiskSizeMB()).thenReturn(MIN_DISK_SIZE);
when(entitySanitizer.sanitize(any())).thenReturn(Optional.of(jobDescriptor));
ExtendedJobSanitizer sanitizer = new ExtendedJobSanitizer(configuration, jobAssertions, entitySanitizer, disruptionBudgetSanitizer, jd -> false, jd -> false, titusRuntime);
Optional<JobDescriptor<BatchJobExt>> sanitizedJobDescriptorOpt = sanitizer.sanitize(jobDescriptor);
assertThat(sanitizedJobDescriptorOpt).isEmpty();
}
@Test
public void testFlatStringEntryPoint() {
JobDescriptor<?> jobDescriptor = newBatchJob()
.map(jd -> jd.but(d -> d.getContainer().toBuilder()
.withEntryPoint(Collections.singletonList("/bin/sh -c \"sleep 10\""))
.withCommand(null)))
.getValue();
ExtendedJobSanitizer sanitizer = new ExtendedJobSanitizer(configuration, jobAssertions, entitySanitizer, disruptionBudgetSanitizer, jd -> false, jd -> false, titusRuntime);
Optional<JobDescriptor<?>> sanitized = sanitizer.sanitize(jobDescriptor);
assertThat(sanitized).isPresent();
Map<String, String> attributes = sanitized.get().getAttributes();
assertThat(attributes).containsKey(TITUS_NON_COMPLIANT_FEATURES);
List<String> problems = asList(attributes.get(TITUS_NON_COMPLIANT_FEATURES).split(","));
assertThat(problems).contains(FeatureRolloutPlans.ENTRY_POINT_STRICT_VALIDATION_FEATURE);
}
@Test
public void testValidEntryPoint() {
JobDescriptor<?> jobDescriptor = newBatchJob()
.map(jd -> jd.but(d -> d.getContainer().toBuilder()
.withEntryPoint(asList("/bin/sh", "-c", "sleep 10"))))
.getValue();
ExtendedJobSanitizer sanitizer = new ExtendedJobSanitizer(configuration, jobAssertions, entitySanitizer, disruptionBudgetSanitizer, jd -> false, jd -> false, titusRuntime);
Optional<JobDescriptor<?>> sanitized = sanitizer.sanitize(jobDescriptor);
assertThat(sanitized).isNotPresent();
}
@Test
public void testJobsWithCommandAreNotMarkedNonCompliant() {
// ... because they never relied on shell parsing
JobDescriptor<?> jobDescriptor = newBatchJob()
.map(jd -> jd.but(d -> d.getContainer().toBuilder()
.withEntryPoint(Collections.singletonList("a binary with spaces"))
.withCommand(asList("some", "arguments"))))
.getValue();
ExtendedJobSanitizer sanitizer = new ExtendedJobSanitizer(configuration, jobAssertions, entitySanitizer, disruptionBudgetSanitizer, jd -> false, jd -> false, titusRuntime);
Optional<JobDescriptor<?>> sanitized = sanitizer.sanitize(jobDescriptor);
assertThat(sanitized).isNotPresent();
}
@Test
public void testEnvironmentNamesWithInvalidCharactersAndNoValidationFailures() {
JobDescriptor<BatchJobExt> jobDescriptor = newJobDescriptorWithEnvironment(";;;", "value");
ExtendedJobSanitizer sanitizer = new ExtendedJobSanitizer(configuration, jobAssertions, entitySanitizer, disruptionBudgetSanitizer, jd -> false, jd -> false, titusRuntime);
Optional<JobDescriptor<BatchJobExt>> sanitized = sanitizer.sanitize(jobDescriptor);
assertThat(sanitized).isNotEmpty();
assertThat(sanitized.get().getAttributes().get(TITUS_NON_COMPLIANT_FEATURES)).isEqualTo(ENVIRONMENT_VARIABLE_NAMES_STRICT_VALIDATION_FEATURE);
}
@Test(expected = TitusServiceException.class)
public void testEnvironmentNamesWithInvalidCharactersAndWithValidationFailures() {
JobDescriptor<BatchJobExt> jobDescriptor = newJobDescriptorWithEnvironment(";;;", "value");
ExtendedJobSanitizer sanitizer = new ExtendedJobSanitizer(configuration, jobAssertions, entitySanitizer, disruptionBudgetSanitizer, jd -> false, jd -> true, titusRuntime);
sanitizer.sanitize(jobDescriptor);
}
@Test
public void testTitusAttributesAreResetIfProvidedByUser() {
JobDescriptor<BatchJobExt> jobDescriptor = newBatchJob().getValue().toBuilder()
.withAttributes(ImmutableMap.<String, String>builder()
.put("myApp.a", "b")
.put(TITUS_NON_COMPLIANT_FEATURES + "a", "b")
.build()
)
.build();
ExtendedJobSanitizer sanitizer = new ExtendedJobSanitizer(configuration, jobAssertions, entitySanitizer, disruptionBudgetSanitizer, jd -> false, jd -> false, titusRuntime);
Optional<JobDescriptor<BatchJobExt>> sanitized = sanitizer.sanitize(jobDescriptor);
assertThat(sanitized).isNotEmpty();
assertThat(sanitized.get().getAttributes()).containsOnlyKeys("myApp.a");
}
@Test
public void testLegacyServiceJobDisruptionBudgetRewrite() {
JobDescriptor<ServiceJobExt> jobDescriptor = newServiceJob().getValue().toBuilder()
.withDisruptionBudget(DisruptionBudget.none())
.build();
ExtendedJobSanitizer sanitizer = new ExtendedJobSanitizer(configuration, jobAssertions, entitySanitizer, disruptionBudgetSanitizer, jd -> false, jd -> false, titusRuntime);
Optional<JobDescriptor<ServiceJobExt>> sanitizedOpt = sanitizer.sanitize(jobDescriptor);
assertThat(sanitizedOpt).isNotEmpty();
JobDescriptor<ServiceJobExt> sanitized = sanitizedOpt.get();
String nonCompliant = sanitized.getAttributes().get(TITUS_NON_COMPLIANT_FEATURES);
assertThat(nonCompliant).contains(JobFeatureComplianceChecks.DISRUPTION_BUDGET_FEATURE);
SelfManagedDisruptionBudgetPolicy policy = (SelfManagedDisruptionBudgetPolicy) sanitized.getDisruptionBudget().getDisruptionBudgetPolicy();
assertThat(policy.getRelocationTimeMs()).isEqualTo(DisruptionBudgetSanitizer.DEFAULT_SERVICE_RELOCATION_TIME_MS);
}
@Test
public void testLegacyBatchJobDisruptionBudgetRewrite() {
JobDescriptor<BatchJobExt> jobDescriptor = newBatchJob().getValue().toBuilder()
.withDisruptionBudget(DisruptionBudget.none())
.build();
ExtendedJobSanitizer sanitizer = new ExtendedJobSanitizer(configuration, jobAssertions, entitySanitizer, disruptionBudgetSanitizer, jd -> false, jd -> false, titusRuntime);
Optional<JobDescriptor<BatchJobExt>> sanitizedOpt = sanitizer.sanitize(jobDescriptor);
assertThat(sanitizedOpt).isNotEmpty();
JobDescriptor<BatchJobExt> sanitized = sanitizedOpt.get();
String nonCompliant = sanitized.getAttributes().get(TITUS_NON_COMPLIANT_FEATURES);
assertThat(nonCompliant).contains(JobFeatureComplianceChecks.DISRUPTION_BUDGET_FEATURE);
SelfManagedDisruptionBudgetPolicy policy = (SelfManagedDisruptionBudgetPolicy) sanitized.getDisruptionBudget().getDisruptionBudgetPolicy();
assertThat(policy.getRelocationTimeMs()).isEqualTo(
(long) ((jobDescriptor.getExtensions()).getRuntimeLimitMs() * BATCH_RUNTIME_LIMIT_FACTOR)
);
}
private DataGenerator<JobDescriptor<BatchJobExt>> newBatchJob() {
return JobDescriptorGenerator.batchJobDescriptors().map(jobDescriptor ->
jobDescriptor.toBuilder().withDisruptionBudget(SAMPLE_DISRUPTION_BUDGET).build()
);
}
private DataGenerator<JobDescriptor<ServiceJobExt>> newServiceJob() {
return JobDescriptorGenerator.serviceJobDescriptors().map(jobDescriptor ->
jobDescriptor.toBuilder().withDisruptionBudget(SAMPLE_DISRUPTION_BUDGET).build()
);
}
private JobDescriptor<BatchJobExt> newJobDescriptorWithSecurityProfile(List<String> securityGroups, String iamRole) {
SecurityProfile securityProfile = SecurityProfile.newBuilder()
.withIamRole(iamRole)
.withSecurityGroups(securityGroups)
.build();
return newBatchJob()
.map(jd -> jd.but(d -> d.getContainer().but(c -> c.toBuilder().withSecurityProfile(securityProfile).build())))
.getValue();
}
private JobDescriptor<BatchJobExt> newJobDescriptorWithEnvironment(String key, String value) {
return newBatchJob()
.map(jd -> jd.but(d -> d.getContainer().but(c -> c.toBuilder().withEnv(Collections.singletonMap(key, value)).build())))
.getValue();
}
private JobDescriptor<BatchJobExt> newJobDescriptorWithDiskSize(int diskSize) {
return newBatchJob()
.map(jd -> jd.but(d -> d.getContainer().but(c -> c.getContainerResources().toBuilder().withDiskMB(diskSize))))
.getValue();
}
}
| 9,364 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/test/java/com/netflix/titus/gateway/service/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/test/java/com/netflix/titus/gateway/service/v3/internal/TaskDataInjectorTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import java.util.Arrays;
import java.util.HashMap;
import com.netflix.titus.api.FeatureActivationConfiguration;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan.TaskRelocationReason;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.grpc.protogen.BasicImage;
import com.netflix.titus.grpc.protogen.JobChangeNotification;
import com.netflix.titus.grpc.protogen.MigrationDetails;
import com.netflix.titus.grpc.protogen.Task;
import com.netflix.titus.grpc.protogen.TaskQueryResult;
import com.netflix.titus.runtime.connector.GrpcClientConfiguration;
import com.netflix.titus.runtime.connector.kubernetes.fabric8io.Fabric8IOConnector;
import com.netflix.titus.runtime.connector.relocation.RelocationDataReplicator;
import com.netflix.titus.runtime.connector.relocation.TaskRelocationSnapshot;
import com.netflix.titus.runtime.endpoint.common.EmptyLogStorageInfo;
import com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters;
import com.netflix.titus.testkit.model.job.JobGenerator;
import io.fabric8.kubernetes.api.model.ContainerStatus;
import io.fabric8.kubernetes.api.model.ObjectMeta;
import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.api.model.PodStatus;
import org.junit.Before;
import org.junit.Test;
import rx.Observable;
import rx.schedulers.Schedulers;
import rx.schedulers.TestScheduler;
import static com.netflix.titus.gateway.service.v3.internal.TaskDataInjector.buildBasicImageFromContainerStatus;
import static com.netflix.titus.runtime.kubernetes.KubeConstants.ANNOTATION_KEY_IMAGE_TAG_PREFIX;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TaskDataInjectorTest {
private static final long REQUEST_TIMEOUT_MS = 1_000L;
private final TestScheduler testScheduler = Schedulers.test();
private final TitusRuntime titusRuntime = TitusRuntimes.test(testScheduler);
private static final Task TASK1 = GrpcJobManagementModelConverters.toGrpcTask(JobGenerator.oneBatchTask().toBuilder().withId("task1").build(), EmptyLogStorageInfo.empty());
private static final Task TASK2 = GrpcJobManagementModelConverters.toGrpcTask(JobGenerator.oneBatchTask().toBuilder().withId("task2").build(), EmptyLogStorageInfo.empty());
private final GrpcClientConfiguration grpcConfiguration = mock(GrpcClientConfiguration.class);
private final FeatureActivationConfiguration featureActivationConfiguration = mock(FeatureActivationConfiguration.class);
private final RelocationDataReplicator relocationDataReplicator = mock(RelocationDataReplicator.class);
private final Fabric8IOConnector kubeApiConnector = mock(Fabric8IOConnector.class);
private final TaskDataInjector taskDataInjector = new TaskDataInjector(featureActivationConfiguration, relocationDataReplicator, kubeApiConnector);
@Before
public void setUp() {
when(grpcConfiguration.getRequestTimeout()).thenReturn(REQUEST_TIMEOUT_MS);
when(featureActivationConfiguration.isMergingTaskMigrationPlanInGatewayEnabled()).thenReturn(true);
}
@Test
public void testTaskUpdateEventWithRelocationDeadline() {
long deadlineTimestamp = titusRuntime.getClock().wallTime() + 1_000;
when(relocationDataReplicator.getCurrent()).thenReturn(
newRelocationSnapshot(newRelocationPlan(TASK1, deadlineTimestamp))
);
JobChangeNotification event = JobChangeNotification.newBuilder()
.setTaskUpdate(JobChangeNotification.TaskUpdate.newBuilder().setTask(TASK1).build())
.build();
JobChangeNotification updatedEvent = taskDataInjector.injectIntoTaskUpdateEvent(event);
Task merged = updatedEvent.getTaskUpdate().getTask();
assertThat(merged.getMigrationDetails().getNeedsMigration()).isTrue();
assertThat(merged.getMigrationDetails().getDeadline()).isEqualTo(deadlineTimestamp);
}
@Test
public void testTaskUpdateEventWithoutRelocationDeadline() {
when(relocationDataReplicator.getCurrent()).thenReturn(TaskRelocationSnapshot.empty());
JobChangeNotification event = JobChangeNotification.newBuilder()
.setTaskUpdate(JobChangeNotification.TaskUpdate.newBuilder().setTask(TASK1).build())
.build();
JobChangeNotification updatedEvent = taskDataInjector.injectIntoTaskUpdateEvent(event);
assertThat(updatedEvent).isEqualTo(event);
}
private TaskRelocationSnapshot newRelocationSnapshot(TaskRelocationPlan... plans) {
TaskRelocationSnapshot.Builder builder = TaskRelocationSnapshot.newBuilder();
for (TaskRelocationPlan plan : plans) {
builder.addPlan(plan);
}
return builder.build();
}
@Test
public void testFindTaskWithRelocationDeadline() {
long deadlineTimestamp = titusRuntime.getClock().wallTime() + 1_000;
when(relocationDataReplicator.getCurrent()).thenReturn(
newRelocationSnapshot(newRelocationPlan(TASK1, deadlineTimestamp))
);
Task merged = Observable.just(TASK1).map(taskDataInjector::injectIntoTask).toBlocking().first();
assertThat(merged.getMigrationDetails().getNeedsMigration()).isTrue();
assertThat(merged.getMigrationDetails().getDeadline()).isEqualTo(deadlineTimestamp);
}
@Test
public void testFindTaskWithoutRelocationDeadline() {
when(relocationDataReplicator.getCurrent()).thenReturn(TaskRelocationSnapshot.empty());
Task merged = Observable.just(TASK1).map(taskDataInjector::injectIntoTask).toBlocking().first();
assertThat(merged.getMigrationDetails().getNeedsMigration()).isFalse();
}
@Test
public void testFindTaskWithLegacyMigration() {
long deadlineTimestamp = titusRuntime.getClock().wallTime() + 1_000;
Task legacyTask = toLegacyTask(TASK1, deadlineTimestamp);
when(relocationDataReplicator.getCurrent()).thenReturn(TaskRelocationSnapshot.empty());
Observable.just(legacyTask).map(taskDataInjector::injectIntoTask).toBlocking().first();
assertThat(legacyTask).isEqualToComparingFieldByField(legacyTask);
}
@Test
public void testFindTasksWithRelocationDeadline() {
long deadline1 = titusRuntime.getClock().wallTime() + 1_000;
long deadline2 = titusRuntime.getClock().wallTime() + 2_000;
TaskQueryResult queryResult = TaskQueryResult.newBuilder()
.addItems(TASK1)
.addItems(TASK2)
.build();
when(relocationDataReplicator.getCurrent()).thenReturn(newRelocationSnapshot(
newRelocationPlan(TASK1, deadline1),
newRelocationPlan(TASK2, deadline2)
));
TaskQueryResult merged = Observable.just(queryResult).map(queryResult1 -> taskDataInjector.injectIntoTaskQueryResult(queryResult1)).toBlocking().first();
assertThat(merged.getItems(0).getMigrationDetails().getNeedsMigration()).isTrue();
assertThat(merged.getItems(0).getMigrationDetails().getDeadline()).isEqualTo(deadline1);
assertThat(merged.getItems(1).getMigrationDetails().getNeedsMigration()).isTrue();
assertThat(merged.getItems(1).getMigrationDetails().getDeadline()).isEqualTo(deadline2);
}
@Test
public void testFindTasksWithoutRelocationDeadline() {
long deadline1 = titusRuntime.getClock().wallTime() + 1_000;
TaskQueryResult queryResult = TaskQueryResult.newBuilder()
.addItems(TASK1)
.addItems(TASK2)
.build();
when(relocationDataReplicator.getCurrent()).thenReturn(newRelocationSnapshot(newRelocationPlan(TASK1, deadline1)));
TaskQueryResult merged = Observable.just(queryResult).map(queryResult1 -> taskDataInjector.injectIntoTaskQueryResult(queryResult1)).toBlocking().first();
assertThat(merged.getItems(0).getMigrationDetails().getNeedsMigration()).isTrue();
assertThat(merged.getItems(0).getMigrationDetails().getDeadline()).isEqualTo(deadline1);
assertThat(merged.getItems(1).getMigrationDetails().getNeedsMigration()).isFalse();
}
@Test
public void testFindTasksWithLegacyMigration() {
long deadline1 = titusRuntime.getClock().wallTime() + 1_000;
long legacyDeadline2 = titusRuntime.getClock().wallTime() + 2_000;
Task legacyTask = toLegacyTask(TASK2, legacyDeadline2);
TaskQueryResult queryResult = TaskQueryResult.newBuilder()
.addItems(TASK1)
.addItems(legacyTask)
.build();
when(relocationDataReplicator.getCurrent()).thenReturn(newRelocationSnapshot(newRelocationPlan(TASK1, deadline1)));
TaskQueryResult merged = Observable.just(queryResult).map(queryResult1 -> taskDataInjector.injectIntoTaskQueryResult(queryResult1)).toBlocking().first();
assertThat(merged.getItems(0).getMigrationDetails().getNeedsMigration()).isTrue();
assertThat(merged.getItems(0).getMigrationDetails().getDeadline()).isEqualTo(deadline1);
assertThat(merged.getItems(1)).isEqualTo(legacyTask);
}
@Test
public void testBuildBasicImageFromContainerStatus() {
Pod pod = new Pod();
ObjectMeta metadata = new ObjectMeta();
HashMap<String, String> annotations = new HashMap<>();
annotations.put(ANNOTATION_KEY_IMAGE_TAG_PREFIX + "container1", "container1sTag");
metadata.setAnnotations(annotations);
pod.setMetadata(metadata);
String imageWithDigest = "registry.example.com/container1Image@sha123:123456";
String imageWithTag = "registry.example.com/container2Image:mytag";
ContainerStatus containerStatus1 = new ContainerStatus();
containerStatus1.setName("container1");
containerStatus1.setImage(imageWithDigest);
ContainerStatus containerStatus2 = new ContainerStatus();
containerStatus2.setName("container2");
containerStatus2.setImage(imageWithTag);
PodStatus status = new PodStatus();
status.setContainerStatuses(Arrays.asList(containerStatus1, containerStatus2));
pod.setStatus(status);
BasicImage bi1 = buildBasicImageFromContainerStatus(pod, imageWithDigest, "container1");
assertThat(bi1.getName()).isEqualTo("container1Image");
assertThat(bi1.getTag()).isEqualTo("container1sTag");
assertThat(bi1.getDigest()).isEqualTo("sha123:123456");
BasicImage bi2 = buildBasicImageFromContainerStatus(pod, imageWithTag, "container2");
assertThat(bi2.getName()).isEqualTo("container2Image");
assertThat(bi2.getTag()).isEqualTo("mytag");
assertThat(bi2.getDigest()).isEqualTo("");
}
private Task toLegacyTask(Task task, long deadlineTimestamp) {
return task.toBuilder().setMigrationDetails(MigrationDetails.newBuilder()
.setNeedsMigration(true)
.setDeadline(deadlineTimestamp)
.build()
).build();
}
private TaskRelocationPlan newRelocationPlan(Task task, long deadlineTimestamp) {
return TaskRelocationPlan.newBuilder()
.withTaskId(task.getId())
.withReason(TaskRelocationReason.TaskMigration)
.withRelocationTime(deadlineTimestamp)
.build();
}
} | 9,365 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/test/java/com/netflix/titus/gateway/service/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/test/java/com/netflix/titus/gateway/service/v3/internal/GatewayJobServiceGatewayTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.function.Function;
import java.util.stream.Collectors;
import com.netflix.titus.api.jobmanager.model.job.BatchJobTask;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.grpc.protogen.Page;
import com.netflix.titus.grpc.protogen.Pagination;
import com.netflix.titus.grpc.protogen.Task;
import com.netflix.titus.grpc.protogen.TaskQuery;
import com.netflix.titus.grpc.protogen.TaskQueryResult;
import com.netflix.titus.runtime.endpoint.common.EmptyLogStorageInfo;
import com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters;
import com.netflix.titus.testkit.model.job.JobGenerator;
import org.junit.Test;
import static com.netflix.titus.common.util.Evaluators.evaluateTimes;
import static org.assertj.core.api.AssertionsForClassTypes.assertThat;
public class GatewayJobServiceGatewayTest {
private static final int PAGE_SIZE = 2;
private static final Page FIRST_PAGE = Page.newBuilder().setPageSize(PAGE_SIZE).build();
private static final int JOB_SIZE = 6;
private static final int ARCHIVED_TASKS_COUNT = JOB_SIZE * 2;
private static final int ALL_TASKS_COUNT = JOB_SIZE + ARCHIVED_TASKS_COUNT;
private static final Job<BatchJobExt> JOB = JobGenerator.batchJobsOfSize(JOB_SIZE).getValue();
private static final List<Task> ARCHIVED_TASKS = evaluateTimes(ARCHIVED_TASKS_COUNT, i -> newGrpcTask(i, TaskState.Finished));
private static final List<Task> ACTIVE_TASKS = evaluateTimes(JOB_SIZE, i -> newGrpcTask(ARCHIVED_TASKS_COUNT + i, TaskState.Started));
@Test
public void verifyDeDupTaskIds() {
List<String> expectedIdList = new ArrayList<String>() {{
add("task1");
add("task2");
add("task3");
add("task4");
}};
// with duplicate ids
List<Task> activeTasks = new ArrayList<>();
activeTasks.add(Task.newBuilder().setId("task1").build());
activeTasks.add(Task.newBuilder().setId("task2").build());
activeTasks.add(Task.newBuilder().setId("task3").build());
List<Task> archivedTasks = new ArrayList<>();
archivedTasks.add(Task.newBuilder().setId("task2").build());
archivedTasks.add(Task.newBuilder().setId("task3").build());
archivedTasks.add(Task.newBuilder().setId("task4").build());
List<Task> tasks = GatewayJobServiceGateway.deDupTasks(activeTasks, archivedTasks);
assertThat(tasks.size()).isEqualTo(4);
List<String> taskIds = tasks.stream().map(Task::getId).sorted().collect(Collectors.toList());
assertThat(taskIds).isEqualTo(expectedIdList);
// disjoint lists
activeTasks = new ArrayList<>();
activeTasks.add(Task.newBuilder().setId("task1").build());
activeTasks.add(Task.newBuilder().setId("task2").build());
archivedTasks = new ArrayList<>();
archivedTasks.add(Task.newBuilder().setId("task3").build());
archivedTasks.add(Task.newBuilder().setId("task4").build());
tasks = GatewayJobServiceGateway.deDupTasks(activeTasks, archivedTasks);
assertThat(tasks.size()).isEqualTo(4);
taskIds = tasks.stream().map(Task::getId).collect(Collectors.toList());
Collections.sort(taskIds);
assertThat(taskIds).isEqualTo(expectedIdList);
}
@Test
public void testCombineActiveAndArchiveTaskResultSetWithCursor() {
testCombineActiveAndArchiveTaskResultSet(this::newTaskQueryWithCursor, this::takeActivePage);
}
@Test
public void testCombineActiveAndArchiveTaskResultSetWithPageNumber() {
testCombineActiveAndArchiveTaskResultSet(this::newTaskQueryWithPagNumber, p -> takeAllActive());
}
private void testCombineActiveAndArchiveTaskResultSet(Function<Pagination, TaskQuery> queryFunction, Function<Integer, TaskQueryResult> activePageResultFunction) {
// First page
TaskQueryResult combinedResult = testCombineForFirstPage();
// Iterate using page numbers
// Archive tasks first.
Pagination lastPagination = combinedResult.getPagination();
for (int p = 1; p < ARCHIVED_TASKS_COUNT / PAGE_SIZE; p++) {
TaskQuery cursorQuery = queryFunction.apply(lastPagination);
TaskQueryResult cursorResult = GatewayJobServiceGateway.combineTaskResults(cursorQuery, activePageResultFunction.apply(0), ARCHIVED_TASKS);
checkCombinedResult(cursorResult, ARCHIVED_TASKS.subList(p * PAGE_SIZE, (p + 1) * PAGE_SIZE));
lastPagination = cursorResult.getPagination();
}
// Now fetch the active data
for (int p = 0; p < JOB_SIZE / PAGE_SIZE; p++) {
TaskQuery cursorQuery = queryFunction.apply(lastPagination);
TaskQueryResult cursorResult = GatewayJobServiceGateway.combineTaskResults(cursorQuery, activePageResultFunction.apply(p), ARCHIVED_TASKS);
checkCombinedResult(cursorResult, ACTIVE_TASKS.subList(p * PAGE_SIZE, (p + 1) * PAGE_SIZE));
lastPagination = cursorResult.getPagination();
}
}
private TaskQueryResult testCombineForFirstPage() {
TaskQuery taskQuery = TaskQuery.newBuilder().setPage(FIRST_PAGE).build();
TaskQueryResult page0ActiveSetResult = takeActivePage(0);
TaskQueryResult combinedResult = GatewayJobServiceGateway.combineTaskResults(taskQuery, page0ActiveSetResult, ARCHIVED_TASKS);
checkCombinedResult(combinedResult, ARCHIVED_TASKS.subList(0, PAGE_SIZE));
return combinedResult;
}
private TaskQuery newTaskQueryWithCursor(Pagination lastPagination) {
return TaskQuery.newBuilder()
.setPage(Page.newBuilder().setPageSize(PAGE_SIZE).setCursor(lastPagination.getCursor()))
.build();
}
// Builds a query for the page following the previously returned one, using page numbers instead of cursors.
// NOTE(review): method name has a typo ("PagNumber"); kept as-is because callers reference it by name.
private TaskQuery newTaskQueryWithPagNumber(Pagination lastPagination) {
    int nextPageNumber = lastPagination.getCurrentPage().getPageNumber() + 1;
    Page page = Page.newBuilder()
            .setPageSize(PAGE_SIZE)
            .setPageNumber(nextPageNumber)
            .build();
    return TaskQuery.newBuilder().setPage(page).build();
}
// Returns a single page of the active task set, with pagination metadata pointing at the given page number.
private TaskQueryResult takeActivePage(int pageNumber) {
    int from = pageNumber * PAGE_SIZE;
    Pagination.Builder pagination = Pagination.newBuilder()
            .setCurrentPage(FIRST_PAGE.toBuilder().setPageNumber(pageNumber))
            .setTotalItems(ACTIVE_TASKS.size());
    return TaskQueryResult.newBuilder()
            .setPagination(pagination)
            .addAllItems(ACTIVE_TASKS.subList(from, from + PAGE_SIZE))
            .build();
}
// Returns the complete active task set as a single result page.
private TaskQueryResult takeAllActive() {
    Pagination.Builder pagination = Pagination.newBuilder()
            .setCurrentPage(FIRST_PAGE)
            .setTotalItems(ACTIVE_TASKS.size());
    return TaskQueryResult.newBuilder()
            .setPagination(pagination)
            .addAllItems(ACTIVE_TASKS)
            .build();
}
/**
 * Asserts that the combined result carries correct pagination metadata and exactly the expected
 * task list.
 */
private void checkCombinedResult(TaskQueryResult combinedResult, List<Task> expectedTaskList) {
    assertThat(combinedResult.getPagination().getCursor()).isNotNull();
    assertThat(combinedResult.getPagination().getTotalItems()).isEqualTo(ALL_TASKS_COUNT);
    assertThat(combinedResult.getPagination().getTotalPages()).isEqualTo(ALL_TASKS_COUNT / PAGE_SIZE);
    // Fixed: AssertJ's actual value goes into assertThat(...), the expectation into isEqualTo(...);
    // the original had them swapped, which produces misleading failure messages.
    assertThat(combinedResult.getItemsList()).isEqualTo(expectedTaskList);
}
// Creates a gRPC Task with a deterministic id and a status derived from the given index.
private static Task newGrpcTask(int index, TaskState taskState) {
    com.netflix.titus.api.jobmanager.model.job.TaskStatus status =
            com.netflix.titus.api.jobmanager.model.job.TaskStatus.newBuilder()
                    .withState(taskState)
                    .withTimestamp(index)
                    .build();
    BatchJobTask coreTask = JobGenerator.batchTasks(JOB).getValue().toBuilder()
            .withId("task#" + index)
            .withStatus(status)
            .build();
    return GrpcJobManagementModelConverters.toGrpcTask(coreTask, EmptyLogStorageInfo.empty());
}
}
| 9,366 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/test/java/com/netflix/titus/gateway/service/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/test/java/com/netflix/titus/gateway/service/v3/internal/NeedsMigrationQueryHandlerTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import java.util.List;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.model.Page;
import com.netflix.titus.api.model.PageResult;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.grpc.protogen.JobDescriptor;
import com.netflix.titus.grpc.protogen.TaskStatus;
import com.netflix.titus.runtime.connector.jobmanager.JobDataReplicator;
import com.netflix.titus.runtime.connector.jobmanager.snapshot.JobSnapshot;
import com.netflix.titus.runtime.connector.jobmanager.snapshot.JobSnapshotFactories;
import com.netflix.titus.runtime.connector.relocation.RelocationDataReplicator;
import com.netflix.titus.runtime.connector.relocation.TaskRelocationSnapshot;
import com.netflix.titus.runtime.endpoint.JobQueryCriteria;
import com.netflix.titus.runtime.endpoint.common.EmptyLogStorageInfo;
import com.netflix.titus.testkit.model.job.JobGenerator;
import org.junit.Before;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@code NeedsMigrationQueryHandler}. The handler serves "needs migration" job/task
 * queries from locally replicated job and relocation snapshots; these tests install snapshots via
 * mocked replicators and verify the filtering behavior.
 */
public class NeedsMigrationQueryHandlerTest {

    // Criteria matching everything that needs migration, with no additional filters.
    private static final JobQueryCriteria<TaskStatus.TaskState, JobDescriptor.JobSpecCase> QUERY_ALL = JobQueryCriteria.<TaskStatus.TaskState, JobDescriptor.JobSpecCase>newBuilder()
            .withNeedsMigration(true)
            .build();

    private final TitusRuntime titusRuntime = TitusRuntimes.internal();

    // Mocked replicators; each test installs the snapshots it needs through the helpers below.
    private final JobDataReplicator jobDataReplicator = mock(JobDataReplicator.class);
    private final RelocationDataReplicator relocationDataReplicator = mock(RelocationDataReplicator.class);

    private final NeedsMigrationQueryHandler migrationHandler = new NeedsMigrationQueryHandler(
            jobDataReplicator,
            relocationDataReplicator,
            EmptyLogStorageInfo.empty(),
            titusRuntime
    );

    @Before
    public void setUp() {
        // Every test starts from empty job and relocation snapshots.
        when(jobDataReplicator.getCurrent()).thenReturn(JobSnapshotFactories.newDefaultEmptySnapshot(titusRuntime));
        when(relocationDataReplicator.getCurrent()).thenReturn(TaskRelocationSnapshot.newBuilder().build());
    }

    @Test
    public void testFindJobs() {
        Pair<Job<?>, List<Task>> job1 = addToJobDataReplicator(newJobAndTasks("job1", 2));
        Pair<Job<?>, List<Task>> job2 = addToJobDataReplicator(newJobAndTasks("job2", 4));
        // job3 has no tasks and never receives a relocation plan, so it must never appear in results.
        Pair<Job<?>, List<Task>> job3 = addToJobDataReplicator(newJobAndTasks("job3", 0));
        // No migration required yet
        PageResult<com.netflix.titus.grpc.protogen.Job> result = migrationHandler.findJobs(QUERY_ALL, Page.first(10));
        assertThat(result.getItems()).isEmpty();
        // Enable migration
        setNeedsRelocation(job1, 2);
        setNeedsRelocation(job2, 1);
        // Limit query to job1
        JobQueryCriteria<TaskStatus.TaskState, JobDescriptor.JobSpecCase> query = JobQueryCriteria.<TaskStatus.TaskState, JobDescriptor.JobSpecCase>newBuilder()
                .withNeedsMigration(true)
                .withJobIds(CollectionsExt.asSet("job1"))
                .build();
        PageResult<com.netflix.titus.grpc.protogen.Job> job1TasksOnly = migrationHandler.findJobs(query, Page.first(10));
        assertThat(job1TasksOnly.getItems()).hasSize(1);
        assertThat(job1TasksOnly.getItems().get(0).getId()).isEqualTo("job1");
        // Now everything
        PageResult<com.netflix.titus.grpc.protogen.Job> allJobs = migrationHandler.findJobs(QUERY_ALL, Page.first(10));
        assertThat(allJobs.getItems()).hasSize(2);
    }

    @Test
    public void testFindTasks() {
        Pair<Job<?>, List<Task>> job1 = addToJobDataReplicator(newJobAndTasks("job1", 2));
        Pair<Job<?>, List<Task>> job2 = addToJobDataReplicator(newJobAndTasks("job2", 4));
        // No migration required yet
        PageResult<com.netflix.titus.grpc.protogen.Task> result = migrationHandler.findTasks(QUERY_ALL, Page.first(10));
        assertThat(result.getItems()).isEmpty();
        // Enable migration
        setNeedsRelocation(job1, 2);
        setNeedsRelocation(job2, 1);
        // Limit query to job1
        JobQueryCriteria<TaskStatus.TaskState, JobDescriptor.JobSpecCase> query = JobQueryCriteria.<TaskStatus.TaskState, JobDescriptor.JobSpecCase>newBuilder()
                .withNeedsMigration(true)
                .withJobIds(CollectionsExt.asSet("job1"))
                .build();
        // Both of job1's tasks were given relocation plans above.
        PageResult<com.netflix.titus.grpc.protogen.Task> job1TasksOnly = migrationHandler.findTasks(query, Page.first(10));
        assertThat(job1TasksOnly.getItems()).hasSize(2);
        // Now everything: 2 tasks from job1 + 1 task from job2.
        PageResult<com.netflix.titus.grpc.protogen.Task> allJobs = migrationHandler.findTasks(QUERY_ALL, Page.first(10));
        assertThat(allJobs.getItems()).hasSize(3);
    }

    // Folds the given job and its tasks into the replicated job snapshot and re-stubs the mock with it.
    private Pair<Job<?>, List<Task>> addToJobDataReplicator(Pair<Job<?>, List<Task>> jobAndTasks) {
        JobSnapshot updated = jobDataReplicator.getCurrent().updateJob(jobAndTasks.getLeft()).orElse(jobDataReplicator.getCurrent());
        for (Task task : jobAndTasks.getRight()) {
            updated = updated.updateTask(task, false).orElse(updated);
        }
        when(jobDataReplicator.getCurrent()).thenReturn(updated);
        return jobAndTasks;
    }

    // Marks the first needsRelocationCount tasks of the given job as requiring relocation.
    private void setNeedsRelocation(Pair<Job<?>, List<Task>> jobAndTasks, int needsRelocationCount) {
        TaskRelocationSnapshot.Builder builder = relocationDataReplicator.getCurrent().toBuilder();
        List<Task> tasks = jobAndTasks.getRight();
        for (int i = 0; i < needsRelocationCount; i++) {
            builder.addPlan(TaskRelocationPlan.newBuilder()
                    .withTaskId(tasks.get(i).getId())
                    .build()
            );
        }
        when(relocationDataReplicator.getCurrent()).thenReturn(builder.build());
    }

    // Creates a batch job with the given id and desired size, together with generated tasks for it.
    private static Pair<Job<?>, List<Task>> newJobAndTasks(String jobId, int taskCount) {
        Job<BatchJobExt> job = JobFunctions.changeBatchJobSize(
                JobGenerator.oneBatchJob().toBuilder().withId(jobId).build(),
                taskCount
        );
        // NOTE(review): raw (List) cast drops generics; presumably the generated batch tasks are
        // assignable to Task — consider a type-safe copy instead of the unchecked cast.
        List<Task> tasks = (List) JobGenerator.batchTasks(job).getValues(taskCount);
        return Pair.of(job, tasks);
    }
}
0 | Create_ds/titus-control-plane/titus-server-gateway/src/test/java/com/netflix/titus/gateway/service/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/test/java/com/netflix/titus/gateway/service/v3/internal/JobImageSanitizerTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.titus.api.jobmanager.JobAttributes;
import com.netflix.titus.api.jobmanager.model.job.Image;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.common.model.sanitizer.ValidationError;
import com.netflix.titus.runtime.connector.registry.RegistryClient;
import com.netflix.titus.runtime.connector.registry.TitusRegistryException;
import com.netflix.titus.runtime.endpoint.admission.JobImageSanitizer;
import com.netflix.titus.runtime.endpoint.admission.JobImageSanitizerConfiguration;
import com.netflix.titus.testkit.model.job.JobDescriptorGenerator;
import org.junit.Before;
import org.junit.Test;
import org.springframework.http.HttpStatus;
import reactor.core.publisher.Mono;
import reactor.test.StepVerifier;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link JobImageSanitizer}: verifies tag-to-digest resolution against the image
 * registry, rejection of non-existent tags, fail-open behavior on registry infrastructure errors,
 * and the no-op path when a digest is already present.
 */
public class JobImageSanitizerTest {

    private static final String repo = "myRepo";
    private static final String tag = "myTag";
    private static final String digest = "sha256:f9f5bb506406b80454a4255b33ed2e4383b9e4a32fb94d6f7e51922704e818fa";

    private final JobImageSanitizerConfiguration configuration = mock(JobImageSanitizerConfiguration.class);
    private final RegistryClient registryClient = mock(RegistryClient.class);

    private JobImageSanitizer sanitizer;

    // Job whose container image is pinned by digest (no registry resolution needed).
    private final JobDescriptor<?> jobDescriptorWithDigest = JobDescriptorGenerator.batchJobDescriptors()
            .map(jd -> jd.but(d -> d.getContainer().toBuilder()
                    .withImage(Image.newBuilder()
                            .withName(repo)
                            .withDigest(digest)
                            .build())
            ))
            .getValue();

    // Job whose container image is referenced by tag (must be resolved to a digest).
    private final JobDescriptor<?> jobDescriptorWithTag = JobDescriptorGenerator.batchJobDescriptors()
            .map(jd -> jd.but(d -> d.getContainer().toBuilder()
                    .withImage(Image.newBuilder()
                            .withName(repo)
                            .withTag(tag)
                            .build())
            ))
            .getValue();

    @Before
    public void setUp() {
        when(configuration.isEnabled()).thenReturn(true);
        when(configuration.getJobImageValidationTimeoutMs()).thenReturn(1000L);
        when(configuration.getErrorType()).thenReturn(ValidationError.Type.HARD.name());
        // Default registry stub: every lookup resolves to the known digest.
        when(registryClient.getImageDigest(anyString(), anyString())).thenReturn(Mono.just(digest));
        sanitizer = new JobImageSanitizer(configuration, registryClient, new DefaultRegistry());
    }

    @Test
    public void testJobWithTagResolution() {
        when(registryClient.getImageDigest(anyString(), anyString())).thenReturn(Mono.just(digest));
        // The sanitized job descriptor must carry the digest resolved from the tag.
        StepVerifier.create(sanitizer.sanitizeAndApply(jobDescriptorWithTag))
                .assertNext(jobDescriptor -> assertThat(jobDescriptor.getContainer().getImage().getDigest())
                        .isEqualTo(digest))
                .verifyComplete();
    }

    @Test
    public void testJobWithNonExistentTag() {
        when(registryClient.getImageDigest(anyString(), anyString()))
                .thenReturn(Mono.error(TitusRegistryException.imageNotFound(repo, tag)));
        // A missing image is a hard validation failure, surfaced as IllegalArgumentException
        // with the registry error preserved as the cause.
        StepVerifier.create(sanitizer.sanitize(jobDescriptorWithTag))
                .expectErrorSatisfies(throwable -> {
                    assertThat(throwable).isInstanceOf(IllegalArgumentException.class);
                    Throwable cause = throwable.getCause();
                    assertThat(cause).isInstanceOf(TitusRegistryException.class);
                    assertThat(((TitusRegistryException) cause).getErrorCode()).isEqualByComparingTo(TitusRegistryException.ErrorCode.IMAGE_NOT_FOUND);
                })
                .verify();
    }

    /**
     * This test verifies that non-NOT_FOUND errors are suppressed and the original job descriptor is returned.
     */
    @Test
    public void testSuppressedInternalError() {
        when(registryClient.getImageDigest(anyString(), anyString()))
                .thenReturn(Mono.error(TitusRegistryException.internalError(repo, tag, HttpStatus.INTERNAL_SERVER_ERROR)));
        StepVerifier.create(sanitizer.sanitizeAndApply(jobDescriptorWithTag))
                .assertNext(jd -> {
                    // Image left untouched; the skipped sanitization is recorded via a job attribute.
                    assertThat(jd.getContainer().getImage().getDigest()).isNullOrEmpty();
                    assertThat(jd.getContainer().getImage()).isEqualTo(jobDescriptorWithTag.getContainer().getImage());
                    assertThat(((JobDescriptor<?>) jd).getAttributes())
                            .containsEntry(JobAttributes.JOB_ATTRIBUTES_SANITIZATION_SKIPPED_IMAGE, "true");
                })
                .verifyComplete();
    }

    @Test
    public void testRegistryRuntimeError() {
        // Untyped runtime errors (not TitusRegistryException) fail the sanitization.
        when(registryClient.getImageDigest(anyString(), anyString()))
                .thenReturn(Mono.error(new RuntimeException("Unable to reach the registry")));
        StepVerifier.create(sanitizer.sanitizeAndApply(jobDescriptorWithTag))
                .expectError(IllegalArgumentException.class)
                .verify();
    }

    @Test
    public void testJobWithDigestExists() {
        Image image = jobDescriptorWithDigest.getContainer().getImage();
        when(registryClient.getImageDigest(image.getName(), image.getDigest())).thenReturn(Mono.just(digest));
        StepVerifier.create(sanitizer.sanitize(jobDescriptorWithDigest))
                .expectNextCount(0) // nothing to do when digest is valid
                .verifyComplete();
    }
}
| 9,368 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/test/java/com/netflix/titus/gateway/service/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/test/java/com/netflix/titus/gateway/service/v3/internal/JobEbsVolumeValidatorTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.ebs.EbsVolume;
import com.netflix.titus.common.model.sanitizer.ValidationError;
import com.netflix.titus.common.runtime.TitusRuntimes;
import com.netflix.titus.runtime.endpoint.admission.JobEbsVolumeValidator;
import com.netflix.titus.testkit.model.job.JobDescriptorGenerator;
import org.junit.Test;
import reactor.test.StepVerifier;
/**
 * Unit tests for {@link JobEbsVolumeValidator}: verifies that jobs with valid, invalid, duplicate
 * or no EBS volumes produce the expected set of validation errors.
 */
public class JobEbsVolumeValidatorTest {

    // A fully specified, valid EBS volume.
    private static final EbsVolume EBS_VOLUME_VALID = EbsVolume.newBuilder()
            .withVolumeId("vol-valid")
            .withMountPath("/valid")
            .withMountPermissions(EbsVolume.MountPerm.RW)
            .withFsType("xfs")
            .withVolumeCapacityGB(5)
            .withVolumeAvailabilityZone("us-east-1c")
            .build();

    // Invalid volume missing AZ and capacity fields
    private static final EbsVolume EBS_VOLUME_INVALID = EbsVolume.newBuilder()
            .withVolumeId("vol-invalid")
            .withMountPath("/invalid")
            .withMountPermissions(EbsVolume.MountPerm.RW)
            .withFsType("xfs")
            .build();

    private static final List<EbsVolume> INVALID_EBS_VOLUMES = Arrays.asList(EBS_VOLUME_VALID, EBS_VOLUME_INVALID);
    private static final List<EbsVolume> VALID_EBS_VOLUMES = Collections.singletonList(EBS_VOLUME_VALID);

    private static final JobDescriptor<?> JOB_WITH_NO_EBS_VOLUMES = JobDescriptorGenerator.oneTaskBatchJobDescriptor();
    // Built via the shared helper below (previously duplicated builder chains).
    private static final JobDescriptor<?> JOB_WITH_INVALID_EBS_VOLUMES = withEbsVolumes(INVALID_EBS_VOLUMES);
    private static final JobDescriptor<?> JOB_WITH_VALID_EBS_VOLUMES = withEbsVolumes(VALID_EBS_VOLUMES);

    private final JobEbsVolumeValidator jobEbsVolumeValidator = new JobEbsVolumeValidator(() -> ValidationError.Type.HARD, TitusRuntimes.internal());

    /** Returns a copy of the base job descriptor with the given EBS volumes set on its container resources. */
    private static JobDescriptor<?> withEbsVolumes(List<EbsVolume> ebsVolumes) {
        return JOB_WITH_NO_EBS_VOLUMES.toBuilder()
                .withContainer(JOB_WITH_NO_EBS_VOLUMES.getContainer().toBuilder()
                        .withContainerResources(JOB_WITH_NO_EBS_VOLUMES.getContainer().getContainerResources().toBuilder()
                                .withEbsVolumes(ebsVolumes)
                                .build())
                        .build())
                .build();
    }

    @Test
    public void testJobWithInvalidEbsVolume() {
        // Exactly one error: the volume missing AZ/capacity.
        StepVerifier.create(jobEbsVolumeValidator.validate(JOB_WITH_INVALID_EBS_VOLUMES))
                .expectNextMatches(violations -> violations.size() == 1)
                .verifyComplete();
    }

    @Test
    public void testJobWithValidEbsVolume() {
        StepVerifier.create(jobEbsVolumeValidator.validate(JOB_WITH_VALID_EBS_VOLUMES))
                .expectNextMatches(Set::isEmpty)
                .verifyComplete();
    }

    @Test
    public void testJobWithNoEbsVolumes() {
        // No volumes at all is a valid configuration.
        StepVerifier.create(jobEbsVolumeValidator.validate(JOB_WITH_NO_EBS_VOLUMES))
                .expectNextMatches(Set::isEmpty)
                .verifyComplete();
    }

    @Test
    public void testJobWithDuplicateVolumes() {
        // Two volumes sharing the same volume id must be rejected with a single, specific error.
        StepVerifier.create(jobEbsVolumeValidator.validate(
                JOB_WITH_VALID_EBS_VOLUMES
                        .but(jd -> jd.getContainer()
                                .but(c -> c.getContainerResources().toBuilder()
                                        .withEbsVolumes(Arrays.asList(EBS_VOLUME_VALID, EBS_VOLUME_VALID))
                                        .build()))
        ))
                .expectNextMatches(violations -> violations.size() == 1 && violations.stream()
                        .filter(validationError -> validationError.getDescription().contains("Duplicate volume IDs exist"))
                        .count() == 1)
                .verifyComplete();
    }
}
| 9,369 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/test/java/com/netflix/titus/gateway/service/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/test/java/com/netflix/titus/gateway/service/v3/internal/ServiceMeshImageSanitizerTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import java.util.Map;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.titus.api.jobmanager.JobAttributes;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.common.model.sanitizer.ValidationError;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.runtime.connector.registry.RegistryClient;
import com.netflix.titus.runtime.connector.registry.TitusRegistryException;
import com.netflix.titus.runtime.endpoint.admission.ServiceMeshImageSanitizer;
import com.netflix.titus.runtime.endpoint.admission.ServiceMeshImageSanitizerConfiguration;
import com.netflix.titus.testkit.model.job.JobDescriptorGenerator;
import org.junit.Before;
import org.junit.Test;
import org.springframework.http.HttpStatus;
import reactor.core.publisher.Mono;
import reactor.test.StepVerifier;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.fail;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link ServiceMeshImageSanitizer}: verifies tag-to-digest resolution of the
 * service mesh container image attribute, rejection of non-existent tags and malformed image
 * references, and fail-open behavior on registry infrastructure errors.
 */
public class ServiceMeshImageSanitizerTest {

    private static final String repo = "proxydRepo/proxydImage";
    private static final String tag = "proxydTag";
    private static final String digest = "sha256:f9f5bb506406b80454a4255b33ed2e4383b9e4a32fb94d6f7e51922704e818fa";

    // Fully qualified image references: digest-pinned and tag-based.
    private static final String imageNameDigest = String.format("%s@%s", repo, digest);
    private static final String imageNameTag = String.format("%s:%s", repo, tag);

    // Service-mesh enabled jobs with a digest-pinned, tag-based and malformed (bare repo) image reference.
    private static final JobDescriptor<?> jobDescriptorWithDigest = descriptorWithServiceMeshImage(imageNameDigest);
    private static final JobDescriptor<?> jobDescriptorWithTag = descriptorWithServiceMeshImage(imageNameTag);
    private static final JobDescriptor<?> jobDescriptorBadImageName = descriptorWithServiceMeshImage(repo);

    private final ServiceMeshImageSanitizerConfiguration configuration = mock(ServiceMeshImageSanitizerConfiguration.class);
    private final RegistryClient registryClient = mock(RegistryClient.class);

    private ServiceMeshImageSanitizer sanitizer;

    /**
     * Builds a batch job descriptor whose container enables service mesh and points at the given
     * image reference. Replaces three near-identical inline builder chains previously duplicated here.
     */
    private static JobDescriptor<?> descriptorWithServiceMeshImage(String imageName) {
        Map<String, String> attrs = CollectionsExt.asMap(
                JobAttributes.JOB_CONTAINER_ATTRIBUTE_SERVICEMESH_ENABLED, "true",
                JobAttributes.JOB_CONTAINER_ATTRIBUTE_SERVICEMESH_CONTAINER, imageName);
        return JobDescriptorGenerator.batchJobDescriptors()
                .map(jd -> jd.but(d -> d.toBuilder()
                        .withContainer(jd.getContainer().toBuilder()
                                .withAttributes(CollectionsExt.copyAndAdd(d.getAttributes(), attrs))
                                .build())
                        .build()))
                .getValue();
    }

    @Before
    public void setUp() {
        when(configuration.isEnabled()).thenReturn(true);
        when(configuration.getServiceMeshImageValidationTimeoutMs()).thenReturn(1000L);
        when(configuration.getErrorType()).thenReturn(ValidationError.Type.HARD.name());
        // Default registry stub: every lookup resolves to the known digest.
        when(registryClient.getImageDigest(anyString(), anyString())).thenReturn(Mono.just(digest));
        sanitizer = new ServiceMeshImageSanitizer(configuration, registryClient, new DefaultRegistry());
    }

    @Test
    public void testJobWithTagResolution() {
        when(registryClient.getImageDigest(anyString(), anyString())).thenReturn(Mono.just(digest));
        // The tag-based reference must be rewritten to the digest-pinned form.
        StepVerifier.create(sanitizer.sanitizeAndApply(jobDescriptorWithTag))
                .assertNext(jobDescriptor -> assertThat(jobDescriptor
                        .getContainer()
                        .getAttributes()
                        .get(JobAttributes.JOB_CONTAINER_ATTRIBUTE_SERVICEMESH_CONTAINER))
                        .isEqualTo(imageNameDigest))
                .verifyComplete();
    }

    @Test
    public void testJobWithNonExistentTag() {
        when(registryClient.getImageDigest(anyString(), anyString()))
                .thenReturn(Mono.error(TitusRegistryException.imageNotFound(repo, tag)));
        // A missing image is a hard failure with the registry error preserved as the cause.
        StepVerifier.create(sanitizer.sanitize(jobDescriptorWithTag))
                .expectErrorSatisfies(throwable -> {
                    assertThat(throwable.getCause()).isInstanceOf(TitusRegistryException.class);
                    assertThat(((TitusRegistryException) throwable.getCause()).getErrorCode()).isEqualByComparingTo(TitusRegistryException.ErrorCode.IMAGE_NOT_FOUND);
                })
                .verify();
    }

    /**
     * This test verifies that non-NOT_FOUND errors are suppressed and the original job descriptor is returned.
     */
    @Test
    public void testSuppressedInternalError() {
        when(registryClient.getImageDigest(anyString(), anyString()))
                .thenReturn(Mono.error(TitusRegistryException.internalError(repo, tag, HttpStatus.INTERNAL_SERVER_ERROR)));
        StepVerifier.create(sanitizer.sanitizeAndApply(jobDescriptorWithTag))
                .assertNext(jd -> assertThat(((JobDescriptor<?>) jd).getContainer().getAttributes())
                        .containsEntry(JobAttributes.JOB_ATTRIBUTES_SANITIZATION_SKIPPED_SERVICEMESH_IMAGE, "true"))
                .verifyComplete();
    }

    @Test
    public void testJobWithBadImageName() {
        // A malformed reference must be rejected before any registry call; the stub below throws if
        // the sanitizer ever contacts the registry.
        // Fixed: removed the enclosing try/catch + fail(t.getMessage()), which swallowed the stack
        // trace (and would even re-catch its own AssertionError); any throwable now fails the test
        // with full diagnostics.
        when(registryClient.getImageDigest(anyString(), anyString()))
                .thenThrow(new IllegalStateException("should not call registryClient"));
        StepVerifier.create(sanitizer.sanitize(jobDescriptorBadImageName))
                .expectErrorSatisfies(throwable -> assertThat(throwable).isInstanceOf(IllegalArgumentException.class))
                .verify();
    }

    @Test
    public void testJobWithDigestExists() {
        when(registryClient.getImageDigest(anyString(), anyString())).thenReturn(Mono.just(digest));
        StepVerifier.create(sanitizer.sanitize(jobDescriptorWithDigest))
                .expectNextCount(0) // nothing to do when digest is valid
                .verifyComplete();
    }
}
| 9,370 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/test/java/com/netflix/titus/gateway/service/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/test/java/com/netflix/titus/gateway/service/v3/internal/JobSecurityValidatorTest.java | /*
*
* * Copyright 2019 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package com.netflix.titus.gateway.service.v3.internal;
import java.util.Set;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.titus.api.connector.cloud.IamConnector;
import com.netflix.titus.api.iam.service.IamConnectorException;
import com.netflix.titus.api.jobmanager.JobAttributes;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.SecurityProfile;
import com.netflix.titus.common.model.sanitizer.ValidationError;
import com.netflix.titus.runtime.endpoint.admission.JobIamValidator;
import com.netflix.titus.runtime.endpoint.admission.JobSecurityValidatorConfiguration;
import com.netflix.titus.testkit.model.job.JobDescriptorGenerator;
import org.junit.Before;
import org.junit.Test;
import reactor.core.publisher.Mono;
import reactor.test.StepVerifier;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link JobIamValidator}: verifies that the configured agent role must be able to
 * assume a job's IAM role, that missing/unassumable roles produce validation errors, and that
 * sanitization fails open on unexpected IAM connector errors.
 */
public class JobSecurityValidatorTest {

    private static final String VALID_IAM_ROLE_NAME = "myValidIamRole";
    private static final String INVALID_IAM_ROLE_NAME = "myInvalidIamRole";
    private static final String IAM_ASSUME_ROLE_NAME = "myIamAssumeRole";

    // Expected error message prefixes produced by the IAM connector exceptions.
    private static final String CANNOT_ASSUME_IAM_ERROR_MSG = "Titus cannot assume into role";
    private static final String MISSING_IAM_ERROR_MSG = "Could not find IAM";

    private final JobSecurityValidatorConfiguration configuration = mock(JobSecurityValidatorConfiguration.class);
    private final IamConnector iamConnector = mock(IamConnector.class);

    private JobIamValidator iamValidator;

    // Job descriptors carrying a valid and an invalid IAM role in their security profile.
    private final JobDescriptor<?> jobDescriptorWithValidIam = JobDescriptorGenerator.batchJobDescriptors()
            .map(jd -> jd.but(d -> d.getContainer().toBuilder()
                    .withSecurityProfile(SecurityProfile.newBuilder()
                            .withIamRole(VALID_IAM_ROLE_NAME)
                            .build())
            ))
            .getValue();

    private final JobDescriptor<?> jobDescriptorWithInvalidIam = JobDescriptorGenerator.batchJobDescriptors()
            .map(jd -> jd.but(d -> d.getContainer().toBuilder()
                    .withSecurityProfile(SecurityProfile.newBuilder()
                            .withIamRole(INVALID_IAM_ROLE_NAME)
                            .build())
            ))
            .getValue();

    @Before
    public void setUp() {
        when(configuration.isIamValidatorEnabled()).thenReturn(true);
        when(configuration.getAgentIamAssumeRole()).thenReturn(IAM_ASSUME_ROLE_NAME);
        when(configuration.getIamValidationTimeoutMs()).thenReturn(10000L);
        when(configuration.getErrorType()).thenReturn(ValidationError.Type.HARD.name());
        iamValidator = new JobIamValidator(configuration, iamConnector, new DefaultRegistry());
    }

    @Test
    public void testJobWithValidIam() {
        // An empty Mono from canIamAssume means the role is assumable -> no validation errors.
        when(iamConnector.canIamAssume(VALID_IAM_ROLE_NAME, IAM_ASSUME_ROLE_NAME)).thenReturn(Mono.empty());
        StepVerifier.create(iamValidator.validate(jobDescriptorWithValidIam))
                .assertNext(validationErrors -> assertThat(validationErrors.isEmpty()).isTrue())
                .verifyComplete();
    }

    @Test
    public void testJobWithUnassumableIam() {
        String errorMsg = String.format("%s %s", CANNOT_ASSUME_IAM_ERROR_MSG, INVALID_IAM_ROLE_NAME);
        when(iamConnector.canIamAssume(INVALID_IAM_ROLE_NAME, IAM_ASSUME_ROLE_NAME))
                .thenReturn(Mono.error(IamConnectorException.iamRoleCannotAssume(INVALID_IAM_ROLE_NAME, IAM_ASSUME_ROLE_NAME)));
        StepVerifier.create(iamValidator.validate(jobDescriptorWithInvalidIam))
                .assertNext(validationErrors -> validationErrorsContainsJust(validationErrors, errorMsg))
                .verifyComplete();
    }

    @Test
    public void testJobWithNonexistentIam() {
        String errorMsg = String.format("%s %s", MISSING_IAM_ERROR_MSG, INVALID_IAM_ROLE_NAME);
        when(iamConnector.canIamAssume(INVALID_IAM_ROLE_NAME, IAM_ASSUME_ROLE_NAME))
                .thenReturn(Mono.error(IamConnectorException.iamRoleNotFound(INVALID_IAM_ROLE_NAME)));
        Mono<Set<ValidationError>> validationErrorsMono = iamValidator.validate(jobDescriptorWithInvalidIam);
        StepVerifier.create(validationErrorsMono)
                .assertNext(validationErrors -> validationErrorsContainsJust(validationErrors, errorMsg))
                .verifyComplete();
    }

    @Test
    public void testIamSanitizationFailsOpen() {
        // Unexpected connector errors must not reject the job: the original role is kept and the
        // skipped sanitization is recorded in the job attributes.
        when(iamConnector.getIamRole(INVALID_IAM_ROLE_NAME))
                .thenReturn(Mono.error(IamConnectorException.iamRoleUnexpectedError(INVALID_IAM_ROLE_NAME)));
        // NOTE(review): raw Mono<JobDescriptor> — presumably sanitizeAndApply's signature forces
        // this; consider Mono<JobDescriptor<?>> if the API allows it.
        Mono<JobDescriptor> sanitized = iamValidator.sanitizeAndApply(jobDescriptorWithInvalidIam);
        StepVerifier.create(sanitized)
                .assertNext(jobDescriptor -> {
                    assertThat(jobDescriptor.getContainer().getSecurityProfile().getIamRole())
                            .isEqualTo(jobDescriptorWithInvalidIam.getContainer().getSecurityProfile().getIamRole());
                    assertThat(((JobDescriptor<?>) jobDescriptor).getAttributes())
                            .containsEntry(JobAttributes.JOB_ATTRIBUTES_SANITIZATION_SKIPPED_IAM, "true");
                })
                .verifyComplete();
    }

    // Asserts that exactly one validation error is present and that it starts with the given message.
    private void validationErrorsContainsJust(Set<ValidationError> validationErrors, String errorMessage) {
        assertThat(validationErrors).isNotNull();
        assertThat(validationErrors.size()).isEqualTo(1);
        assertThat(validationErrors).allMatch(validationError -> validationError.getDescription().startsWith(errorMessage));
    }
}
| 9,371 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/MetricConstants.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway;
/**
* Set of metric related constants that establish consistent naming convention.
*/
/**
 * Set of metric related constants that establish consistent naming convention.
 */
public class MetricConstants {

    /** Root prefix for all TitusGateway metrics. */
    public static final String METRIC_ROOT = "titusGateway.";

    /** Prefix for endpoint-level metrics. */
    public static final String METRIC_ENDPOINT = METRIC_ROOT + "endpoint.";

    /** Prefix for metrics emitted by the proxy endpoints. */
    public static final String METRIC_PROXY = METRIC_ENDPOINT + "proxy.";

    /** Prefix for job management metrics. */
    public static final String METRIC_JOB_MANAGEMENT = METRIC_ROOT + "jobManagement.";

    // Constants holder; must not be instantiated (previously had an implicit public constructor).
    private MetricConstants() {
    }
}
| 9,372 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/endpoint/GatewayJerseyModule.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.endpoint;
import java.util.function.UnaryOperator;
import javax.inject.Named;
import javax.inject.Singleton;
import com.google.inject.Provides;
import com.netflix.archaius.ConfigProxyFactory;
import com.netflix.governator.guice.jersey.GovernatorServletContainer;
import com.netflix.governator.providers.Advises;
import com.netflix.titus.gateway.endpoint.v2.rest.TitusMasterProxyServlet;
import com.netflix.titus.gateway.endpoint.v3.rest.SchedulerResource;
import com.netflix.titus.gateway.eviction.EvictionResource;
import com.netflix.titus.runtime.endpoint.common.rest.JsonMessageReaderWriter;
import com.netflix.titus.runtime.endpoint.common.rest.RestServerConfiguration;
import com.netflix.titus.runtime.endpoint.common.rest.TitusExceptionMapper;
import com.netflix.titus.runtime.endpoint.common.rest.filter.CallerContextFilter;
import com.netflix.titus.runtime.endpoint.common.rest.provider.InstrumentedResourceMethodDispatchAdapter;
import com.netflix.titus.runtime.endpoint.metadata.SimpleHttpCallMetadataResolver;
import com.netflix.titus.runtime.endpoint.v3.rest.AutoScalingResource;
import com.netflix.titus.runtime.endpoint.v3.rest.HealthResource;
import com.netflix.titus.runtime.endpoint.v3.rest.JobManagementResource;
import com.netflix.titus.runtime.endpoint.v3.rest.LoadBalancerResource;
import com.sun.jersey.api.core.DefaultResourceConfig;
import com.sun.jersey.guice.JerseyServletModule;
/**
 * Guice servlet module wiring the Titus Gateway REST endpoint: the v3 request filters,
 * the v2 TitusMaster proxy servlet, and the Jersey container serving the v3 resources.
 */
public final class GatewayJerseyModule extends JerseyServletModule {

    @Override
    protected void configureServlets() {
        // NOTE: in guice-servlet, filters dispatch in registration order, so the caller
        // context filter below runs before the call metadata filter.

        // Store HTTP servlet request data in thread local variable
        filter("/api/v3/*").through(CallerContextFilter.class);

        // Call metadata interceptor (see CallMetadataHeaders).
        filter("/api/v3/*").through(SimpleHttpCallMetadataResolver.CallMetadataInterceptorFilter.class);

        // Configure servlet that proxies requests to master
        serve("/api/v2/*").with(TitusMasterProxyServlet.class);

        // Configure servlet to serve resources for all other api paths
        serve("/api/*").with(GovernatorServletContainer.class);
    }

    /** REST server settings backed by an Archaius configuration proxy. */
    @Provides
    @Singleton
    public RestServerConfiguration getRestServerConfiguration(ConfigProxyFactory factory) {
        return factory.newProxy(RestServerConfiguration.class);
    }

    /** Registers the JAX-RS providers and resource classes on the Governator-managed Jersey container. */
    @Advises
    @Singleton
    @Named("governator")
    UnaryOperator<DefaultResourceConfig> getConfig() {
        return config -> {
            // providers
            config.getClasses().add(JsonMessageReaderWriter.class);
            config.getClasses().add(TitusExceptionMapper.class);
            config.getClasses().add(InstrumentedResourceMethodDispatchAdapter.class);

            // resources
            config.getClasses().add(HealthResource.class);
            config.getClasses().add(JobManagementResource.class);
            config.getClasses().add(EvictionResource.class);
            config.getClasses().add(AutoScalingResource.class);
            config.getClasses().add(SchedulerResource.class);
            config.getClasses().add(LoadBalancerResource.class);
            return config;
        };
    }

    // All instances of this module are interchangeable; class-based equality makes
    // repeated installs of this module idempotent in Guice.
    @Override
    public boolean equals(Object obj) {
        return obj != null && getClass().equals(obj.getClass());
    }

    @Override
    public int hashCode() {
        return getClass().hashCode();
    }
}
| 9,373 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/endpoint/GatewayGrpcModule.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.endpoint;
import javax.inject.Singleton;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import com.netflix.archaius.ConfigProxyFactory;
import com.netflix.titus.gateway.endpoint.v3.grpc.DefaultSchedulerServiceGrpc;
import com.netflix.titus.gateway.endpoint.v3.grpc.GrpcEndpointConfiguration;
import com.netflix.titus.gateway.endpoint.v3.grpc.TitusGatewayGrpcServer;
import com.netflix.titus.gateway.eviction.EvictionModule;
import com.netflix.titus.grpc.protogen.AutoScalingServiceGrpc;
import com.netflix.titus.grpc.protogen.HealthGrpc;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc;
import com.netflix.titus.grpc.protogen.LoadBalancerServiceGrpc;
import com.netflix.titus.grpc.protogen.SchedulerServiceGrpc;
import com.netflix.titus.runtime.endpoint.v3.grpc.DefaultAutoScalingServiceGrpc;
import com.netflix.titus.runtime.endpoint.v3.grpc.DefaultHealthServiceGrpc;
import com.netflix.titus.runtime.endpoint.v3.grpc.DefaultJobManagementServiceGrpc;
import com.netflix.titus.runtime.endpoint.v3.grpc.DefaultLoadBalancerServiceGrpc;
/**
 * GRPC wiring for Titus Gateway: binds one implementation per exposed GRPC service and
 * eagerly creates the {@link TitusGatewayGrpcServer} so the server starts with the injector.
 */
public class GatewayGrpcModule extends AbstractModule {

    @Override
    protected void configure() {
        install(new EvictionModule());

        // One binding per GRPC service implementation served by the gateway.
        bind(HealthGrpc.HealthImplBase.class).to(DefaultHealthServiceGrpc.class);
        bind(JobManagementServiceGrpc.JobManagementServiceImplBase.class).to(DefaultJobManagementServiceGrpc.class);
        bind(AutoScalingServiceGrpc.AutoScalingServiceImplBase.class).to(DefaultAutoScalingServiceGrpc.class);
        bind(LoadBalancerServiceGrpc.LoadBalancerServiceImplBase.class).to(DefaultLoadBalancerServiceGrpc.class);
        bind(SchedulerServiceGrpc.SchedulerServiceImplBase.class).to(DefaultSchedulerServiceGrpc.class);

        // Eager singleton: the GRPC server is created (and @PostConstruct-started) at injector creation time.
        bind(TitusGatewayGrpcServer.class).asEagerSingleton();
    }

    /** GRPC endpoint settings (port, shutdown timeout) backed by an Archaius configuration proxy. */
    @Provides
    @Singleton
    public GrpcEndpointConfiguration getGrpcEndpointConfiguration(ConfigProxyFactory factory) {
        return factory.newProxy(GrpcEndpointConfiguration.class);
    }
}
| 9,374 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/endpoint/GatewayEndpointModule.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.endpoint;
import com.google.inject.AbstractModule;
import com.netflix.governator.guice.jersey.GovernatorJerseySupportModule;
import com.netflix.titus.runtime.endpoint.metadata.CallMetadataResolveModule;
import com.netflix.titus.runtime.endpoint.resolver.HostCallerIdResolver;
import com.netflix.titus.runtime.endpoint.resolver.NoOpHostCallerIdResolver;
/**
 * Top-level endpoint wiring for Titus Gateway. Always installs call-metadata resolution and
 * the GRPC endpoint; the REST (Jersey) endpoint is installed only when enabled.
 */
public class GatewayEndpointModule extends AbstractModule {

    // When true, the Jersey REST endpoint is installed alongside GRPC.
    private final boolean enableREST;

    public GatewayEndpointModule(boolean enableREST) {
        this.enableREST = enableREST;
    }

    @Override
    protected void configure() {
        bind(HostCallerIdResolver.class).to(NoOpHostCallerIdResolver.class);
        install(new CallMetadataResolveModule());
        if (enableREST) {
            installRestSupport();
        }
        install(new GatewayGrpcModule());
    }

    /** Installs the Governator-managed Jersey container support and the gateway REST resources. */
    private void installRestSupport() {
        install(new GovernatorJerseySupportModule());
        install(new GatewayJerseyModule());
    }
}
| 9,375 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/endpoint/v2 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/endpoint/v2/rest/TitusMasterProxyServlet.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.endpoint.v2.rest;
import java.io.ByteArrayOutputStream;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Singleton;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.UriBuilder;
import com.google.common.collect.Sets;
import com.google.common.io.ByteStreams;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.netflix.titus.common.network.http.Headers;
import com.netflix.titus.common.network.http.HttpClient;
import com.netflix.titus.common.network.http.Methods;
import com.netflix.titus.common.network.http.Request;
import com.netflix.titus.common.network.http.RequestBody;
import com.netflix.titus.common.network.http.Response;
import com.netflix.titus.common.util.StringExt;
import com.netflix.titus.gateway.MetricConstants;
import com.netflix.titus.gateway.startup.TitusGatewayConfiguration;
import com.netflix.titus.runtime.connector.titusmaster.Address;
import com.netflix.titus.runtime.connector.titusmaster.LeaderResolver;
import com.netflix.titus.runtime.connector.titusmaster.TitusMasterConnectorModule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Servlet that transparently proxies v2 REST calls to the current TitusMaster leader.
 * When proxy error logging is enabled, request/response bodies are buffered (up to
 * {@link #MAX_BYTES_TO_BUFFER} bytes) so failed exchanges can be logged.
 */
@Singleton
public class TitusMasterProxyServlet extends HttpServlet {

    private static final Logger logger = LoggerFactory.getLogger(TitusMasterProxyServlet.class);

    // Hop-by-hop headers that must not be forwarded from the caller to TitusMaster.
    private static final Set<String> IGNORED_REQUEST_HEADERS = Sets.newHashSet("connection", "content-length", "date",
            "keep-alive", "proxy-authenticate", "proxy-authorization", "te", "trailers", "transfer-encoding", "upgrade");

    // Hop-by-hop headers that must not be copied from the TitusMaster response back to the caller.
    private static final Set<String> IGNORED_RESPONSE_HEADERS = Sets.newHashSet("connection", "content-length", "date",
            "keep-alive", "proxy-authenticate", "proxy-authorization", "te", "trailers", "transfer-encoding", "upgrade");

    // Upper bound on how much of a request/response body is retained for error logging.
    private static final int MAX_BYTES_TO_BUFFER = 32_000;

    private static final String TITUS_HEADER_CALLER_HOST_ADDRESS = "X-Titus-CallerHostAddress";

    private final TitusGatewayConfiguration configuration;
    private final Registry registry;
    private final HttpClient httpClient;
    private final LeaderResolver leaderResolver;
    private final Id baseId;

    @Inject
    public TitusMasterProxyServlet(TitusGatewayConfiguration configuration,
                                   Registry registry,
                                   @Named(TitusMasterConnectorModule.TITUS_MASTER_CLIENT) HttpClient httpClient,
                                   LeaderResolver leaderResolver) {
        this.configuration = configuration;
        this.registry = registry;
        this.httpClient = httpClient;
        this.leaderResolver = leaderResolver;
        this.baseId = registry.createId(MetricConstants.METRIC_PROXY + "request");
    }

    // All HTTP verbs are proxied verbatim to the current TitusMaster leader.

    @Override
    protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
        proxyRequest(request, response);
    }

    @Override
    protected void doHead(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
        proxyRequest(request, response);
    }

    @Override
    protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
        proxyRequest(request, response);
    }

    @Override
    protected void doPut(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
        proxyRequest(request, response);
    }

    @Override
    protected void doDelete(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
        proxyRequest(request, response);
    }

    @Override
    protected void doOptions(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
        proxyRequest(request, response);
    }

    @Override
    protected void doTrace(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
        proxyRequest(request, response);
    }

    /**
     * Proxies the request, translating URI failures to 400 and anything else to 500, and records
     * a latency timer tagged with HTTP method and response status.
     */
    private void proxyRequest(HttpServletRequest request, HttpServletResponse response) throws IOException {
        final long start = registry.clock().wallTime();
        try {
            doProxyRequest(request, response);
        } catch (URISyntaxException e) {
            logger.error("[PROXY ILLEGAL URI] Bad URI specified with error: ", e);
            response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
        } catch (Exception e) {
            logger.error("[PROXY UNKNOWN ERROR] Unable to proxy request with error: ", e);
            response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
        } finally {
            final long end = registry.clock().wallTime();
            registry.timer(createId(request.getMethod(), response.getStatus())).record(end - start, TimeUnit.MILLISECONDS);
        }
    }

    /**
     * Resolves the current leader, replays the request against it, and copies status, headers and body
     * back onto the servlet response. Returns 503 when no leader is known.
     */
    private void doProxyRequest(HttpServletRequest request, HttpServletResponse response) throws URISyntaxException, IOException {
        Optional<Address> leaderOptional = leaderResolver.resolve();
        if (!leaderOptional.isPresent()) {
            response.setStatus(HttpServletResponse.SC_SERVICE_UNAVAILABLE);
            return;
        }

        String titusMasterUri = leaderOptional.get().toString();
        String method = request.getMethod();
        String remoteIp = request.getRemoteAddr();
        URI requestUri = getServletRequestUri(request);
        // Wrap the request stream only when we may need to log its body on failure.
        InputStream requestInputStream = configuration.isProxyErrorLoggingEnabled()
                ? new ByteCopyInputStream(request.getInputStream(), MAX_BYTES_TO_BUFFER) : request.getInputStream();
        URI clientRequestUri = constructProxyUri(requestUri, titusMasterUri);
        Headers clientHeaders = getAllHeaders(request);
        Response clientResponse = null;
        InputStream responseInputStream = null;
        boolean exceptionOccurred = false;
        try {
            Request.Builder clientRequestBuilder = new Request.Builder()
                    .url(clientRequestUri.toString())
                    .method(method)
                    .headers(clientHeaders);
            if (requestInputStream != null && Methods.isBodyAllowed(method)) {
                clientRequestBuilder.body(RequestBody.create(requestInputStream));
            }
            clientResponse = httpClient.execute(clientRequestBuilder.build());
            response.setStatus(clientResponse.getStatusCode().getCode());

            Headers clientResponseHeaders = clientResponse.getHeaders();
            clientResponseHeaders.names().forEach(name -> {
                if (!IGNORED_RESPONSE_HEADERS.contains(name.toLowerCase())) {
                    clientResponseHeaders.values(name).forEach(value -> response.addHeader(name, value));
                }
            });

            if (clientResponse.hasBody()) {
                responseInputStream = clientResponse.getBody().get(InputStream.class);
                // Buffer the response body only for failed exchanges, so it can be logged below.
                responseInputStream = (configuration.isProxyErrorLoggingEnabled() && !clientResponse.isSuccessful()) ?
                        new ByteCopyInputStream(responseInputStream, MAX_BYTES_TO_BUFFER) : responseInputStream;
                ByteStreams.copy(responseInputStream, response.getOutputStream());
            }
        } catch (Exception e) {
            exceptionOccurred = true;
            throw e;
        } finally {
            boolean logRequest = configuration.isProxyErrorLoggingEnabled() && (exceptionOccurred || (clientResponse != null && !clientResponse.isSuccessful()));
            boolean logResponse = configuration.isProxyErrorLoggingEnabled() && (clientResponse != null && !clientResponse.isSuccessful());

            String proxyErrorMessage = "";
            if (logRequest) {
                byte[] requestBodyBytes = new byte[0];
                if (requestInputStream instanceof ByteCopyInputStream) {
                    requestBodyBytes = ((ByteCopyInputStream) requestInputStream).getCopiedBytes();
                }
                int requestContentLength = requestBodyBytes.length;
                String requestBody = new String(requestBodyBytes);
                proxyErrorMessage = "\n[PROXY ERROR REQUEST] " + remoteIp + " " + method + " " + requestUri
                        + "\n\tHeaders: " + clientHeaders
                        + "\n\tContent-Length: " + requestContentLength;
                if (requestContentLength > 0) {
                    proxyErrorMessage += "\n\tBody: " + requestBody;
                }
            }
            if (logResponse) {
                byte[] responseBodyBytes = new byte[0];
                if (responseInputStream instanceof ByteCopyInputStream) {
                    responseBodyBytes = ((ByteCopyInputStream) responseInputStream).getCopiedBytes();
                }
                int responseContentLength = responseBodyBytes.length;
                String responseBody = new String(responseBodyBytes);
                proxyErrorMessage += "\n[PROXY ERROR RESPONSE] " + response.getStatus() + " " + method + " " + clientRequestUri
                        + "\n\tHeaders: " + clientResponse.getHeaders()
                        + "\n\tContent-Length: " + responseContentLength;
                if (responseContentLength > 0) {
                    proxyErrorMessage += "\n\tBody: " + responseBody;
                }
            }

            // FIX: always release the upstream response resources. Previously they were closed only on
            // the error-logging path, leaking the response body/stream of every successful proxied request.
            closeQuietly(clientResponse, responseInputStream);

            if (StringExt.isNotEmpty(proxyErrorMessage)) {
                logger.info(proxyErrorMessage);
            }
        }
    }

    /** Closes the upstream response body and wrapped stream, swallowing close failures. */
    private void closeQuietly(Response clientResponse, InputStream responseInputStream) {
        try {
            if (clientResponse != null && clientResponse.hasBody()) {
                clientResponse.getBody().close();
            }
        } catch (Exception ignored) {
        }
        try {
            if (responseInputStream != null) {
                responseInputStream.close();
            }
        } catch (Exception ignored) {
        }
    }

    /**
     * Copies all non-hop-by-hop request headers and appends this hop to the
     * {@code X-Titus-CallerHostAddress} chain.
     */
    private Headers getAllHeaders(HttpServletRequest request) {
        Headers clientHeaders = new Headers();
        Enumeration headerNames = request.getHeaderNames();
        while (headerNames.hasMoreElements()) {
            String key = (String) headerNames.nextElement();
            String value = request.getHeader(key);
            if (!IGNORED_REQUEST_HEADERS.contains(key.toLowerCase())) {
                clientHeaders.put(key, value);
            }
        }

        // Add Titus specific headers
        String upstreamCaller = request.getHeader(TITUS_HEADER_CALLER_HOST_ADDRESS);
        if (upstreamCaller != null) {
            clientHeaders.put(TITUS_HEADER_CALLER_HOST_ADDRESS, upstreamCaller + ',' + request.getRemoteHost());
        } else {
            clientHeaders.put(TITUS_HEADER_CALLER_HOST_ADDRESS, request.getRemoteHost());
        }
        return clientHeaders;
    }

    /** Reconstructs the full request URI including the query string. */
    private URI getServletRequestUri(HttpServletRequest request) throws URISyntaxException {
        String uri = request.getRequestURL() + (request.getQueryString() != null ? "?" + request.getQueryString() : "");
        return new URI(uri);
    }

    /** Rewrites scheme/host/port of the original URI to point at the TitusMaster leader. */
    private URI constructProxyUri(URI requestUri, String titusMasterUri) {
        URI masterUri = UriBuilder.fromUri(titusMasterUri).build();
        UriBuilder uriBuilder = UriBuilder.fromUri(requestUri)
                .scheme(masterUri.getScheme())
                .host(masterUri.getHost())
                .port(masterUri.getPort());
        return uriBuilder.build();
    }

    private Id createId(String method, int statusCode) {
        String status = (statusCode / 100) + "xx";
        return baseId
                .withTag("method", method)
                .withTag("status", status)
                .withTag("statusCode", String.valueOf(statusCode));
    }

    /**
     * InputStream wrapper that retains a copy of the first {@code byteCopySize} bytes actually read so
     * they can be logged later.
     *
     * <p>Fixes over the previous version: (a) {@code read(byte[], int, int)} now copies only the bytes
     * that were really read (it previously copied up to {@code len} bytes — possibly uninitialized —
     * and advanced its counter by the read count instead of the copied count), and (b)
     * {@code getCopiedBytes} no longer returns one extra byte (old {@code position + 1} off-by-one).
     */
    static class ByteCopyInputStream extends FilterInputStream {

        private final int byteCopySize;
        private final ByteArrayOutputStream bytes = new ByteArrayOutputStream();

        public ByteCopyInputStream(InputStream inputStream, int byteCopySize) {
            super(inputStream);
            this.byteCopySize = byteCopySize;
        }

        @Override
        public synchronized int read() throws IOException {
            int byteRead = super.read();
            if (byteRead > -1 && bytes.size() < byteCopySize) {
                bytes.write(byteRead);
            }
            return byteRead;
        }

        @Override
        public synchronized int read(byte[] b, int off, int len) throws IOException {
            int bytesRead = super.read(b, off, len);
            if (bytesRead > 0) {
                int available = byteCopySize - bytes.size();
                if (available > 0) {
                    bytes.write(b, off, Math.min(available, bytesRead));
                }
            }
            return bytesRead;
        }

        /** Returns exactly the bytes copied so far (at most {@code byteCopySize}). */
        public byte[] getCopiedBytes() {
            return bytes.toByteArray();
        }
    }
}
| 9,376 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/endpoint | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/endpoint/v3/SupplementaryServiceLocationConfiguration.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.endpoint.v3;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.netflix.archaius.api.Config;
import com.netflix.titus.common.util.PropertiesExt;
@Singleton
public class SupplementaryServiceLocationConfiguration {

    private static final String PREFIX = "titusGateway.supplementary.services";

    // Immutable service name -> address mapping, resolved once at construction time.
    private final Map<String, ServiceAddress> serviceMap;

    /**
     * Loads all properties below {@code titusGateway.supplementary.services}, groups them by service
     * name, and materializes one {@link ServiceAddress} per service.
     *
     * <p>Assumes each service defines 'host', 'grpcPort' and 'httpPort' sub-keys; a missing or
     * non-numeric port fails fast here (NumberFormatException/NPE) rather than later at lookup time.
     */
    @Inject
    public SupplementaryServiceLocationConfiguration(Config config) {
        Config serviceConfig = config.getPrefixedView(PREFIX);

        Map<String, String> all = new HashMap<>();
        serviceConfig.forEachProperty((k, v) -> all.put(k, v.toString()));
        Map<String, Map<String, String>> serviceProperties = PropertiesExt.groupByRootName(all, 1);

        Map<String, ServiceAddress> serviceMap = new HashMap<>();
        serviceProperties.forEach((k, v) -> {
            serviceMap.put(k, new ServiceAddress(v.get("host"), Integer.parseInt(v.get("grpcPort")), Integer.parseInt(v.get("httpPort"))));
        });
        this.serviceMap = Collections.unmodifiableMap(serviceMap);
    }

    /** Returns an unmodifiable map of service name to its network address. */
    public Map<String, ServiceAddress> getServices() {
        return serviceMap;
    }

    /**
     * Host and port coordinates of a supplementary service.
     *
     * <p>Declared {@code public static}: static so it no longer captures an implicit reference to the
     * enclosing configuration object (it was an inner class), and public so callers of
     * {@link #getServices()} outside this package can use its accessors.
     */
    public static class ServiceAddress {

        private final String host;
        private final int grpcPort;
        private final int httpPort;

        public ServiceAddress(String host, int grpcPort, int httpPort) {
            this.host = host;
            this.grpcPort = grpcPort;
            this.httpPort = httpPort;
        }

        public String getHost() {
            return host;
        }

        public int getGrpcPort() {
            return grpcPort;
        }

        public int getHttpPort() {
            return httpPort;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            ServiceAddress that = (ServiceAddress) o;
            return grpcPort == that.grpcPort &&
                    httpPort == that.httpPort &&
                    Objects.equals(host, that.host);
        }

        @Override
        public int hashCode() {
            return Objects.hash(host, grpcPort, httpPort);
        }

        @Override
        public String toString() {
            return "ServiceAddress{" +
                    "host='" + host + '\'' +
                    ", grpcPort=" + grpcPort +
                    ", httpPort=" + httpPort +
                    '}';
        }
    }
}
| 9,377 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/endpoint/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/endpoint/v3/grpc/GrpcEndpointConfiguration.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.endpoint.v3.grpc;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
import com.netflix.archaius.api.annotations.PropertyName;
/**
 * GRPC endpoint settings for the Titus Gateway server, read from the
 * {@code titusGateway.endpoint.grpc} property prefix.
 */
@Configuration(prefix = "titusGateway.endpoint.grpc")
public interface GrpcEndpointConfiguration {

    /** Port the gateway GRPC server listens on; defaults to 7104. */
    @PropertyName(name = "port")
    @DefaultValue("7104")
    int getPort();

    /**
     * Graceful shutdown time for GRPC server. If zero, shutdown happens immediately, and all client connections are
     * terminated abruptly.
     */
    @DefaultValue("30000")
    long getShutdownTimeoutMs();
}
| 9,378 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/endpoint/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/endpoint/v3/grpc/DefaultSchedulerServiceGrpc.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.endpoint.v3.grpc;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.netflix.titus.gateway.service.v3.SchedulerService;
import com.netflix.titus.grpc.protogen.SchedulerServiceGrpc;
import com.netflix.titus.grpc.protogen.SchedulingResultEvent;
import com.netflix.titus.grpc.protogen.SchedulingResultRequest;
import io.grpc.stub.StreamObserver;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Subscription;
import static com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil.attachCancellingCallback;
import static com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil.safeOnError;
/**
 * GRPC endpoint implementation of the scheduler service; delegates to {@link SchedulerService}
 * and bridges Rx subscriptions to GRPC stream observers.
 */
@Singleton
public class DefaultSchedulerServiceGrpc extends SchedulerServiceGrpc.SchedulerServiceImplBase {

    private static final Logger logger = LoggerFactory.getLogger(DefaultSchedulerServiceGrpc.class);

    private final SchedulerService schedulerService;

    @Inject
    public DefaultSchedulerServiceGrpc(SchedulerService schedulerService) {
        this.schedulerService = schedulerService;
    }

    /** Emits the last scheduling result for the requested task, then completes the GRPC stream. */
    @Override
    public void getSchedulingResult(SchedulingResultRequest request, StreamObserver<SchedulingResultEvent> responseObserver) {
        Subscription subscription = schedulerService.findLastSchedulingResult(request.getTaskId()).subscribe(
                responseObserver::onNext,
                e -> safeOnError(logger, e, responseObserver),
                responseObserver::onCompleted
        );
        // Unsubscribe from the underlying source if the GRPC client cancels the call.
        attachCancellingCallback(responseObserver, subscription);
    }

    /** Streams scheduling result updates for the requested task until the source completes or the client cancels. */
    @Override
    public void observeSchedulingResults(SchedulingResultRequest request, StreamObserver<SchedulingResultEvent> responseObserver) {
        Subscription subscription = schedulerService.observeSchedulingResults(request.getTaskId()).subscribe(
                responseObserver::onNext,
                e -> safeOnError(logger, e, responseObserver),
                responseObserver::onCompleted
        );
        // Unsubscribe from the underlying source if the GRPC client cancels the call.
        attachCancellingCallback(responseObserver, subscription);
    }
}
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/endpoint/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/endpoint/v3/grpc/TitusGatewayGrpcServer.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.endpoint.v3.grpc;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.netflix.titus.common.framework.fit.adapter.GrpcFitInterceptor;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.ExecutorsExt;
import com.netflix.titus.grpc.protogen.AutoScalingServiceGrpc;
import com.netflix.titus.grpc.protogen.AutoScalingServiceGrpc.AutoScalingServiceImplBase;
import com.netflix.titus.grpc.protogen.EvictionServiceGrpc;
import com.netflix.titus.grpc.protogen.EvictionServiceGrpc.EvictionServiceImplBase;
import com.netflix.titus.grpc.protogen.HealthGrpc;
import com.netflix.titus.grpc.protogen.HealthGrpc.HealthImplBase;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc.JobManagementServiceImplBase;
import com.netflix.titus.grpc.protogen.LoadBalancerServiceGrpc;
import com.netflix.titus.grpc.protogen.LoadBalancerServiceGrpc.LoadBalancerServiceImplBase;
import com.netflix.titus.grpc.protogen.SchedulerServiceGrpc;
import com.netflix.titus.grpc.protogen.SchedulerServiceGrpc.SchedulerServiceImplBase;
import com.netflix.titus.runtime.endpoint.common.grpc.interceptor.ErrorCatchingServerInterceptor;
import com.netflix.titus.runtime.endpoint.metadata.V3HeaderInterceptor;
import io.grpc.Server;
import io.grpc.ServerBuilder;
import io.grpc.ServerInterceptor;
import io.grpc.ServerInterceptors;
import io.grpc.ServiceDescriptor;
import io.grpc.protobuf.services.ProtoReflectionService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static java.util.Arrays.asList;
@Singleton
public class TitusGatewayGrpcServer {
private static final Logger LOG = LoggerFactory.getLogger(TitusGatewayGrpcServer.class);
private final HealthImplBase healthService;
private final JobManagementServiceImplBase jobManagementService;
private final EvictionServiceImplBase evictionService;
private final AutoScalingServiceImplBase appAutoScalingService;
private final LoadBalancerServiceImplBase loadBalancerService;
private final SchedulerServiceImplBase schedulerService;
private final GrpcEndpointConfiguration config;
private final TitusRuntime titusRuntime;
private final ExecutorService grpcCallbackExecutor;
private final AtomicBoolean started = new AtomicBoolean();
private Server server;
private int port;
/**
 * Creates the gateway GRPC server with all service implementations. The server is not started
 * here; see {@code start()}.
 */
@Inject
public TitusGatewayGrpcServer(
        HealthImplBase healthService,
        EvictionServiceImplBase evictionService,
        JobManagementServiceImplBase jobManagementService,
        AutoScalingServiceImplBase appAutoScalingService,
        LoadBalancerServiceImplBase loadBalancerService,
        SchedulerServiceImplBase schedulerService,
        GrpcEndpointConfiguration config,
        TitusRuntime titusRuntime) {
    this.healthService = healthService;
    this.evictionService = evictionService;
    this.jobManagementService = jobManagementService;
    this.appAutoScalingService = appAutoScalingService;
    this.loadBalancerService = loadBalancerService;
    this.schedulerService = schedulerService;
    this.config = config;
    this.titusRuntime = titusRuntime;
    // Instrumented cached thread pool handed to the GRPC server as its executor (see start()).
    this.grpcCallbackExecutor = ExecutorsExt.instrumentedCachedThreadPool(titusRuntime.getRegistry(), "grpcCallbackExecutor");
}
/** Port the server is (or will be) bound to; updated to the actual bound port after start(). */
public int getPort() {
    return port;
}
/**
 * Builds and starts the GRPC server on the configured port, registering every gateway service
 * behind the interceptor chain produced by {@code createInterceptors}. Idempotent: only the
 * first call has an effect. Wraps startup {@link IOException} in a {@link RuntimeException}.
 */
@PostConstruct
public void start() {
    if (started.getAndSet(true)) {
        return;
    }
    this.port = config.getPort();
    this.server = configure(ServerBuilder.forPort(port).executor(grpcCallbackExecutor))
            .addService(ServerInterceptors.intercept(
                    healthService,
                    createInterceptors(HealthGrpc.getServiceDescriptor())
            ))
            .addService(ServerInterceptors.intercept(
                    jobManagementService,
                    createInterceptors(JobManagementServiceGrpc.getServiceDescriptor())
            ))
            .addService(ServerInterceptors.intercept(
                    evictionService,
                    createInterceptors(EvictionServiceGrpc.getServiceDescriptor())
            ))
            .addService(ServerInterceptors.intercept(
                    appAutoScalingService,
                    createInterceptors(AutoScalingServiceGrpc.getServiceDescriptor())
            ))
            .addService(ServerInterceptors.intercept(
                    schedulerService,
                    createInterceptors(SchedulerServiceGrpc.getServiceDescriptor())
            ))
            .addService(ServerInterceptors.intercept(
                    loadBalancerService,
                    createInterceptors(LoadBalancerServiceGrpc.getServiceDescriptor())
            ))
            // Enables GRPC server reflection (e.g. for grpcurl/CLI discovery).
            .addService(ProtoReflectionService.newInstance())
            .build();

    LOG.info("Starting gRPC server on port {}.", port);
    try {
        this.server.start();
        // Re-read the actual bound port (differs from the configured value when port 0 was requested).
        this.port = server.getPort();
    } catch (final IOException e) {
        throw new RuntimeException(e);
    }
    LOG.info("Started gRPC server on port {}.", port);
}
@PreDestroy
public void shutdown() {
if (server.isShutdown()) {
return;
}
long timeoutMs = config.getShutdownTimeoutMs();
try {
if (timeoutMs <= 0) {
server.shutdownNow();
} else {
server.shutdown();
try {
server.awaitTermination(timeoutMs, TimeUnit.MILLISECONDS);
} catch (InterruptedException ignore) {
}
if (!server.isShutdown()) {
server.shutdownNow();
}
}
} finally {
grpcCallbackExecutor.shutdown();
if (timeoutMs > 0) {
try {
grpcCallbackExecutor.awaitTermination(timeoutMs, TimeUnit.MILLISECONDS);
} catch (InterruptedException ignore) {
}
}
}
}
/**
* Override to change default server configuration.
*/
protected ServerBuilder configure(ServerBuilder serverBuilder) {
return serverBuilder;
}
/**
* Override to add server side interceptors.
*/
protected List<ServerInterceptor> createInterceptors(ServiceDescriptor serviceDescriptor) {
return GrpcFitInterceptor.appendIfFitEnabled(
asList(new ErrorCatchingServerInterceptor(), new V3HeaderInterceptor()),
titusRuntime
);
}
}
| 9,380 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/endpoint/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/endpoint/v3/rest/SchedulerResource.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.endpoint.v3.rest;
import javax.inject.Inject;
import javax.inject.Singleton;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import com.netflix.titus.gateway.service.v3.SchedulerService;
import com.netflix.titus.grpc.protogen.SchedulingResultEvent;
import com.netflix.titus.runtime.endpoint.common.rest.Responses;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
/**
 * JAX-RS REST endpoint exposing scheduler queries under {@code /v3/scheduler}.
 * Delegates to {@link SchedulerService} and converts its single-value observable
 * into a synchronous HTTP response via {@link Responses}.
 */
@Produces(MediaType.APPLICATION_JSON)
@Consumes(MediaType.APPLICATION_JSON)
@Api(tags = "Scheduler")
@Path("/v3/scheduler")
@Singleton
public class SchedulerResource {
    private final SchedulerService schedulerService;

    @Inject
    public SchedulerResource(SchedulerService schedulerService) {
        this.schedulerService = schedulerService;
    }

    /**
     * {@code GET /v3/scheduler/results/{id}} — returns the last known scheduling result
     * for the given task id.
     *
     * @param taskId id of the task to look up
     * @return the last scheduling result event for the task
     */
    @GET
    @ApiOperation("Find scheduling result for a task")
    @Path("/results/{id}")
    public SchedulingResultEvent findLastSchedulingResult(@PathParam("id") String taskId) {
        return Responses.fromSingleValueObservable(schedulerService.findLastSchedulingResult(taskId));
    }
}
| 9,381 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/endpoint/v3/rest | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/endpoint/v3/rest/representation/TierWrapper.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.endpoint.v3.rest.representation;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.titus.api.model.Tier;
/**
 * Immutable JSON wrapper for a {@link Tier} value, serialized as {@code {"tier": ...}}.
 * Used by the REST layer to give the tier a named JSON property.
 */
public class TierWrapper {
    private final Tier tier;

    @JsonCreator
    public TierWrapper(@JsonProperty("tier") Tier tier) {
        this.tier = tier;
    }

    /** Returns the wrapped tier value. */
    public Tier getTier() {
        return tier;
    }
}
| 9,382 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3/TitusManagementService.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3;
/**
 * Marker interface for the Titus management gateway service. Currently declares no
 * operations; it exists as a Guice binding target (bound to its default implementation
 * in the service module).
 */
public interface TitusManagementService {
}
| 9,383 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3/SchedulerService.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3;
import com.netflix.titus.grpc.protogen.SchedulingResultEvent;
import rx.Observable;
/**
 * Gateway service for the scheduler component, exposing Fenzo scheduling results
 * for individual tasks.
 */
public interface SchedulerService {
    /**
     * Returns the last known scheduling result for a task.
     *
     * @param taskId id of the task to look up
     * @return {@link Observable#empty()} if the task is not found, or an observable
     * emitting the scheduling result otherwise
     */
    Observable<SchedulingResultEvent> findLastSchedulingResult(String taskId);

    /**
     * Observe Fenzo scheduling results for a task. The stream is completed when the task is successfully scheduled or
     * removed from the Fenzo queue.
     *
     * @param taskId id of the task to observe
     */
    Observable<SchedulingResultEvent> observeSchedulingResults(String taskId);
}
| 9,384 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3/V3ServiceModule.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3;
import javax.inject.Singleton;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import com.netflix.archaius.ConfigProxyFactory;
import com.netflix.titus.gateway.service.v3.internal.DefaultAutoScalingService;
import com.netflix.titus.gateway.service.v3.internal.DefaultHealthService;
import com.netflix.titus.gateway.service.v3.internal.DefaultLoadBalancerService;
import com.netflix.titus.gateway.service.v3.internal.DefaultSchedulerService;
import com.netflix.titus.gateway.service.v3.internal.DefaultTitusManagementService;
import com.netflix.titus.gateway.service.v3.internal.DisruptionBudgetSanitizerConfiguration;
import com.netflix.titus.gateway.service.v3.internal.GatewayConfiguration;
import com.netflix.titus.gateway.service.v3.internal.GatewayJobServiceGateway;
import com.netflix.titus.runtime.connector.GrpcClientConfiguration;
import com.netflix.titus.runtime.connector.GrpcRequestConfiguration;
import com.netflix.titus.runtime.jobmanager.JobManagerConfiguration;
import com.netflix.titus.runtime.jobmanager.gateway.JobServiceGateway;
import com.netflix.titus.runtime.service.AutoScalingService;
import com.netflix.titus.runtime.service.HealthService;
import com.netflix.titus.runtime.service.LoadBalancerService;
/**
 * Guice module wiring the v3 gateway service interfaces to their default
 * implementations, and providing Archaius-backed configuration proxies.
 */
public class V3ServiceModule extends AbstractModule {
    @Override
    protected void configure() {
        // Interface -> default implementation bindings for the gateway services.
        bind(HealthService.class).to(DefaultHealthService.class);
        bind(JobServiceGateway.class).to(GatewayJobServiceGateway.class);
        bind(AutoScalingService.class).to(DefaultAutoScalingService.class);
        bind(LoadBalancerService.class).to(DefaultLoadBalancerService.class);
        bind(TitusManagementService.class).to(DefaultTitusManagementService.class);
        bind(SchedulerService.class).to(DefaultSchedulerService.class);
    }

    // Each provider below creates a dynamic-configuration proxy backed by Archaius;
    // all are singletons so the proxies are built once.

    @Provides
    @Singleton
    public GrpcClientConfiguration getGrpcClientConfiguration(ConfigProxyFactory factory) {
        return factory.newProxy(GrpcClientConfiguration.class);
    }

    @Provides
    @Singleton
    public JobManagerConfiguration getJobManagerConfiguration(ConfigProxyFactory factory) {
        return factory.newProxy(JobManagerConfiguration.class);
    }

    @Provides
    @Singleton
    public GrpcRequestConfiguration getChannelTunablesConfiguration(ConfigProxyFactory factory) {
        return factory.newProxy(GrpcRequestConfiguration.class);
    }

    @Provides
    @Singleton
    public GatewayConfiguration getGatewayConfiguration(ConfigProxyFactory factory) {
        return factory.newProxy(GatewayConfiguration.class);
    }

    @Provides
    @Singleton
    public DisruptionBudgetSanitizerConfiguration getDisruptionBudgetSanitizerConfiguration(ConfigProxyFactory factory) {
        return factory.newProxy(DisruptionBudgetSanitizerConfiguration.class);
    }
}
| 9,385 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3/internal/DefaultAutoScalingService.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.google.protobuf.Empty;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.grpc.protogen.AutoScalingServiceGrpc.AutoScalingServiceStub;
import com.netflix.titus.grpc.protogen.DeletePolicyRequest;
import com.netflix.titus.grpc.protogen.GetPolicyResult;
import com.netflix.titus.grpc.protogen.JobId;
import com.netflix.titus.grpc.protogen.PutPolicyRequest;
import com.netflix.titus.grpc.protogen.ScalingPolicyID;
import com.netflix.titus.grpc.protogen.UpdatePolicyRequest;
import com.netflix.titus.runtime.connector.GrpcClientConfiguration;
import com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil;
import com.netflix.titus.runtime.service.AutoScalingService;
import io.grpc.stub.StreamObserver;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Completable;
import rx.Observable;
import static com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil.createRequestCompletable;
import static com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil.createRequestObservable;
import static com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil.createSimpleClientResponseObserver;
import static com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil.createWrappedStub;
/**
 * Default {@link AutoScalingService} implementation that forwards each call to the
 * master's gRPC {@link AutoScalingServiceStub}, wrapping the async stub calls into
 * rx {@link Observable}/{@link Completable} with the configured request timeout
 * applied both as a gRPC deadline and as an observable timeout.
 */
@Singleton
public class DefaultAutoScalingService implements AutoScalingService {

    // Loggers are constants: static final by convention.
    private static final Logger logger = LoggerFactory.getLogger(DefaultAutoScalingService.class);

    private final GrpcClientConfiguration configuration;
    // Injected once and never reassigned, so final.
    private final AutoScalingServiceStub client;

    @Inject
    public DefaultAutoScalingService(GrpcClientConfiguration configuration, AutoScalingServiceStub client) {
        this.configuration = configuration;
        this.client = client;
    }

    /** Fetches all scaling policies attached to the given job. */
    @Override
    public Observable<GetPolicyResult> getJobScalingPolicies(JobId request, CallMetadata callMetadata) {
        logger.info("Getting policy for JobId {}", request);
        return createRequestObservable(emitter -> {
            StreamObserver<GetPolicyResult> streamObserver = createSimpleClientResponseObserver(emitter);
            createWrappedStub(client, callMetadata, configuration.getRequestTimeout()).getJobScalingPolicies(request, streamObserver);
        }, configuration.getRequestTimeout());
    }

    /** Creates a new scaling policy and emits the id assigned to it. */
    @Override
    public Observable<ScalingPolicyID> setAutoScalingPolicy(PutPolicyRequest request, CallMetadata callMetadata) {
        logger.info("Setting policy request {}", request);
        return createRequestObservable(emitter -> {
            StreamObserver<ScalingPolicyID> streamObserver = createSimpleClientResponseObserver(emitter);
            createWrappedStub(client, callMetadata, configuration.getRequestTimeout()).setAutoScalingPolicy(request, streamObserver);
        }, configuration.getRequestTimeout());
    }

    /** Fetches a single scaling policy by its id. */
    @Override
    public Observable<GetPolicyResult> getScalingPolicy(ScalingPolicyID request, CallMetadata callMetadata) {
        return createRequestObservable(emitter -> {
            StreamObserver<GetPolicyResult> streamObserver = createSimpleClientResponseObserver(emitter);
            createWrappedStub(client, callMetadata, configuration.getRequestTimeout()).getScalingPolicy(request, streamObserver);
        }, configuration.getRequestTimeout());
    }

    /** Fetches every scaling policy known to the master. */
    @Override
    public Observable<GetPolicyResult> getAllScalingPolicies(CallMetadata callMetadata) {
        return createRequestObservable(emitter -> {
            StreamObserver<GetPolicyResult> streamObserver = createSimpleClientResponseObserver(emitter);
            createWrappedStub(client, callMetadata, configuration.getRequestTimeout()).getAllScalingPolicies(Empty.getDefaultInstance(), streamObserver);
        }, configuration.getRequestTimeout());
    }

    /** Deletes a scaling policy; completes when the master acknowledges the removal. */
    @Override
    public Completable deleteAutoScalingPolicy(DeletePolicyRequest request, CallMetadata callMetadata) {
        return createRequestCompletable(emitter -> {
            StreamObserver<Empty> streamObserver = GrpcUtil.createEmptyClientResponseObserver(emitter);
            createWrappedStub(client, callMetadata, configuration.getRequestTimeout()).deleteAutoScalingPolicy(request, streamObserver);
        }, configuration.getRequestTimeout());
    }

    /** Updates an existing scaling policy; completes when the master acknowledges the update. */
    @Override
    public Completable updateAutoScalingPolicy(UpdatePolicyRequest request, CallMetadata callMetadata) {
        return createRequestCompletable(emitter -> {
            StreamObserver<Empty> streamObserver = GrpcUtil.createEmptyClientResponseObserver(emitter);
            createWrappedStub(client, callMetadata, configuration.getRequestTimeout()).updateAutoScalingPolicy(request, streamObserver);
        }, configuration.getRequestTimeout());
    }
}
| 9,386 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3/internal/ExtendedJobSanitizer.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.Predicate;
import javax.inject.Named;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.titus.api.FeatureRolloutPlans;
import com.netflix.titus.api.jobmanager.model.job.ContainerResources;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.SecurityProfile;
import com.netflix.titus.api.jobmanager.model.job.sanitizer.JobAssertions;
import com.netflix.titus.api.service.TitusServiceException;
import com.netflix.titus.common.model.sanitizer.EntitySanitizer;
import com.netflix.titus.common.model.sanitizer.ValidationError;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.feature.FeatureCompliance;
import com.netflix.titus.common.util.feature.FeatureCompliance.NonCompliance;
import com.netflix.titus.runtime.jobmanager.JobManagerConfiguration;
import static com.netflix.titus.api.FeatureRolloutPlans.CONTAINER_ACCOUNT_ID_AND_SUBNETS_REQUIRED_FEATURE;
import static com.netflix.titus.api.FeatureRolloutPlans.ENVIRONMENT_VARIABLE_NAMES_STRICT_VALIDATION_FEATURE;
import static com.netflix.titus.api.FeatureRolloutPlans.SECURITY_GROUPS_REQUIRED_FEATURE;
import static com.netflix.titus.api.jobmanager.JobAttributes.JOB_CONTAINER_ATTRIBUTE_ACCOUNT_ID;
import static com.netflix.titus.api.jobmanager.JobAttributes.JOB_CONTAINER_ATTRIBUTE_SUBNETS;
import static com.netflix.titus.api.jobmanager.model.job.sanitizer.JobSanitizerBuilder.JOB_STRICT_SANITIZER;
import static com.netflix.titus.common.util.feature.FeatureComplianceTypes.collectComplianceMetrics;
import static com.netflix.titus.common.util.feature.FeatureComplianceTypes.logNonCompliant;
import static com.netflix.titus.common.util.feature.FeatureComplianceTypes.mergeComplianceValidators;
/**
 * Extends the default job model sanitizer with extra checks: after delegating to the
 * strict {@link EntitySanitizer}, job descriptors are run through a set of feature
 * compliance validators, defaults are injected for jobs not yet subject to strict
 * enforcement, and each violation is recorded in the job's {@code titus.noncompliant.*}
 * attributes for later inspection.
 */
class ExtendedJobSanitizer implements EntitySanitizer {
    // Prefix reserved for sanitizer-generated attributes; client-supplied attributes
    // under this prefix are stripped in resetAttributes before sanitization.
    private static final String TITUS_NON_COMPLIANT_ROOT_NAME = "titus.noncompliant";
    private static final String TITUS_NON_COMPLIANT = TITUS_NON_COMPLIANT_ROOT_NAME + ".";
    @VisibleForTesting
    static final String TITUS_NON_COMPLIANT_FEATURES = TITUS_NON_COMPLIANT + "features";
    private final JobManagerConfiguration jobManagerConfiguration;
    // The strict sanitizer this class decorates.
    private final EntitySanitizer entitySanitizer;
    private final DisruptionBudgetSanitizer disruptionBudgetSanitizer;
    // Feature flags: when a predicate returns true, the corresponding feature is strictly
    // enforced for that job; when false, defaults are injected instead of rejecting.
    private final Predicate<JobDescriptor> securityGroupsRequiredPredicate;
    private final Predicate<JobDescriptor> environmentVariableNamesStrictValidationPredicate;
    // Merged compliance validators; violations are logged and reported as metrics.
    private final FeatureCompliance<JobDescriptor<?>> jobComplianceChecker;

    public ExtendedJobSanitizer(JobManagerConfiguration jobManagerConfiguration,
                                JobAssertions jobAssertions,
                                @Named(JOB_STRICT_SANITIZER) EntitySanitizer entitySanitizer,
                                DisruptionBudgetSanitizer disruptionBudgetSanitizer,
                                @Named(SECURITY_GROUPS_REQUIRED_FEATURE) Predicate<JobDescriptor> securityGroupsRequiredPredicate,
                                @Named(ENVIRONMENT_VARIABLE_NAMES_STRICT_VALIDATION_FEATURE) Predicate<JobDescriptor> environmentVariableNamesStrictValidationPredicate,
                                TitusRuntime titusRuntime) {
        this.jobManagerConfiguration = jobManagerConfiguration;
        this.entitySanitizer = entitySanitizer;
        this.disruptionBudgetSanitizer = disruptionBudgetSanitizer;
        this.securityGroupsRequiredPredicate = securityGroupsRequiredPredicate;
        this.environmentVariableNamesStrictValidationPredicate = environmentVariableNamesStrictValidationPredicate;
        this.jobComplianceChecker = logNonCompliant(collectComplianceMetrics(titusRuntime.getRegistry(),
                mergeComplianceValidators(
                        JobFeatureComplianceChecks.missingSecurityGroups(),
                        JobFeatureComplianceChecks.missingIamRole(),
                        JobFeatureComplianceChecks.environmentVariablesNames(jobAssertions),
                        JobFeatureComplianceChecks.entryPointViolations(),
                        JobFeatureComplianceChecks.minDiskSize(jobManagerConfiguration),
                        JobFeatureComplianceChecks.noDisruptionBudget(),
                        JobFeatureComplianceChecks.missingContainerAccountIdAndSubnets(jobManagerConfiguration)
                ))
        );
    }

    /** Validation is delegated unchanged to the wrapped strict sanitizer. */
    @Override
    public <T> Set<ValidationError> validate(T entity) {
        return entitySanitizer.validate(entity);
    }

    /**
     * Applies the strict sanitizer first, then the extended job-descriptor sanitization
     * when the entity is a {@link JobDescriptor}.
     *
     * @return {@link Optional#empty()} if nothing changed (same reference), otherwise the
     * sanitized entity
     */
    @Override
    public <T> Optional<T> sanitize(T entity) {
        T sanitized = entitySanitizer.sanitize(entity).orElse(entity);
        if (sanitized instanceof com.netflix.titus.api.jobmanager.model.job.JobDescriptor) {
            sanitized = (T) sanitizeJobDescriptor((JobDescriptor) sanitized);
        }
        // Reference comparison: the pipeline returns the same instance when untouched.
        return entity == sanitized ? Optional.empty() : Optional.of(sanitized);
    }

    // Runs the compliance validators and, for each violation, either injects defaults,
    // throws (env var names under strict enforcement), or records the violation in the
    // job attributes. Returns the input unchanged when there are no violations.
    private JobDescriptor<?> sanitizeJobDescriptor(JobDescriptor<?> providedJobDescriptor) {
        JobDescriptor<?> jobDescriptorWithAllowedAttributes = resetAttributes(providedJobDescriptor);
        return jobComplianceChecker.checkCompliance(jobDescriptorWithAllowedAttributes).map(violations -> {
            JobDescriptor sanitized = jobDescriptorWithAllowedAttributes;
            // Predicate false => strict enforcement not active for this job; fill in
            // configured defaults instead of failing. TODO confirm predicate semantics.
            if (!securityGroupsRequiredPredicate.test(jobDescriptorWithAllowedAttributes)) {
                // Missing security groups
                SecurityProfile.Builder securityProfileBuilder = jobDescriptorWithAllowedAttributes.getContainer().getSecurityProfile().toBuilder();
                violations.findViolation(SECURITY_GROUPS_REQUIRED_FEATURE).ifPresent(report ->
                        securityProfileBuilder.withSecurityGroups(jobManagerConfiguration.getDefaultSecurityGroups())
                );
                // Missing IAM role
                violations.findViolation(FeatureRolloutPlans.IAM_ROLE_REQUIRED_FEATURE).ifPresent(report ->
                        securityProfileBuilder.withIamRole(jobManagerConfiguration.getDefaultIamRole())
                );
                sanitized = sanitized.toBuilder()
                        .withContainer(sanitized.getContainer().toBuilder()
                                .withSecurityProfile(securityProfileBuilder.build())
                                .build()
                        ).build();
            }
            // Min disk size: bump undersized disks to the configured minimum.
            NonCompliance<JobDescriptor<?>> diskSizeViolation = violations.findViolation(FeatureRolloutPlans.MIN_DISK_SIZE_STRICT_VALIDATION_FEATURE).orElse(null);
            if (diskSizeViolation != null) {
                ContainerResources containerResources = sanitized.getContainer().getContainerResources();
                sanitized = sanitized.toBuilder().withContainer(sanitized.getContainer().toBuilder()
                        .withContainerResources(
                                containerResources.toBuilder().withDiskMB(jobManagerConfiguration.getMinDiskSizeMB()).build()
                        ).build()
                ).build();
            }
            // TODO Once not needed, remove this code and add the field level validator which invokes method JobAssertions#validateEnvironmentVariableNames.
            // We have to throw the exception here, as we cannot conditionally check violations using annotations.
            violations.findViolation(ENVIRONMENT_VARIABLE_NAMES_STRICT_VALIDATION_FEATURE).ifPresent(nonCompliance -> {
                if (environmentVariableNamesStrictValidationPredicate.test(jobDescriptorWithAllowedAttributes)) {
                    throw TitusServiceException.invalidArgument("Environment variable validation error: " + nonCompliance.toErrorMessage());
                }
            });
            // Missing accountId/subnets container attributes: inject the configured defaults.
            Map<String, String> defaultContainerAttributes = new HashMap<>();
            violations.findViolation(CONTAINER_ACCOUNT_ID_AND_SUBNETS_REQUIRED_FEATURE).ifPresent(nonCompliance -> {
                defaultContainerAttributes.put(JOB_CONTAINER_ATTRIBUTE_ACCOUNT_ID, jobManagerConfiguration.getDefaultContainerAccountId());
                defaultContainerAttributes.put(JOB_CONTAINER_ATTRIBUTE_SUBNETS, jobManagerConfiguration.getDefaultSubnets());
            });
            if (!CollectionsExt.isNullOrEmpty(defaultContainerAttributes)) {
                Map<String, String> sanitizedContainerAttributes = new HashMap<>(sanitized.getContainer().getAttributes());
                sanitizedContainerAttributes.putAll(defaultContainerAttributes);
                sanitized = sanitized
                        .toBuilder()
                        .withContainer(sanitized.getContainer().toBuilder().withAttributes(sanitizedContainerAttributes).build())
                        .build();
            }
            // Set default disruption budget if not set
            sanitized = disruptionBudgetSanitizer.sanitize(sanitized);
            // Record all violations as titus.noncompliant.* job attributes.
            return sanitized.toBuilder()
                    .withAttributes(CollectionsExt.merge(jobDescriptorWithAllowedAttributes.getAttributes(), buildNonComplianceJobAttributeMap(violations)))
                    .build();
        }).orElse(jobDescriptorWithAllowedAttributes);
    }

    // Drops any client-supplied attributes under the reserved titus.noncompliant prefix
    // so only this sanitizer can produce them. Returns the same instance when nothing
    // was stripped.
    private JobDescriptor<?> resetAttributes(JobDescriptor<?> jobDescriptor) {
        Map<String, String> attributes = jobDescriptor.getAttributes();
        if (attributes.isEmpty()) {
            return jobDescriptor;
        }
        Map<String, String> allowedAttributes = new HashMap<>();
        attributes.forEach((key, value) -> {
            if (!key.startsWith(TITUS_NON_COMPLIANT_ROOT_NAME)) {
                allowedAttributes.put(key, value);
            }
        });
        return allowedAttributes.size() == attributes.size()
                ? jobDescriptor
                : jobDescriptor.toBuilder().withAttributes(allowedAttributes).build();
    }

    // Builds the titus.noncompliant.* attribute map: a comma-separated feature list plus
    // per-feature detail entries. Callers only invoke this with a non-empty violation
    // list (inside the map() above), so the trailing-comma trim is safe.
    private Map<String, String> buildNonComplianceJobAttributeMap(FeatureCompliance.NonComplianceList<JobDescriptor<?>> violations) {
        StringBuilder violatedFeaturesBuilder = new StringBuilder();
        Map<String, String> violationJobAttributes = new HashMap<>();
        violations.getViolations().forEach(violation -> {
            violatedFeaturesBuilder.append(violation.getFeatureId()).append(',');
            String detailsPrefix = TITUS_NON_COMPLIANT + "details." + violation.getFeatureId() + '.';
            violation.getContext().forEach((key, value) -> {
                violationJobAttributes.put(detailsPrefix + key, value);
            });
        });
        violationJobAttributes.put(TITUS_NON_COMPLIANT_FEATURES, violatedFeaturesBuilder.substring(0, violatedFeaturesBuilder.length() - 1));
        return violationJobAttributes;
    }
}
| 9,387 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3/internal/JobFeatureComplianceChecks.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Predicate;
import java.util.regex.Pattern;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.titus.api.FeatureRolloutPlans;
import com.netflix.titus.api.jobmanager.model.job.ContainerResources;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.api.jobmanager.model.job.migration.MigrationPolicy;
import com.netflix.titus.api.jobmanager.model.job.sanitizer.JobAssertions;
import com.netflix.titus.api.json.ObjectMappers;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.StringExt;
import com.netflix.titus.common.util.feature.FeatureCompliance;
import com.netflix.titus.common.util.feature.FeatureCompliance.NonComplianceList;
import com.netflix.titus.grpc.protogen.NetworkConfiguration;
import com.netflix.titus.runtime.jobmanager.JobManagerConfiguration;
import static com.netflix.titus.api.FeatureRolloutPlans.CONTAINER_ACCOUNT_ID_AND_SUBNETS_REQUIRED_FEATURE;
import static com.netflix.titus.api.FeatureRolloutPlans.ENTRY_POINT_STRICT_VALIDATION_FEATURE;
import static com.netflix.titus.api.FeatureRolloutPlans.ENVIRONMENT_VARIABLE_NAMES_STRICT_VALIDATION_FEATURE;
import static com.netflix.titus.api.FeatureRolloutPlans.IAM_ROLE_REQUIRED_FEATURE;
import static com.netflix.titus.api.FeatureRolloutPlans.MIN_DISK_SIZE_STRICT_VALIDATION_FEATURE;
import static com.netflix.titus.api.FeatureRolloutPlans.SECURITY_GROUPS_REQUIRED_FEATURE;
import static com.netflix.titus.api.jobmanager.JobAttributes.JOB_CONTAINER_ATTRIBUTE_ACCOUNT_ID;
import static com.netflix.titus.api.jobmanager.JobAttributes.JOB_CONTAINER_ATTRIBUTE_SUBNETS;
class JobFeatureComplianceChecks {
@VisibleForTesting
static final String DISRUPTION_BUDGET_FEATURE = "disruptionBudget";
private static final Map<String, String> NO_ACCOUNT_ID_AND_SUBNETS_CONTAINER_ATTRIBUTES_CONTEXT = Collections.singletonMap("noContainerAccountIdAndSubnets", "Container accountId and/or subnet container attributes are empty/inconsistent");
private static final Map<String, String> NO_IAM_ROLE_CONTEXT = Collections.singletonMap("noIamRole", "IAM role not set");
private static final Map<String, String> NO_SECURITY_GROUPS_CONTEXT = Collections.singletonMap("noSecurityGroups", "Security groups not set");
private static final Map<String, String> ENTRY_POINT_WITH_SPACES_CONTEXT = Collections.singletonMap("entryPointBinaryWithSpaces", "Entry point contains spaces");
private static final Predicate<String> CONTAINS_SPACES = Pattern.compile(".*\\s+.*").asPredicate();
/**
* See {@link FeatureRolloutPlans#ENVIRONMENT_VARIABLE_NAMES_STRICT_VALIDATION_FEATURE}.
*/
static FeatureCompliance<JobDescriptor<?>> environmentVariablesNames(JobAssertions jobAssertions) {
return jobDescriptor -> {
Map<String, String> context = jobAssertions.validateEnvironmentVariableNames(jobDescriptor.getContainer().getEnv());
if (context.isEmpty()) {
return Optional.empty();
}
return Optional.of(NonComplianceList.of(
ENVIRONMENT_VARIABLE_NAMES_STRICT_VALIDATION_FEATURE,
jobDescriptor,
context,
"Environment variable names may include only ASCII letters, digits and '_', and the first letter cannot be a digit."
));
};
}
/**
* See {@link FeatureRolloutPlans#IAM_ROLE_REQUIRED_FEATURE}.
*/
static FeatureCompliance<JobDescriptor<?>> missingIamRole() {
return jobDescriptor -> {
if (!jobDescriptor.getContainer().getSecurityProfile().getIamRole().isEmpty()) {
return Optional.empty();
}
return Optional.of(NonComplianceList.of(
IAM_ROLE_REQUIRED_FEATURE,
jobDescriptor,
NO_IAM_ROLE_CONTEXT,
"IAM role not set in the job descriptor"
));
};
}
/**
* See {@link FeatureRolloutPlans#SECURITY_GROUPS_REQUIRED_FEATURE}.
*/
static FeatureCompliance<JobDescriptor<?>> missingSecurityGroups() {
return jobDescriptor -> {
if (!jobDescriptor.getContainer().getSecurityProfile().getSecurityGroups().isEmpty()) {
return Optional.empty();
}
// On the HighScale network mode, it is important that the security group *not* be set,
// as the purpose of the HighScale network mode is to enforce unified security groups
// on the backend. Security groups are not configurable by the user with this mode.
if (jobDescriptor.getNetworkConfiguration().getNetworkMode() == NetworkConfiguration.NetworkMode.HighScale.getNumber() ) {
return Optional.empty();
}
return Optional.of(NonComplianceList.of(
SECURITY_GROUPS_REQUIRED_FEATURE,
jobDescriptor,
NO_SECURITY_GROUPS_CONTEXT,
"At least one security group must be set in the job descriptor"
));
};
}
/**
* A feature compliance is violated if there is a default accountId and subnets combination present in the {@link JobManagerConfiguration} for the deployment stack
* but the container attributes in the {@link JobDescriptor} are missing one or both values.
*
* @param jobManagerConfiguration {@link JobManagerConfiguration}
* @return feature compliance evaluation
*/
static FeatureCompliance<JobDescriptor<?>> missingContainerAccountIdAndSubnets(JobManagerConfiguration jobManagerConfiguration) {
return jobDescriptor -> {
String defaultAccountId = jobManagerConfiguration.getDefaultContainerAccountId();
String defaultSubnets = jobManagerConfiguration.getDefaultSubnets();
// Ignore this compliance check unless both default accountId and subnet configuration properties are set implying our
// intent to aid the job descriptor sanitization
if (StringExt.isEmpty(defaultAccountId) || StringExt.isEmpty(defaultSubnets)) {
return Optional.empty();
}
// On the HighScale network mode, it is important that the account/subents *not* be set.
// Subnets/Accounts are not configurable by the user with this mode.
if (jobDescriptor.getNetworkConfiguration().getNetworkMode() == NetworkConfiguration.NetworkMode.HighScale.getNumber() ) {
return Optional.empty();
}
String accountIdContainerAttribute = jobDescriptor.getContainer().getAttributes().get(JOB_CONTAINER_ATTRIBUTE_ACCOUNT_ID);
String subnetContainerAttribute = jobDescriptor.getContainer().getAttributes().get(JOB_CONTAINER_ATTRIBUTE_SUBNETS);
// Feature compliance is violated if either (i) both attributes are not set, or (ii) accountId is set to the default value but the subnets value is not set.
// In either case, we should take action in response to this violation.
if ((StringExt.isEmpty(accountIdContainerAttribute) || defaultAccountId.equals(accountIdContainerAttribute)) && StringExt.isEmpty(subnetContainerAttribute)) {
return Optional.of(NonComplianceList.of(
CONTAINER_ACCOUNT_ID_AND_SUBNETS_REQUIRED_FEATURE,
jobDescriptor,
NO_ACCOUNT_ID_AND_SUBNETS_CONTAINER_ATTRIBUTES_CONTEXT,
"accountId and/or subnets container attributes are not set"
));
}
return Optional.empty();
};
}
/**
* See {@link FeatureRolloutPlans#ENTRY_POINT_STRICT_VALIDATION_FEATURE}.
*/
static FeatureCompliance<JobDescriptor<?>> entryPointViolations() {
return jobDescriptor -> {
List<String> entryPoint = jobDescriptor.getContainer().getEntryPoint();
List<String> command = jobDescriptor.getContainer().getCommand();
if (!CollectionsExt.isNullOrEmpty(entryPoint) && CollectionsExt.isNullOrEmpty(command) && CONTAINS_SPACES.test(entryPoint.get(0))) {
return Optional.of(NonComplianceList.of(
ENTRY_POINT_STRICT_VALIDATION_FEATURE,
jobDescriptor,
ENTRY_POINT_WITH_SPACES_CONTEXT,
"First entry point value cannot contain spaces"
));
}
return Optional.empty();
};
}
/**
* See {@link FeatureRolloutPlans#MIN_DISK_SIZE_STRICT_VALIDATION_FEATURE}.
*/
static FeatureCompliance<JobDescriptor<?>> minDiskSize(JobManagerConfiguration jobManagerConfiguration) {
return jobDescriptor -> {
ContainerResources containerResources = jobDescriptor.getContainer().getContainerResources();
int minDiskSize = jobManagerConfiguration.getMinDiskSizeMB();
if (containerResources.getDiskMB() >= minDiskSize) {
return Optional.empty();
}
return Optional.of(NonComplianceList.of(
MIN_DISK_SIZE_STRICT_VALIDATION_FEATURE,
jobDescriptor,
Collections.singletonMap("diskSizeLessThanMin", String.format("Minimum disk size is %sMB, but is set %sMB", minDiskSize, containerResources.getDiskMB())),
String.format("Job descriptor must declare disk size that is no less than %sMB", minDiskSize)
));
};
}
/**
* Disruption budget is not required to be set by clients.
*/
static FeatureCompliance<JobDescriptor<?>> noDisruptionBudget() {
return jobDescriptor -> {
if (JobFunctions.hasDisruptionBudget(jobDescriptor)) {
return Optional.empty();
}
String legacyMigrationPolicyInfo;
if (JobFunctions.isBatchJob(jobDescriptor)) {
legacyMigrationPolicyInfo = "no migration policy (batch job)";
} else {
MigrationPolicy migrationPolicy = ((ServiceJobExt) jobDescriptor.getExtensions()).getMigrationPolicy();
legacyMigrationPolicyInfo = "service job with legacy migration policy: " + toString(migrationPolicy);
}
return Optional.of(NonComplianceList.of(
DISRUPTION_BUDGET_FEATURE,
jobDescriptor,
Collections.singletonMap("legacyMigration", legacyMigrationPolicyInfo),
"Job descriptor without disruption budget"
));
};
}
private static String toString(MigrationPolicy migrationPolicy) {
try {
return (migrationPolicy == null ? "none" : ObjectMappers.storeMapper().writeValueAsString(migrationPolicy));
} catch (Exception e) {
return String.format("<%s>", e.getMessage());
}
}
}
| 9,388 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3/internal/TaskDataInjector.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.netflix.titus.api.FeatureActivationConfiguration;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.grpc.protogen.BasicImage;
import com.netflix.titus.grpc.protogen.JobChangeNotification;
import com.netflix.titus.grpc.protogen.MigrationDetails;
import com.netflix.titus.grpc.protogen.Task;
import com.netflix.titus.grpc.protogen.TaskQueryResult;
import com.netflix.titus.grpc.protogen.TaskStatus;
import com.netflix.titus.runtime.connector.kubernetes.fabric8io.Fabric8IOConnector;
import com.netflix.titus.runtime.connector.relocation.RelocationDataReplicator;
import io.fabric8.kubernetes.api.model.ContainerState;
import io.fabric8.kubernetes.api.model.ContainerStatus;
import io.fabric8.kubernetes.api.model.Pod;
import static com.netflix.titus.common.kube.Annotations.AnnotationKeySuffixContainers;
import static com.netflix.titus.common.kube.Annotations.AnnotationKeySuffixContainersSidecar;
import static com.netflix.titus.runtime.kubernetes.KubeConstants.ANNOTATION_KEY_IMAGE_TAG_PREFIX;
@Singleton
class TaskDataInjector {

    private final Fabric8IOConnector kubeApiConnector;
    private final FeatureActivationConfiguration featureActivationConfiguration;
    private final RelocationDataReplicator relocationDataReplicator;

    @Inject
    TaskDataInjector(
            FeatureActivationConfiguration featureActivationConfiguration,
            RelocationDataReplicator relocationDataReplicator,
            Fabric8IOConnector kubeApiConnector) {
        this.featureActivationConfiguration = featureActivationConfiguration;
        this.relocationDataReplicator = relocationDataReplicator;
        this.kubeApiConnector = kubeApiConnector;
    }

    /**
     * Decorates the task carried by a TASKUPDATE notification with container states and/or the
     * task relocation plan, depending on which feature flags are enabled. All other notification
     * types are returned unchanged.
     */
    JobChangeNotification injectIntoTaskUpdateEvent(JobChangeNotification event) {
        if (event.getNotificationCase() != JobChangeNotification.NotificationCase.TASKUPDATE) {
            return event;
        }
        Task updatedTask = event.getTaskUpdate().getTask();
        if (featureActivationConfiguration.isInjectingContainerStatesEnabled()) {
            updatedTask = newTaskWithContainerState(updatedTask);
        }
        if (featureActivationConfiguration.isMergingTaskMigrationPlanInGatewayEnabled()) {
            updatedTask = newTaskWithRelocationPlan(updatedTask, relocationDataReplicator.getCurrent().getPlans().get(updatedTask.getId()));
        }
        // Nothing changed so return the input event. The identity comparison is intentional:
        // the decorators above return the original instance when there is nothing to merge.
        if (updatedTask == event.getTaskUpdate().getTask()) {
            return event;
        }
        return event.toBuilder().setTaskUpdate(
                event.getTaskUpdate().toBuilder().setTask(updatedTask).build()
        ).build();
    }

    /**
     * Returns the given task decorated with container states and/or the task relocation plan,
     * depending on which feature flags are enabled.
     */
    Task injectIntoTask(Task task) {
        Task decoratedTask = task;
        if (featureActivationConfiguration.isInjectingContainerStatesEnabled()) {
            decoratedTask = newTaskWithContainerState(task);
        }
        if (featureActivationConfiguration.isMergingTaskMigrationPlanInGatewayEnabled()) {
            decoratedTask = newTaskWithRelocationPlan(decoratedTask, relocationDataReplicator.getCurrent().getPlans().get(decoratedTask.getId()));
        }
        return decoratedTask;
    }

    /** Applies {@link #injectIntoTask(Task)} to every task, preserving order. */
    public List<Task> injectIntoTasks(List<Task> tasks) {
        // Presize: one decorated entry per input task.
        List<Task> decoratedTasks = new ArrayList<>(tasks.size());
        tasks.forEach(task -> decoratedTasks.add(injectIntoTask(task)));
        return decoratedTasks;
    }

    /** Rebuilds the query result with every contained task decorated. */
    public TaskQueryResult injectIntoTaskQueryResult(TaskQueryResult queryResult) {
        return queryResult.toBuilder()
                .clearItems()
                .addAllItems(injectIntoTasks(queryResult.getItemsList()))
                .build();
    }

    /** Returns a copy of the task whose status carries the kube-derived container states. */
    private Task newTaskWithContainerState(Task task) {
        if (task.hasStatus()) {
            return task.toBuilder().setStatus(task.getStatus().toBuilder()
                    .addAllContainerState(getContainerState(task.getId()))).build();
        }
        return task.toBuilder().setStatus(TaskStatus.newBuilder()
                .addAllContainerState(getContainerState(task.getId()))).build();
    }

    /**
     * Maps the kube pod's container statuses for the given task to GRPC container states.
     * Returns an empty list when no pod is known for the task or it has no container statuses.
     */
    private List<TaskStatus.ContainerState> getContainerState(String taskId) {
        // Single map lookup (the previous version queried the pod map twice).
        Pod pod = kubeApiConnector.getPods().get(taskId);
        if (pod == null) {
            return Collections.emptyList();
        }
        List<ContainerStatus> containerStatuses = pod.getStatus().getContainerStatuses();
        if (CollectionsExt.isNullOrEmpty(containerStatuses)) {
            return Collections.emptyList();
        }
        List<TaskStatus.ContainerState> containerStates = new ArrayList<>(containerStatuses.size());
        for (ContainerStatus containerStatus : containerStatuses) {
            ContainerState status = containerStatus.getState();
            String containerName = containerStatus.getName();
            // Running -> Healthy, terminated -> Unhealthy, anything else (e.g. waiting) -> Unset.
            TaskStatus.ContainerState.ContainerHealth containerHealth = TaskStatus.ContainerState.ContainerHealth.Unset;
            if (status.getRunning() != null) {
                containerHealth = TaskStatus.ContainerState.ContainerHealth.Healthy;
            } else if (status.getTerminated() != null) {
                containerHealth = TaskStatus.ContainerState.ContainerHealth.Unhealthy;
            }
            BasicImage basicImage = buildBasicImageFromContainerStatus(pod, containerStatus.getImage(), containerName);
            String platformSidecar = getPlatformSidecarThatCreatedContainer(pod, containerName);
            containerStates.add(TaskStatus.ContainerState.newBuilder()
                    .setContainerName(containerName)
                    .setContainerHealth(containerHealth)
                    .setContainerImage(basicImage)
                    .setPlatformSidecar(platformSidecar)
                    .build());
        }
        return containerStates;
    }

    /**
     * Generates a Titus object for basic image from the general string object that k8s provides
     * (the kind you might provide to `docker pull`).
     * But the original tag is often not available. For that we have to look at annotations to see
     * the tag that generated the digest (if available, not all images come from tags).
     */
    static BasicImage buildBasicImageFromContainerStatus(Pod pod, String image, String containerName) {
        BasicImage.Builder bi = BasicImage.newBuilder();
        String imageWithoutRegistry = stripRegistryFromImage(image);
        bi.setName(getNameFromImageString(imageWithoutRegistry));
        String digestFromImageString = getDigestFromImageString(imageWithoutRegistry);
        if (digestFromImageString.isEmpty()) {
            // If the image string doesn't have a digest, then the best we can do is
            // set the tag from whatever the image string has
            bi.setTag(getTagFromImageString(imageWithoutRegistry));
        } else {
            // If we *do* have a digest, then we can set it, but we have to get the tag
            // from an annotation. This is because the k8s pod object doesn't have room
            // for both pieces of data
            bi.setDigest(digestFromImageString);
            bi.setTag(getTagFromAnnotation(containerName, pod));
        }
        return bi.build();
    }

    /** Drops the registry host prefix ("reg.example.com/") from an image string, if present. */
    private static String stripRegistryFromImage(String image) {
        int slashStart = image.indexOf("/");
        if (slashStart < 0) {
            return image;
        }
        return image.substring(slashStart + 1);
    }

    /** Extracts the ":tag" suffix from an image string, or "" when there is none. */
    private static String getTagFromImageString(String image) {
        int tagStart = image.lastIndexOf(":");
        if (tagStart < 0) {
            return "";
        }
        return image.substring(tagStart + 1);
    }

    /** Reads the per-container image tag annotation set by the control plane, or "" if absent. */
    private static String getTagFromAnnotation(String containerName, Pod pod) {
        String key = ANNOTATION_KEY_IMAGE_TAG_PREFIX + containerName;
        return pod.getMetadata().getAnnotations().getOrDefault(key, "");
    }

    /** Extracts the "@digest" suffix from an image string, or "" when there is none. */
    private static String getDigestFromImageString(String image) {
        int digestStart = image.lastIndexOf("@");
        if (digestStart < 0) {
            return "";
        }
        return image.substring(digestStart + 1);
    }

    /** Extracts the bare image name, stripping either the "@digest" or the ":tag" suffix. */
    private static String getNameFromImageString(String image) {
        int digestStart = image.lastIndexOf("@");
        if (digestStart < 0) {
            int tagStart = image.lastIndexOf(":");
            if (tagStart < 0) {
                return image;
            }
            return image.substring(0, tagStart);
        }
        return image.substring(0, digestStart);
    }

    /**
     * Inspects a pod and determines what platform sidecar originally injected the container.
     * Returns empty string in the case that no platform sidecar injected the container (implies the container was user-defined)
     *
     * @param pod           pod to inspect
     * @param containerName the name of the container that was (potentially) added
     * @return platform sidecar name
     */
    private String getPlatformSidecarThatCreatedContainer(Pod pod, String containerName) {
        String key = containerName + "." + AnnotationKeySuffixContainers + "/" + AnnotationKeySuffixContainersSidecar;
        return pod.getMetadata().getAnnotations().getOrDefault(key, "");
    }

    /**
     * Merges a relocation plan into the task's migration details. The original task is returned
     * when there is no plan, when legacy migration details are already present, or when the plan
     * carries no relocation time.
     */
    static Task newTaskWithRelocationPlan(Task task, TaskRelocationPlan relocationPlan) {
        if (relocationPlan == null) {
            return task;
        }
        // If already set, assume this comes from the legacy task migration
        if (task.getMigrationDetails().getNeedsMigration()) {
            return task;
        }
        if (relocationPlan.getRelocationTime() <= 0) {
            return task;
        }
        return task.toBuilder().setMigrationDetails(
                MigrationDetails.newBuilder()
                        .setNeedsMigration(true)
                        .setStarted(relocationPlan.getDecisionTime())
                        .setDeadline(relocationPlan.getRelocationTime())
                        .build()
        ).build();
    }
}
| 9,389 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3/internal/DefaultHealthService.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.netflix.titus.grpc.protogen.HealthCheckRequest;
import com.netflix.titus.grpc.protogen.HealthCheckResponse;
import com.netflix.titus.grpc.protogen.HealthGrpc;
import com.netflix.titus.runtime.connector.GrpcClientConfiguration;
import com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil;
import com.netflix.titus.runtime.endpoint.metadata.CallMetadataResolver;
import com.netflix.titus.runtime.service.HealthService;
import io.grpc.stub.StreamObserver;
import rx.Observable;
import static com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil.createRequestObservable;
import static com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil.createSimpleClientResponseObserver;
import static com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil.createWrappedStub;
/**
 * Gateway-side {@link HealthService} that forwards health check requests to TitusMaster
 * over GRPC, attaching call metadata resolved from the current request context.
 */
@Singleton
public class DefaultHealthService implements HealthService {

    private final GrpcClientConfiguration configuration;
    private final CallMetadataResolver callMetadataResolver;
    private final HealthGrpc.HealthStub client;

    @Inject
    public DefaultHealthService(GrpcClientConfiguration configuration, CallMetadataResolver callMetadataResolver, HealthGrpc.HealthStub client) {
        this.configuration = configuration;
        this.callMetadataResolver = callMetadataResolver;
        this.client = client;
    }

    @Override
    public Observable<HealthCheckResponse> check(HealthCheckRequest request) {
        // Defer the GRPC call until subscription; responses are funneled back through the emitter.
        return createRequestObservable(emitter -> {
            StreamObserver<HealthCheckResponse> responseObserver = createSimpleClientResponseObserver(emitter);
            GrpcUtil.createWrappedStubWithResolver(client, callMetadataResolver, configuration.getRequestTimeout())
                    .check(request, responseObserver);
        }, configuration.getRequestTimeout());
    }
}
| 9,390 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3/internal/DisruptionBudgetSanitizerConfiguration.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
/**
 * Configuration of the disruption budget sanitizer applied by TitusGateway to
 * incoming job descriptors.
 */
@Configuration(prefix = "titusGateway.disruptionBudgetSanitizer")
public interface DisruptionBudgetSanitizerConfiguration {

    /** Seven days in milliseconds; a compile-time constant so it can be used in the annotation below. */
    long TIME_7DAYS_MS = 7L * 24 * 60 * 60 * 1000;

    /**
     * Relocation time (in milliseconds) assigned to self-managed service jobs.
     * Defaults to {@link #TIME_7DAYS_MS}.
     */
    @DefaultValue("" + TIME_7DAYS_MS)
    long getServiceSelfManagedRelocationTimeMs();
}
| 9,391 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3/internal/DefaultLoadBalancerService.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.google.protobuf.Empty;
import com.netflix.titus.api.loadbalancer.model.sanitizer.LoadBalancerResourceValidator;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.api.service.TitusServiceException;
import com.netflix.titus.grpc.protogen.AddLoadBalancerRequest;
import com.netflix.titus.grpc.protogen.GetAllLoadBalancersRequest;
import com.netflix.titus.grpc.protogen.GetAllLoadBalancersResult;
import com.netflix.titus.grpc.protogen.GetJobLoadBalancersResult;
import com.netflix.titus.grpc.protogen.JobId;
import com.netflix.titus.grpc.protogen.LoadBalancerServiceGrpc.LoadBalancerServiceStub;
import com.netflix.titus.grpc.protogen.RemoveLoadBalancerRequest;
import com.netflix.titus.runtime.connector.GrpcClientConfiguration;
import com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil;
import com.netflix.titus.runtime.service.LoadBalancerService;
import io.grpc.stub.StreamObserver;
import rx.Completable;
import rx.Observable;
import static com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil.createRequestCompletable;
import static com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil.createRequestObservable;
import static com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil.createSimpleClientResponseObserver;
import static com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil.createWrappedStub;
/**
 * Gateway-side {@link LoadBalancerService} that validates load balancer resources locally
 * and forwards all operations to TitusMaster over GRPC with the caller's metadata attached.
 */
@Singleton
public class DefaultLoadBalancerService implements LoadBalancerService {

    private final GrpcClientConfiguration configuration;
    private final LoadBalancerServiceStub client;
    private final LoadBalancerResourceValidator validator;

    @Inject
    public DefaultLoadBalancerService(GrpcClientConfiguration configuration,
                                      LoadBalancerResourceValidator validator,
                                      LoadBalancerServiceStub client) {
        this.configuration = configuration;
        this.client = client;
        this.validator = validator;
    }

    @Override
    public Observable<GetJobLoadBalancersResult> getLoadBalancers(JobId jobId, CallMetadata callMetadata) {
        return createRequestObservable(emitter -> {
            StreamObserver<GetJobLoadBalancersResult> responseObserver = createSimpleClientResponseObserver(emitter);
            createWrappedStub(client, callMetadata, configuration.getRequestTimeout()).getJobLoadBalancers(jobId, responseObserver);
        }, configuration.getRequestTimeout());
    }

    @Override
    public Observable<GetAllLoadBalancersResult> getAllLoadBalancers(GetAllLoadBalancersRequest request, CallMetadata callMetadata) {
        return createRequestObservable(emitter -> {
            StreamObserver<GetAllLoadBalancersResult> responseObserver = createSimpleClientResponseObserver(emitter);
            createWrappedStub(client, callMetadata, configuration.getRequestTimeout()).getAllLoadBalancers(request, responseObserver);
        }, configuration.getRequestTimeout());
    }

    @Override
    public Completable addLoadBalancer(AddLoadBalancerRequest addLoadBalancerRequest, CallMetadata callMetadata) {
        // Validate the target load balancer first; validation failures surface as invalid-argument errors.
        Completable validation = validator.validateLoadBalancer(addLoadBalancerRequest.getLoadBalancerId().getId())
                .onErrorResumeNext(e -> Completable.error(TitusServiceException.invalidArgument(e.getMessage())));
        Completable grpcCall = createRequestCompletable(emitter -> {
            StreamObserver<Empty> responseObserver = GrpcUtil.createEmptyClientResponseObserver(emitter);
            createWrappedStub(client, callMetadata, configuration.getRequestTimeout()).addLoadBalancer(addLoadBalancerRequest, responseObserver);
        }, configuration.getRequestTimeout());
        return validation.andThen(grpcCall);
    }

    @Override
    public Completable removeLoadBalancer(RemoveLoadBalancerRequest removeLoadBalancerRequest, CallMetadata callMetadata) {
        return createRequestCompletable(emitter -> {
            StreamObserver<Empty> responseObserver = GrpcUtil.createEmptyClientResponseObserver(emitter);
            createWrappedStub(client, callMetadata, configuration.getRequestTimeout()).removeLoadBalancer(removeLoadBalancerRequest, responseObserver);
        }, configuration.getRequestTimeout());
    }
}
| 9,392 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3/internal/NeedsMigrationQueryHandler.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.LogStorageInfo;
import com.netflix.titus.api.jobmanager.model.job.Task;
import com.netflix.titus.api.model.Page;
import com.netflix.titus.api.model.PageResult;
import com.netflix.titus.api.model.Pagination;
import com.netflix.titus.api.model.PaginationUtil;
import com.netflix.titus.api.relocation.model.TaskRelocationPlan;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.grpc.protogen.JobDescriptor;
import com.netflix.titus.grpc.protogen.TaskStatus;
import com.netflix.titus.runtime.connector.jobmanager.JobDataReplicator;
import com.netflix.titus.runtime.connector.jobmanager.snapshot.JobSnapshot;
import com.netflix.titus.runtime.connector.relocation.RelocationDataReplicator;
import com.netflix.titus.runtime.endpoint.JobQueryCriteria;
import com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters;
import com.netflix.titus.runtime.endpoint.v3.grpc.query.V3TaskQueryCriteriaEvaluator;
import com.netflix.titus.runtime.jobmanager.JobManagerCursors;
import static com.netflix.titus.gateway.service.v3.internal.TaskDataInjector.newTaskWithRelocationPlan;
@Singleton
class NeedsMigrationQueryHandler {

    // Queries are served from locally replicated in-memory snapshots (job and relocation data),
    // not from a remote call to TitusMaster.
    private final JobDataReplicator jobDataReplicator;
    private final RelocationDataReplicator relocationDataReplicator;
    private final LogStorageInfo<Task> logStorageInfo;
    private final TitusRuntime titusRuntime;

    @Inject
    NeedsMigrationQueryHandler(JobDataReplicator jobDataReplicator,
                               RelocationDataReplicator relocationDataReplicator,
                               LogStorageInfo<Task> logStorageInfo,
                               TitusRuntime titusRuntime) {
        this.jobDataReplicator = jobDataReplicator;
        this.relocationDataReplicator = relocationDataReplicator;
        this.logStorageInfo = logStorageInfo;
        this.titusRuntime = titusRuntime;
    }

    /**
     * 'needsMigration' filter requires that there is at least one task that is active and requires migration.
     * The query is executed by finding all tasks requiring migration that match the given criteria, and next resolve
     * from that set their jobs.
     */
    PageResult<com.netflix.titus.grpc.protogen.Job> findJobs(JobQueryCriteria<TaskStatus.TaskState, JobDescriptor.JobSpecCase> queryCriteria, Page page) {
        List<com.netflix.titus.grpc.protogen.Task> matchingTasks = findMatchingTasks(queryCriteria);
        if (matchingTasks.isEmpty()) {
            // No matching tasks: return an empty page with an empty cursor and no further pages.
            return PageResult.pageOf(
                    Collections.emptyList(),
                    Pagination.newBuilder().withCurrentPage(page).withCursor("").withHasMore(false).build()
            );
        }
        // Resolve the distinct set of job ids owning the matching tasks.
        Set<String> matchingJobIds = new HashSet<>();
        matchingTasks.forEach(task -> matchingJobIds.add(task.getJobId()));

        // Collect the matching jobs, iterating the snapshot's job map so the result order follows it.
        List<com.netflix.titus.grpc.protogen.Job> jobsToReturn = new ArrayList<>();
        Map<String, Job<?>> allJobs = jobDataReplicator.getCurrent().getJobMap();
        allJobs.forEach((jobId, job) -> {
            if (matchingJobIds.contains(job.getId())) {
                jobsToReturn.add(GrpcJobManagementModelConverters.toGrpcJob(job));
            }
        });
        // Cursor-based pagination over the collected jobs.
        Pair<List<com.netflix.titus.grpc.protogen.Job>, Pagination> paginationPair = PaginationUtil.takePageWithCursor(
                page,
                jobsToReturn,
                JobManagerCursors.jobCursorOrderComparator(),
                JobManagerCursors::jobIndexOf,
                JobManagerCursors::newCursorFrom
        );
        return PageResult.pageOf(paginationPair.getLeft(), paginationPair.getRight());
    }

    /**
     * Finds all tasks matching the query criteria (see {@link #findMatchingTasks}) and returns
     * the requested page of them using cursor-based pagination.
     */
    PageResult<com.netflix.titus.grpc.protogen.Task> findTasks(JobQueryCriteria<TaskStatus.TaskState, JobDescriptor.JobSpecCase> queryCriteria, Page page) {
        List<com.netflix.titus.grpc.protogen.Task> matchingTasks = findMatchingTasks(queryCriteria);
        Pair<List<com.netflix.titus.grpc.protogen.Task>, Pagination> paginationPair = PaginationUtil.takePageWithCursor(
                page,
                matchingTasks,
                JobManagerCursors.taskCursorOrderComparator(),
                JobManagerCursors::taskIndexOf,
                JobManagerCursors::newTaskCursorFrom
        );
        return PageResult.pageOf(paginationPair.getLeft(), paginationPair.getRight());
    }

    /**
     * Scans every task in the job snapshot and returns GRPC representations of those matching the
     * criteria. Tasks with a relocation plan are evaluated against the criteria with the
     * 'needsMigration' flag removed (the plan itself proves the migration need) and have the plan
     * merged into their migration details; tasks without a plan are evaluated against the full criteria.
     */
    private List<com.netflix.titus.grpc.protogen.Task> findMatchingTasks(JobQueryCriteria<TaskStatus.TaskState, JobDescriptor.JobSpecCase> queryCriteria) {
        JobSnapshot jobSnapshot = jobDataReplicator.getCurrent();
        Map<String, Job<?>> jobMap = jobSnapshot.getJobMap();
        Map<String, TaskRelocationPlan> relocationPlans = relocationDataReplicator.getCurrent().getPlans();
        // Two evaluators: one with the full criteria, one with 'needsMigration' stripped out.
        V3TaskQueryCriteriaEvaluator queryFilter = new V3TaskQueryCriteriaEvaluator(queryCriteria, titusRuntime);
        V3TaskQueryCriteriaEvaluator queryFilterWithoutNeedsMigration = new V3TaskQueryCriteriaEvaluator(filterOutNeedsMigration(queryCriteria), titusRuntime);
        List<com.netflix.titus.grpc.protogen.Task> matchingTasks = new ArrayList<>();
        jobMap.forEach((jobId, job) -> {
            Map<String, Task> tasks = jobSnapshot.getTasks(jobId);
            if (!CollectionsExt.isNullOrEmpty(tasks)) {
                tasks.forEach((taskId, task) -> {
                    TaskRelocationPlan plan = relocationPlans.get(task.getId());
                    Pair<Job<?>, Task> jobTaskPair = Pair.of(job, task);
                    if (plan != null) {
                        if (queryFilterWithoutNeedsMigration.test(jobTaskPair)) {
                            matchingTasks.add(newTaskWithRelocationPlan(GrpcJobManagementModelConverters.toGrpcTask(task, logStorageInfo), plan));
                        }
                    } else {
                        if (queryFilter.test(jobTaskPair)) {
                            matchingTasks.add(GrpcJobManagementModelConverters.toGrpcTask(task, logStorageInfo));
                        }
                    }
                });
            }
        });
        return matchingTasks;
    }

    /** Returns a copy of the criteria with the 'needsMigration' constraint cleared. */
    private JobQueryCriteria<TaskStatus.TaskState, JobDescriptor.JobSpecCase> filterOutNeedsMigration(JobQueryCriteria<TaskStatus.TaskState, JobDescriptor.JobSpecCase> queryCriteria) {
        return queryCriteria.toBuilder().withNeedsMigration(false).build();
    }
}
| 9,393 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3/internal/GatewayJobServiceGateway.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Singleton;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Sets;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.LogStorageInfo;
import com.netflix.titus.api.jobmanager.model.job.TaskState;
import com.netflix.titus.api.jobmanager.model.job.sanitizer.JobAssertions;
import com.netflix.titus.api.jobmanager.store.JobStore;
import com.netflix.titus.api.jobmanager.store.JobStoreException;
import com.netflix.titus.api.model.PageResult;
import com.netflix.titus.api.model.Pagination;
import com.netflix.titus.api.model.PaginationUtil;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.api.service.TitusServiceException;
import com.netflix.titus.common.model.admission.AdmissionSanitizer;
import com.netflix.titus.common.model.admission.AdmissionValidator;
import com.netflix.titus.common.model.sanitizer.EntitySanitizer;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.ExceptionExt;
import com.netflix.titus.common.util.StringExt;
import com.netflix.titus.common.util.time.Clock;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.grpc.protogen.Job;
import com.netflix.titus.grpc.protogen.JobChangeNotification;
import com.netflix.titus.grpc.protogen.JobDescriptor;
import com.netflix.titus.grpc.protogen.JobId;
import com.netflix.titus.grpc.protogen.JobManagementServiceGrpc.JobManagementServiceStub;
import com.netflix.titus.grpc.protogen.JobQuery;
import com.netflix.titus.grpc.protogen.JobQueryResult;
import com.netflix.titus.grpc.protogen.ObserveJobsQuery;
import com.netflix.titus.grpc.protogen.Page;
import com.netflix.titus.grpc.protogen.Task;
import com.netflix.titus.grpc.protogen.TaskId;
import com.netflix.titus.grpc.protogen.TaskQuery;
import com.netflix.titus.grpc.protogen.TaskQueryResult;
import com.netflix.titus.grpc.protogen.TaskStatus;
import com.netflix.titus.runtime.connector.GrpcRequestConfiguration;
import com.netflix.titus.runtime.endpoint.JobQueryCriteria;
import com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil;
import com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters;
import com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobQueryModelConverters;
import com.netflix.titus.runtime.jobmanager.JobManagerConfiguration;
import com.netflix.titus.runtime.jobmanager.JobManagerCursors;
import com.netflix.titus.runtime.jobmanager.gateway.GrpcJobServiceGateway;
import com.netflix.titus.runtime.jobmanager.gateway.JobServiceGateway;
import com.netflix.titus.runtime.jobmanager.gateway.JobServiceGatewayDelegate;
import com.netflix.titus.runtime.jobmanager.gateway.SanitizingJobServiceGateway;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.grpc.stub.StreamObserver;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import static com.netflix.titus.api.FeatureRolloutPlans.ENVIRONMENT_VARIABLE_NAMES_STRICT_VALIDATION_FEATURE;
import static com.netflix.titus.api.FeatureRolloutPlans.SECURITY_GROUPS_REQUIRED_FEATURE;
import static com.netflix.titus.api.jobmanager.model.job.sanitizer.JobSanitizerBuilder.JOB_STRICT_SANITIZER;
import static com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil.createRequestObservable;
import static com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil.createSimpleClientResponseObserver;
import static com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil.createWrappedStub;
import static com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobQueryModelConverters.toGrpcPagination;
import static com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobQueryModelConverters.toJobQueryCriteria;
import static com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobQueryModelConverters.toPage;
/**
* {@link JobServiceGateway} implementation merging the active and the archived data sets with extra validation rules.
*/
@Singleton
public class GatewayJobServiceGateway extends JobServiceGatewayDelegate {

    // SLF4J loggers are shared, immutable handles; declare them static final (was non-final).
    private static final Logger logger = LoggerFactory.getLogger(GatewayJobServiceGateway.class);

    // Upper bound on the number of per-job archived-task store queries executed in parallel.
    private static final int MAX_CONCURRENT_JOBS_TO_RETRIEVE = 10;

    private final GrpcRequestConfiguration tunablesConfiguration;
    private final GatewayConfiguration gatewayConfiguration;
    private final JobManagementServiceStub client;
    private final LocalCacheQueryProcessor localCacheQueryProcessor;
    private final JobStore store;
    private final LogStorageInfo<com.netflix.titus.api.jobmanager.model.job.Task> logStorageInfo;
    private final TaskDataInjector taskDataInjector;
    private final NeedsMigrationQueryHandler needsMigrationQueryHandler;
    private final Clock clock;

    @Inject
    public GatewayJobServiceGateway(GrpcRequestConfiguration tunablesConfiguration,
                                    GatewayConfiguration gatewayConfiguration,
                                    JobManagerConfiguration jobManagerConfiguration,
                                    JobManagementServiceStub client,
                                    JobStore store,
                                    LogStorageInfo<com.netflix.titus.api.jobmanager.model.job.Task> logStorageInfo,
                                    TaskDataInjector taskDataInjector,
                                    NeedsMigrationQueryHandler needsMigrationQueryHandler,
                                    LocalCacheQueryProcessor localCacheQueryProcessor,
                                    @Named(JOB_STRICT_SANITIZER) EntitySanitizer entitySanitizer,
                                    DisruptionBudgetSanitizer disruptionBudgetSanitizer,
                                    @Named(SECURITY_GROUPS_REQUIRED_FEATURE) Predicate<com.netflix.titus.api.jobmanager.model.job.JobDescriptor> securityGroupsRequiredPredicate,
                                    @Named(ENVIRONMENT_VARIABLE_NAMES_STRICT_VALIDATION_FEATURE) Predicate<com.netflix.titus.api.jobmanager.model.job.JobDescriptor> environmentVariableNamesStrictValidationPredicate,
                                    JobAssertions jobAssertions,
                                    AdmissionValidator<com.netflix.titus.api.jobmanager.model.job.JobDescriptor> validator,
                                    AdmissionSanitizer<com.netflix.titus.api.jobmanager.model.job.JobDescriptor> sanitizer,
                                    TitusRuntime titusRuntime) {
        // Delegate chain: sanitization/validation decorator wrapping the raw GRPC gateway.
        super(new SanitizingJobServiceGateway(
                new GrpcJobServiceGateway(client, tunablesConfiguration, titusRuntime),
                new ExtendedJobSanitizer(
                        jobManagerConfiguration,
                        jobAssertions,
                        entitySanitizer,
                        disruptionBudgetSanitizer,
                        securityGroupsRequiredPredicate,
                        environmentVariableNamesStrictValidationPredicate,
                        titusRuntime
                ),
                validator, sanitizer));
        this.tunablesConfiguration = tunablesConfiguration;
        this.gatewayConfiguration = gatewayConfiguration;
        this.client = client;
        this.localCacheQueryProcessor = localCacheQueryProcessor;
        this.store = store;
        this.logStorageInfo = logStorageInfo;
        this.taskDataInjector = taskDataInjector;
        this.needsMigrationQueryHandler = needsMigrationQueryHandler;
        this.clock = titusRuntime.getClock();
    }

    /**
     * Finds a job by id, serving from the local cache when permitted, and falling back to the archive
     * store when the job is not found in the active data set.
     */
    @Override
    public Observable<Job> findJob(String jobId, CallMetadata callMetadata) {
        TitusServiceException validationError = SanitizingJobServiceGateway.checkJobId(jobId).orElse(null);
        if (validationError != null) {
            return Observable.error(validationError);
        }

        if (localCacheQueryProcessor.canUseCache(Collections.emptyMap(), "findJob", callMetadata)) {
            // Wait until the cache is fresh enough, then read from it; archived fallback if not there.
            return localCacheQueryProcessor.syncCache("findJob", Job.class).concatWith(
                    Observable.defer(() -> {
                        Job grpcJob = localCacheQueryProcessor.findJob(jobId).orElse(null);
                        if (grpcJob != null) {
                            return Observable.just(grpcJob);
                        }
                        return retrieveArchivedJob(jobId);
                    })
            );
        }

        Observable<Job> observable = createRequestObservable(emitter -> {
            StreamObserver<Job> streamObserver = createSimpleClientResponseObserver(emitter);
            createWrappedStub(client, callMetadata, tunablesConfiguration.getRequestTimeoutMs()).findJob(JobId.newBuilder().setId(jobId).build(), streamObserver);
        }, tunablesConfiguration.getRequestTimeoutMs());

        // NOT_FOUND from master means the job may already be archived.
        return observable.onErrorResumeNext(e -> {
            if (e instanceof StatusRuntimeException &&
                    ((StatusRuntimeException) e).getStatus().getCode() == Status.Code.NOT_FOUND) {
                return retrieveArchivedJob(jobId);
            } else {
                return Observable.error(e);
            }
        }).timeout(tunablesConfiguration.getRequestTimeoutMs(), TimeUnit.MILLISECONDS);
    }

    /**
     * Finds jobs matching the query. "needsMigration" queries are answered from the local job/relocation
     * cache; other queries may be served from the local cache or delegated to the master.
     */
    @Override
    public Observable<JobQueryResult> findJobs(JobQuery jobQuery, CallMetadata callMetadata) {
        Map<String, String> filteringCriteriaMap = jobQuery.getFilteringCriteriaMap();
        boolean needsMigrationFilter = "true".equalsIgnoreCase(filteringCriteriaMap.getOrDefault("needsMigration", "false"));

        // "needsMigration" query is served from the local job and relocation cache.
        if (needsMigrationFilter) {
            PageResult<Job> pageResult = needsMigrationQueryHandler.findJobs(GrpcJobQueryModelConverters.toJobQueryCriteria(jobQuery), toPage(jobQuery.getPage()));
            return Observable.just(JobQueryResult.newBuilder()
                    .setPagination(toGrpcPagination(pageResult.getPagination()))
                    .addAllItems(pageResult.getItems())
                    .build()
            );
        }

        if (localCacheQueryProcessor.canUseCache(jobQuery.getFilteringCriteriaMap(), "findJobs", callMetadata)) {
            return localCacheQueryProcessor.syncCache("findJobs", JobQueryResult.class).concatWith(
                    Observable.fromCallable(() -> localCacheQueryProcessor.findJobs(jobQuery))
            );
        }

        return super.findJobs(jobQuery, callMetadata);
    }

    /**
     * Finds a task by id, serving from the local cache when permitted, with archive fallback for
     * finished/garbage-collected tasks. Extra task data (e.g. relocation state) is injected on the way out.
     */
    @Override
    public Observable<Task> findTask(String taskId, CallMetadata callMetadata) {
        TitusServiceException validationError = SanitizingJobServiceGateway.checkTaskId(taskId).orElse(null);
        if (validationError != null) {
            return Observable.error(validationError);
        }

        if (localCacheQueryProcessor.canUseCache(Collections.emptyMap(), "findTask", callMetadata)) {
            return localCacheQueryProcessor.syncCache("findTask", Task.class).concatWith(
                    Observable.defer(() -> {
                        Task grpcTask = localCacheQueryProcessor.findTask(taskId).orElse(null);
                        if (grpcTask != null) {
                            return Observable.just(taskDataInjector.injectIntoTask(grpcTask));
                        }
                        return retrieveArchivedTask(taskId);
                    })
            );
        }

        Observable<Task> observable = createRequestObservable(
                emitter -> {
                    StreamObserver<Task> streamObserver = createSimpleClientResponseObserver(emitter);
                    createWrappedStub(client, callMetadata, tunablesConfiguration.getRequestTimeoutMs()).findTask(TaskId.newBuilder().setId(taskId).build(), streamObserver);
                },
                tunablesConfiguration.getRequestTimeoutMs()
        );
        observable = observable.map(taskDataInjector::injectIntoTask);

        // NOT_FOUND from master means the task may already be archived.
        observable = observable.onErrorResumeNext(e -> {
            if (e instanceof StatusRuntimeException &&
                    ((StatusRuntimeException) e).getStatus().getCode() == Status.Code.NOT_FOUND) {
                return retrieveArchivedTask(taskId);
            } else {
                return Observable.error(e);
            }
        });

        return observable.timeout(tunablesConfiguration.getRequestTimeoutMs(), TimeUnit.MILLISECONDS);
    }

    /**
     * Finds tasks matching the query. When job ids are given together with the Finished task state,
     * the active result set is merged with archived tasks loaded from the store.
     */
    @Override
    public Observable<TaskQueryResult> findTasks(TaskQuery taskQuery, CallMetadata callMetadata) {
        Map<String, String> filteringCriteriaMap = taskQuery.getFilteringCriteriaMap();
        Set<String> v3JobIds = new HashSet<>(StringExt.splitByComma(filteringCriteriaMap.getOrDefault("jobIds", "")));
        boolean needsMigrationFilter = "true".equalsIgnoreCase(filteringCriteriaMap.getOrDefault("needsMigration", "false"));

        // "needsMigration" query is served from the local job and relocation cache.
        if (needsMigrationFilter) {
            PageResult<Task> pageResult = needsMigrationQueryHandler.findTasks(GrpcJobQueryModelConverters.toJobQueryCriteria(taskQuery), toPage(taskQuery.getPage()));
            TaskQueryResult taskQueryResult = TaskQueryResult.newBuilder()
                    .setPagination(toGrpcPagination(pageResult.getPagination()))
                    .addAllItems(taskDataInjector.injectIntoTasks(pageResult.getItems()))
                    .build();
            return Observable.just(taskQueryResult);
        }

        Set<String> taskStates = Sets.newHashSet(StringExt.splitByComma(taskQuery.getFilteringCriteriaMap().getOrDefault("taskStates", "")));

        Observable<TaskQueryResult> observable;
        if (v3JobIds.isEmpty()) {
            // Active task set only
            observable = newActiveTaskQueryAction(taskQuery, callMetadata);
        } else {
            if (!taskStates.contains(TaskState.Finished.name())) {
                // Active task set only
                observable = newActiveTaskQueryAction(taskQuery, callMetadata);
            } else {
                Page page = taskQuery.getPage();
                boolean nextPageByNumber = StringExt.isEmpty(page.getCursor()) && page.getPageNumber() > 0;
                if (nextPageByNumber) {
                    // In this case we ask for active and archived tasks using a page number > 0. Because of that
                    // we have to fetch as much tasks from master as we can. Tasks that we do not fetch, will not be
                    // visible to the client.
                    TaskQuery largePageQuery = taskQuery.toBuilder().setPage(taskQuery.getPage().toBuilder().setPageNumber(0).setPageSize(gatewayConfiguration.getMaxTaskPageSize())).build();
                    observable = newActiveTaskQueryAction(largePageQuery, callMetadata);
                } else {
                    observable = newActiveTaskQueryAction(taskQuery, callMetadata);
                }
                observable = observable.flatMap(result ->
                        retrieveArchivedTasksForJobs(v3JobIds, taskQuery).map(archivedTasks -> combineTaskResults(taskQuery, result, archivedTasks))
                );
            }
        }

        return observable.timeout(tunablesConfiguration.getRequestTimeoutMs(), TimeUnit.MILLISECONDS).map(queryResult -> taskDataInjector.injectIntoTaskQueryResult(queryResult));
    }

    /** Streams job change events for a single job, from the local cache when permitted. */
    @Override
    public Observable<JobChangeNotification> observeJob(String jobId, CallMetadata callMetadata) {
        TitusServiceException validationError = SanitizingJobServiceGateway.checkJobId(jobId).orElse(null);
        if (validationError != null) {
            return Observable.error(validationError);
        }
        if (localCacheQueryProcessor.canUseCache(Collections.emptyMap(), "observeJob", callMetadata)) {
            return localCacheQueryProcessor.syncCache("observeJob", JobChangeNotification.class)
                    .concatWith(localCacheQueryProcessor.observeJob(jobId))
                    .map(taskDataInjector::injectIntoTaskUpdateEvent);
        }
        return super.observeJob(jobId, callMetadata).map(taskDataInjector::injectIntoTaskUpdateEvent);
    }

    /** Streams job change events for all jobs matching the query, from the local cache when permitted. */
    @Override
    public Observable<JobChangeNotification> observeJobs(ObserveJobsQuery query, CallMetadata callMetadata) {
        if (localCacheQueryProcessor.canUseCache(query.getFilteringCriteriaMap(), "observeJobs", callMetadata)) {
            return localCacheQueryProcessor.syncCache("observeJobs", JobChangeNotification.class)
                    .concatWith(localCacheQueryProcessor.observeJobs(query))
                    .map(taskDataInjector::injectIntoTaskUpdateEvent);
        }
        return super.observeJobs(query, callMetadata).map(taskDataInjector::injectIntoTaskUpdateEvent);
    }

    /** Queries the active task set, either from the local cache or directly from the master over GRPC. */
    private Observable<TaskQueryResult> newActiveTaskQueryAction(TaskQuery taskQuery, CallMetadata callMetadata) {
        if (localCacheQueryProcessor.canUseCache(taskQuery.getFilteringCriteriaMap(), "findTasks", callMetadata)) {
            return localCacheQueryProcessor.syncCache("findTasks", TaskQueryResult.class).concatWith(
                    Observable.fromCallable(() -> {
                        TaskQueryResult taskQueryResult = localCacheQueryProcessor.findTasks(taskQuery);
                        return taskQueryResult.toBuilder()
                                .clearItems()
                                .addAllItems(taskDataInjector.injectIntoTasks(taskQueryResult.getItemsList()))
                                .build();
                    })
            );
        }
        return GrpcUtil.<TaskQueryResult>createRequestObservable(emitter -> {
            StreamObserver<TaskQueryResult> streamObserver = createSimpleClientResponseObserver(emitter);
            createWrappedStub(client, callMetadata, tunablesConfiguration.getRequestTimeoutMs()).findTasks(taskQuery, streamObserver);
        }, tunablesConfiguration.getRequestTimeoutMs()).map(taskQueryResult ->
                taskQueryResult.toBuilder()
                        .clearItems()
                        .addAllItems(taskDataInjector.injectIntoTasks(taskQueryResult.getItemsList()))
                        .build()
        );
    }

    /** Loads an archived job from the store, mapping store errors to service-level exceptions. */
    private Observable<Job> retrieveArchivedJob(String jobId) {
        return store.retrieveArchivedJob(jobId)
                .onErrorResumeNext(e -> {
                    if (e instanceof JobStoreException) {
                        JobStoreException storeException = (JobStoreException) e;
                        if (storeException.getErrorCode().equals(JobStoreException.ErrorCode.JOB_DOES_NOT_EXIST)) {
                            return Observable.error(TitusServiceException.jobNotFound(jobId));
                        }
                    }
                    return Observable.error(TitusServiceException.unexpected("Not able to retrieve the job: %s (%s)", jobId, ExceptionExt.toMessageChain(e)));
                }).map(GrpcJobManagementModelConverters::toGrpcJob);
    }

    /**
     * Loads archived tasks for the given jobs, applying only the task-level filtering criteria, and
     * returns them sorted by the task status timestamp.
     */
    private Observable<List<Task>> retrieveArchivedTasksForJobs(Set<String> jobIds, TaskQuery taskQuery) {
        JobQueryCriteria<TaskStatus.TaskState, JobDescriptor.JobSpecCase> taskQueryCriteria = toJobQueryCriteria(taskQuery);
        return Observable.fromCallable(() -> jobIds.stream().map(store::retrieveArchivedTasksForJob).collect(Collectors.toList()))
                .flatMap(observables -> Observable.merge(observables, MAX_CONCURRENT_JOBS_TO_RETRIEVE))
                .filter(task -> {
                    // We cannot use V3TaskQueryCriteriaEvaluator here as we do not have the job record, and requesting it
                    // is expensive. To get here, a user has to provide job id(s) for which finished tasks are requested.
                    // Because of that we do not need all the filtering criteria which mostly work at the job level to
                    // exclude jobs from the query result. Instead we implement a few that make sense for task.
                    Set<String> expectedStateReasons = taskQueryCriteria.getTaskStateReasons();
                    if (!expectedStateReasons.isEmpty() && !expectedStateReasons.contains(task.getStatus().getReasonCode())) {
                        return false;
                    }
                    if (taskQueryCriteria.isSkipSystemFailures()) {
                        if (com.netflix.titus.api.jobmanager.model.job.TaskStatus.isSystemError(task.getStatus())) {
                            return false;
                        }
                    }
                    return true;
                })
                .map(task -> {
                    // Archived tasks that are not in the Finished state have an inconsistent status; fix it up.
                    com.netflix.titus.api.jobmanager.model.job.Task fixedTask = task.getStatus().getState() == TaskState.Finished
                            ? task
                            : JobFunctions.fixArchivedTaskStatus(task, clock);
                    return GrpcJobManagementModelConverters.toGrpcTask(fixedTask, logStorageInfo);
                })
                .toSortedList((first, second) -> Long.compare(first.getStatus().getTimestamp(), second.getStatus().getTimestamp()));
    }

    /** Loads a single archived task from the store, mapping store errors to service-level exceptions. */
    private Observable<Task> retrieveArchivedTask(String taskId) {
        return store.retrieveArchivedTask(taskId)
                .onErrorResumeNext(e -> {
                    if (e instanceof JobStoreException) {
                        JobStoreException storeException = (JobStoreException) e;
                        if (storeException.getErrorCode().equals(JobStoreException.ErrorCode.TASK_DOES_NOT_EXIST)) {
                            return Observable.error(TitusServiceException.taskNotFound(taskId));
                        }
                    }
                    return Observable.error(TitusServiceException.unexpected("Not able to retrieve the task: %s (%s)", taskId, ExceptionExt.toMessageChain(e)));
                })
                .map(task -> {
                    // Archived tasks that are not in the Finished state have an inconsistent status; fix it up.
                    com.netflix.titus.api.jobmanager.model.job.Task fixedTask = task.getStatus().getState() == TaskState.Finished
                            ? task
                            : JobFunctions.fixArchivedTaskStatus(task, clock);
                    return GrpcJobManagementModelConverters.toGrpcTask(fixedTask, logStorageInfo);
                });
    }

    /**
     * Merges the active and archived task result sets, re-applies pagination over the combined list,
     * and fixes up the pagination totals.
     */
    @VisibleForTesting
    static TaskQueryResult combineTaskResults(TaskQuery taskQuery,
                                              TaskQueryResult activeTasksResult,
                                              List<Task> archivedTasks) {
        List<Task> tasks = deDupTasks(activeTasksResult.getItemsList(), archivedTasks);

        Pair<List<Task>, Pagination> paginationPair = PaginationUtil.takePageWithCursor(
                toPage(taskQuery.getPage()),
                tasks,
                JobManagerCursors.taskCursorOrderComparator(),
                JobManagerCursors::taskIndexOf,
                JobManagerCursors::newTaskCursorFrom
        );

        // Fix pagination result, as the total items count does not include all active tasks.
        // The total could be larger than the actual number of tasks, as we are not filtering duplicates.
        // This could be fixed in the future, when the gateway stores all active tasks in a local cache.
        int allTasksCount = activeTasksResult.getPagination().getTotalItems() + archivedTasks.size();
        Pair<List<Task>, Pagination> fixedPaginationPair = paginationPair.mapRight(p -> p.toBuilder()
                .withTotalItems(allTasksCount)
                .withTotalPages(PaginationUtil.numberOfPages(toPage(taskQuery.getPage()), allTasksCount))
                .build()
        );

        return TaskQueryResult.newBuilder()
                .addAllItems(fixedPaginationPair.getLeft())
                .setPagination(toGrpcPagination(fixedPaginationPair.getRight()))
                .build();
    }

    /**
     * It is ok to find the same task in the active and the archived data set. This may happen as the active and the archive
     * queries are run one after the other. In such case we know that the archive task is the latest copy, and should be
     * returned to the client.
     */
    @VisibleForTesting
    static List<Task> deDupTasks(List<Task> activeTasks, List<Task> archivedTasks) {
        Map<String, Task> archivedTasksMap = archivedTasks.stream().collect(Collectors.toMap(Task::getId, Function.identity()));
        List<Task> uniqueActiveTasks = activeTasks.stream().filter(activeTask -> {
            if (archivedTasksMap.containsKey(activeTask.getId())) {
                logger.warn("Duplicate Task detected (archived) {} - (active) {}", archivedTasksMap.get(activeTask.getId()), activeTask);
                return false;
            }
            return true;
        }).collect(Collectors.toList());
        uniqueActiveTasks.addAll(archivedTasks);
        return uniqueActiveTasks;
    }
}
| 9,394 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3/internal/DisruptionBudgetSanitizer.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import java.util.Collections;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.netflix.titus.api.jobmanager.model.job.JobDescriptor;
import com.netflix.titus.api.jobmanager.model.job.JobFunctions;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.DisruptionBudget;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.DisruptionBudgetFunctions;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.SelfManagedDisruptionBudgetPolicy;
import com.netflix.titus.api.jobmanager.model.job.disruptionbudget.UnlimitedDisruptionBudgetRate;
import com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt;
import com.netflix.titus.api.jobmanager.model.job.ext.ServiceJobExt;
import com.netflix.titus.api.jobmanager.model.job.migration.MigrationPolicy;
import com.netflix.titus.api.jobmanager.model.job.migration.SelfManagedMigrationPolicy;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.gateway.MetricConstants;
/**
 * Injects a default {@link DisruptionBudget} into legacy job descriptors (jobs submitted without one),
 * and records every such non-compliant submission in metrics.
 */
@Singleton
public class DisruptionBudgetSanitizer {

    private static final String METRICS_ROOT = MetricConstants.METRIC_JOB_MANAGEMENT + "disruptionBudget.";

    // Floor for the self-managed relocation time injected into batch jobs.
    private static final int MIN_BATCH_RELOCATION_TIME_MS = 60_000;

    @VisibleForTesting
    static final int DEFAULT_SERVICE_RELOCATION_TIME_MS = 60_000;

    /**
     * A strict alignment of a batch task runtime limit and a self migration deadline, may cause interruption of a batch task
     * which is almost finished. To prevent that we set migration limit to a slightly higher value.
     */
    @VisibleForTesting
    static final double BATCH_RUNTIME_LIMIT_FACTOR = 1.2;

    private final DisruptionBudgetSanitizerConfiguration configuration;
    private final Registry registry;
    private final Id nonCompliantId;

    @Inject
    public DisruptionBudgetSanitizer(DisruptionBudgetSanitizerConfiguration configuration, TitusRuntime titusRuntime) {
        this.configuration = configuration;
        this.registry = titusRuntime.getRegistry();
        this.nonCompliantId = registry.createId(METRICS_ROOT + "nonCompliant");
    }

    /**
     * Returns the descriptor unchanged when it already carries a disruption budget; otherwise records the
     * non-compliant job and injects a job-type-specific default budget.
     */
    public JobDescriptor sanitize(JobDescriptor original) {
        if (DisruptionBudgetFunctions.isLegacyJobDescriptor(original)) {
            record(original);
            return JobFunctions.isServiceJob(original)
                    ? injectDefaultServiceDisruptionBudget(original)
                    : injectDefaultBatchDisruptionBudget(original);
        }
        return original;
    }

    private JobDescriptor injectDefaultServiceDisruptionBudget(JobDescriptor<ServiceJobExt> original) {
        // Jobs with an explicit self-managed migration policy keep the configured relocation time. Anything
        // else gets a short default, causing an immediate fallback to the system default policy.
        MigrationPolicy migrationPolicy = original.getExtensions().getMigrationPolicy();
        long relocationTimeMs = migrationPolicy instanceof SelfManagedMigrationPolicy
                ? configuration.getServiceSelfManagedRelocationTimeMs()
                : DEFAULT_SERVICE_RELOCATION_TIME_MS;

        DisruptionBudget budget = DisruptionBudget.newBuilder()
                .withDisruptionBudgetPolicy(SelfManagedDisruptionBudgetPolicy.newBuilder()
                        .withRelocationTimeMs(relocationTimeMs)
                        .build()
                )
                .withDisruptionBudgetRate(UnlimitedDisruptionBudgetRate.newBuilder().build())
                .withContainerHealthProviders(Collections.emptyList())
                .withTimeWindows(Collections.emptyList())
                .build();
        return original.toBuilder().withDisruptionBudget(budget).build();
    }

    private JobDescriptor injectDefaultBatchDisruptionBudget(JobDescriptor<BatchJobExt> original) {
        // Allow slightly more than the runtime limit (see BATCH_RUNTIME_LIMIT_FACTOR), never below the floor.
        long relocationTimeMs = Math.max(
                MIN_BATCH_RELOCATION_TIME_MS,
                (long) (original.getExtensions().getRuntimeLimitMs() * BATCH_RUNTIME_LIMIT_FACTOR)
        );

        DisruptionBudget budget = DisruptionBudget.newBuilder()
                .withDisruptionBudgetPolicy(SelfManagedDisruptionBudgetPolicy.newBuilder()
                        .withRelocationTimeMs(relocationTimeMs)
                        .build()
                )
                .withDisruptionBudgetRate(UnlimitedDisruptionBudgetRate.newBuilder().build())
                .withContainerHealthProviders(Collections.emptyList())
                .withTimeWindows(Collections.emptyList())
                .build();
        return original.toBuilder().withDisruptionBudget(budget).build();
    }

    // Counts legacy (budget-less) submissions, tagged by capacity group, application and job type.
    private void record(JobDescriptor original) {
        String jobType = JobFunctions.isServiceJob(original) ? "service" : "batch";
        registry.counter(nonCompliantId.withTags(
                "capacityGroup", original.getCapacityGroup(),
                "application", original.getApplicationName(),
                "jobType", jobType
        )).increment();
    }
}
| 9,395 |
0 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3 | Create_ds/titus-control-plane/titus-server-gateway/src/main/java/com/netflix/titus/gateway/service/v3/internal/LocalCacheQueryProcessor.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Registry;
import com.netflix.titus.api.jobmanager.model.job.Job;
import com.netflix.titus.api.jobmanager.model.job.LogStorageInfo;
import com.netflix.titus.api.jobmanager.model.job.event.JobKeepAliveEvent;
import com.netflix.titus.api.jobmanager.model.job.event.JobManagerEvent;
import com.netflix.titus.api.jobmanager.model.job.event.JobUpdateEvent;
import com.netflix.titus.api.jobmanager.model.job.event.TaskUpdateEvent;
import com.netflix.titus.api.model.Page;
import com.netflix.titus.api.model.PageResult;
import com.netflix.titus.api.model.callmetadata.CallMetadata;
import com.netflix.titus.common.runtime.TitusRuntime;
import com.netflix.titus.common.util.CollectionsExt;
import com.netflix.titus.common.util.ProtobufExt;
import com.netflix.titus.common.util.RegExpExt;
import com.netflix.titus.common.util.rx.ReactorExt;
import com.netflix.titus.common.util.spectator.MetricSelector;
import com.netflix.titus.common.util.spectator.SpectatorExt;
import com.netflix.titus.common.util.spectator.ValueRangeCounter;
import com.netflix.titus.common.util.tuple.Pair;
import com.netflix.titus.gateway.MetricConstants;
import com.netflix.titus.grpc.protogen.JobChangeNotification;
import com.netflix.titus.grpc.protogen.JobDescriptor;
import com.netflix.titus.grpc.protogen.JobQuery;
import com.netflix.titus.grpc.protogen.JobQueryResult;
import com.netflix.titus.grpc.protogen.JobStatus;
import com.netflix.titus.grpc.protogen.ObserveJobsQuery;
import com.netflix.titus.grpc.protogen.Task;
import com.netflix.titus.grpc.protogen.TaskQuery;
import com.netflix.titus.grpc.protogen.TaskQueryResult;
import com.netflix.titus.grpc.protogen.TaskStatus;
import com.netflix.titus.runtime.connector.jobmanager.JobDataReplicator;
import com.netflix.titus.runtime.connector.jobmanager.snapshot.JobSnapshot;
import com.netflix.titus.runtime.endpoint.JobQueryCriteria;
import com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobManagementModelConverters;
import com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobQueryModelConverters;
import com.netflix.titus.runtime.endpoint.v3.grpc.query.V3JobQueryCriteriaEvaluator;
import com.netflix.titus.runtime.endpoint.v3.grpc.query.V3TaskQueryCriteriaEvaluator;
import com.netflix.titus.runtime.jobmanager.JobManagerCursors;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;
import rx.Observable;
import static com.netflix.titus.runtime.endpoint.common.grpc.CommonRuntimeGrpcModelConverters.toGrpcPagination;
import static com.netflix.titus.runtime.endpoint.common.grpc.CommonRuntimeGrpcModelConverters.toPage;
import static com.netflix.titus.runtime.endpoint.v3.grpc.GrpcJobQueryModelConverters.toJobQueryCriteria;
import static com.netflix.titus.runtime.jobmanager.gateway.JobServiceGateway.JOB_MINIMUM_FIELD_SET;
import static com.netflix.titus.runtime.jobmanager.gateway.JobServiceGateway.TASK_MINIMUM_FIELD_SET;
@Singleton
public class LocalCacheQueryProcessor {
    private static final Logger logger = LoggerFactory.getLogger(LocalCacheQueryProcessor.class);

    // Query parameter with which a caller explicitly opts into being served from the local cache.
    public static final String PARAMETER_USE_CACHE = "useCache";

    // Marker event appended after the initial snapshot of a job event stream.
    private static final JobChangeNotification SNAPSHOT_END_MARKER = JobChangeNotification.newBuilder()
            .setSnapshotEnd(JobChangeNotification.SnapshotEnd.newBuilder())
            .build();

    private static final String METRIC_ROOT = MetricConstants.METRIC_ROOT + "localCacheQueryProcessor.";
    private static final String METRIC_CACHE_USE_ALLOWED = METRIC_ROOT + "cacheUseAllowed";

    // Bucket boundaries (presumably milliseconds — TODO confirm) for the syncDelay range counter.
    private static final long[] LEVELS = new long[]{1, 2, 5, 10, 20, 30, 40, 50, 100, 200, 500, 1_000, 2_000, 5_000, 10_000, 20_000, 30_000};

    private final GatewayConfiguration configuration;
    private final JobDataReplicator jobDataReplicator;
    private final LogStorageInfo<com.netflix.titus.api.jobmanager.model.job.Task> logStorageInfo;
    private final TitusRuntime titusRuntime;
    // Dynamic regexp over caller ids that are allowed to be served from the cache (see canUseCache).
    private final Function<String, Matcher> callerIdMatcher;
    private final MetricSelector<ValueRangeCounter> syncDelayMetric;
    private final Counter rejectedByStalenessTooHighMetric;
    private final Scheduler scheduler;
    /**
     * DI entry point. Delegates to the main constructor with a dedicated bounded-elastic scheduler
     * (pool size taken from configuration) used for event stream processing.
     */
    @Inject
    public LocalCacheQueryProcessor(GatewayConfiguration configuration,
                                    JobDataReplicator jobDataReplicator,
                                    LogStorageInfo<com.netflix.titus.api.jobmanager.model.job.Task> logStorageInfo,
                                    TitusRuntime titusRuntime) {
        this(configuration,
                jobDataReplicator,
                logStorageInfo,
                Schedulers.newBoundedElastic(
                        configuration.getLocalCacheSchedulerThreadPoolSize(),
                        Integer.MAX_VALUE,
                        LocalCacheQueryProcessor.class.getSimpleName() + "-event-processor"
                ),
                titusRuntime
        );
    }
    /**
     * Main constructor; visible so tests can supply their own {@link Scheduler}. Wires the caller-id
     * matcher (dynamic regexp from configuration) and registers the sync-delay/staleness metrics.
     */
    public LocalCacheQueryProcessor(GatewayConfiguration configuration,
                                    JobDataReplicator jobDataReplicator,
                                    LogStorageInfo<com.netflix.titus.api.jobmanager.model.job.Task> logStorageInfo,
                                    Scheduler scheduler,
                                    TitusRuntime titusRuntime) {
        this.configuration = configuration;
        this.jobDataReplicator = jobDataReplicator;
        this.logStorageInfo = logStorageInfo;
        this.titusRuntime = titusRuntime;
        this.scheduler = scheduler;
        // Re-evaluated dynamically, so configuration changes take effect without a restart.
        this.callerIdMatcher = RegExpExt.dynamicMatcher(configuration::getQueryFromCacheCallerId,
                "titusGateway.queryFromCacheCallerId", Pattern.DOTALL, logger);

        Registry registry = titusRuntime.getRegistry();
        this.syncDelayMetric = SpectatorExt.newValueRangeCounter(
                registry.createId(METRIC_ROOT + "syncDelay"), new String[]{"endpoint"}, LEVELS, registry
        );
        this.rejectedByStalenessTooHighMetric = registry.counter(METRIC_ROOT + "rejectedByStalenessTooHigh");
    }
    /** Releases the event-processing scheduler on container shutdown. */
    @PreDestroy
    public void shutdown() {
        scheduler.dispose();
    }
public boolean canUseCache(Map<String, String> queryParameters,
String endpoint,
CallMetadata callMetadata) {
boolean allow;
String reason;
String callerId = CollectionsExt.isNullOrEmpty(callMetadata.getCallers())
? "unknown"
: callMetadata.getCallers().get(0).getId();
if (jobDataReplicator.getStalenessMs() > configuration.getMaxAcceptableCacheStalenessMs()) {
allow = false;
reason = "stale";
} else {
// Check if cache explicitly requested.
if ("true".equalsIgnoreCase(queryParameters.getOrDefault(PARAMETER_USE_CACHE, "false"))) {
allow = true;
reason = "requestedDirectly";
} else if (callerId.equals("unknown")) {
allow = false;
reason = "callerNotSet";
} else {
allow = callerIdMatcher.apply(callerId).matches();
reason = allow ? "configurationMatch" : "callerNotMatching";
}
}
titusRuntime.getRegistry()
.counter(METRIC_CACHE_USE_ALLOWED,
"endpoint", endpoint,
"callerId", callerId,
"allowed", Boolean.toString(allow),
"reason", reason
)
.increment();
return allow;
}
public Optional<com.netflix.titus.grpc.protogen.Job> findJob(String jobId) {
return jobDataReplicator.getCurrent().findJob(jobId).map(GrpcJobManagementModelConverters::toGrpcJob);
}
public JobQueryResult findJobs(JobQuery jobQuery) {
JobQueryCriteria<TaskStatus.TaskState, JobDescriptor.JobSpecCase> queryCriteria = GrpcJobQueryModelConverters.toJobQueryCriteria(jobQuery);
Page page = toPage(jobQuery.getPage());
List<Job> matchingJobs = findMatchingJob(queryCriteria);
PageResult<Job> pageResult = JobManagerCursors.newCoreJobPaginationEvaluator().takePage(page, matchingJobs);
Set<String> fields = newFieldsFilter(jobQuery.getFieldsList(), JOB_MINIMUM_FIELD_SET);
List<com.netflix.titus.grpc.protogen.Job> grpcJob = pageResult.getItems().stream()
.map(coreJob -> {
com.netflix.titus.grpc.protogen.Job job = GrpcJobManagementModelConverters.toGrpcJob(coreJob);
if (!fields.isEmpty()) {
job = ProtobufExt.copy(job, fields);
}
return job;
})
.collect(Collectors.toList());
return JobQueryResult.newBuilder()
.setPagination(toGrpcPagination(pageResult.getPagination()))
.addAllItems(grpcJob)
.build();
}
public Optional<Task> findTask(String taskId) {
return jobDataReplicator.getCurrent()
.findTaskById(taskId)
.map(jobTaskPair -> GrpcJobManagementModelConverters.toGrpcTask(jobTaskPair.getRight(), logStorageInfo));
}
public TaskQueryResult findTasks(TaskQuery taskQuery) {
JobQueryCriteria<TaskStatus.TaskState, JobDescriptor.JobSpecCase> queryCriteria = GrpcJobQueryModelConverters.toJobQueryCriteria(taskQuery);
Page page = toPage(taskQuery.getPage());
List<com.netflix.titus.api.jobmanager.model.job.Task> matchingTasks = findMatchingTasks(queryCriteria);
PageResult<com.netflix.titus.api.jobmanager.model.job.Task> pageResult = JobManagerCursors.newCoreTaskPaginationEvaluator().takePage(page, matchingTasks);
Set<String> fields = newFieldsFilter(taskQuery.getFieldsList(), TASK_MINIMUM_FIELD_SET);
List<Task> grpcTasks = pageResult.getItems().stream()
.map(task -> {
Task grpcTask = GrpcJobManagementModelConverters.toGrpcTask(task, logStorageInfo);
if (!fields.isEmpty()) {
grpcTask = ProtobufExt.copy(grpcTask, fields);
}
return grpcTask;
})
.collect(Collectors.toList());
return TaskQueryResult.newBuilder()
.setPagination(toGrpcPagination(pageResult.getPagination()))
.addAllItems(grpcTasks)
.build();
}
public Observable<JobChangeNotification> observeJob(String jobId) {
ObserveJobsQuery query = ObserveJobsQuery.newBuilder().putFilteringCriteria("jobIds", jobId).build();
return observeJobs(query).takeUntil(this::isJobFinishedEvent);
}
/**
* Job finished event is the last one that is emitted for every completed job.
*/
private boolean isJobFinishedEvent(JobChangeNotification event) {
return event.getNotificationCase() == JobChangeNotification.NotificationCase.JOBUPDATE &&
event.getJobUpdate().getJob().getStatus().getState() == JobStatus.JobState.Finished;
}
    /**
     * Event stream of job/task updates matching the given query, served from the local replicated cache.
     * On the first replicator event a full snapshot of the matching jobs and tasks is emitted (terminated by
     * the snapshot-end marker), followed by live updates. The stream is aborted with {@code Status.ABORTED}
     * when the underlying replicator reconnects or when cache staleness exceeds the configured threshold,
     * so that clients re-subscribe and receive a fresh snapshot.
     */
    public Observable<JobChangeNotification> observeJobs(ObserveJobsQuery query) {
        JobQueryCriteria<TaskStatus.TaskState, JobDescriptor.JobSpecCase> criteria = toJobQueryCriteria(query);
        V3JobQueryCriteriaEvaluator jobsPredicate = new V3JobQueryCriteriaEvaluator(criteria, titusRuntime);
        V3TaskQueryCriteriaEvaluator tasksPredicate = new V3TaskQueryCriteriaEvaluator(criteria, titusRuntime);
        Set<String> jobFields = newFieldsFilter(query.getJobFieldsList(), JOB_MINIMUM_FIELD_SET);
        Set<String> taskFields = newFieldsFilter(query.getTaskFieldsList(), TASK_MINIMUM_FIELD_SET);
        Flux<JobChangeNotification> eventStream = Flux.defer(() -> {
            // Per-subscription flag; the first event triggers emission of the full snapshot.
            AtomicBoolean first = new AtomicBoolean(true);
            return jobDataReplicator.events()
                    .subscribeOn(scheduler)
                    .publishOn(scheduler)
                    .flatMap(event -> {
                        JobManagerEvent<?> jobManagerEvent = event.getRight();
                        long now = titusRuntime.getClock().wallTime();
                        JobSnapshot snapshot = event.getLeft();
                        // Filtered/field-trimmed GRPC notification for this event (empty if it does not match).
                        Optional<JobChangeNotification> grpcEvent = toObserveJobsEvent(snapshot, jobManagerEvent, now, jobsPredicate, tasksPredicate, jobFields, taskFields);
                        // On first event emit full snapshot first
                        if (first.getAndSet(false)) {
                            List<JobChangeNotification> snapshotEvents = buildSnapshot(snapshot, now, jobsPredicate, tasksPredicate, jobFields, taskFields);
                            grpcEvent.ifPresent(snapshotEvents::add);
                            return Flux.fromIterable(snapshotEvents);
                        }
                        // If we already emitted something we have to first disconnect this stream, and let the client
                        // subscribe again. Snapshot marker indicates that the underlying GRPC stream was disconnected.
                        if (jobManagerEvent == JobManagerEvent.snapshotMarker()) {
                            return Mono.error(new StatusRuntimeException(Status.ABORTED.augmentDescription(
                                    "Downstream event stream reconnected."
                            )));
                        }
                        // Job data replicator emits keep alive events periodically if there is nothing in the stream. We have
                        // to filter them out here.
                        if (jobManagerEvent instanceof JobKeepAliveEvent) {
                            // Check if staleness is not too high.
                            if (jobDataReplicator.getStalenessMs() > configuration.getObserveJobsStalenessDisconnectMs()) {
                                rejectedByStalenessTooHighMetric.increment();
                                return Mono.error(new StatusRuntimeException(Status.ABORTED.augmentDescription(
                                        "Data staleness in the event stream is too high. Most likely caused by connectivity issue to the downstream server."
                                )));
                            }
                            return Mono.empty();
                        }
                        return grpcEvent.map(Flux::just).orElseGet(Flux::empty);
                    });
        });
        return ReactorExt.toObservable(eventStream);
    }
    /**
     * Observable that completes when the job replicator cache checkpoint reaches the request timestamp.
     * It does not emit any value.
     *
     * @param endpoint endpoint name, used to tag the sync-delay metric and in debug logging
     * @param type     element type of the returned (always empty) observable; used for typing only
     */
    <T> Observable<T> syncCache(String endpoint, Class<T> type) {
        Flux<T> fluxSync = Flux.defer(() -> {
            // Capture the subscription time; we wait until the cache checkpoint catches up with it.
            long startTime = titusRuntime.getClock().wallTime();
            return jobDataReplicator.observeLastCheckpointTimestamp()
                    .subscribeOn(scheduler)
                    .publishOn(scheduler)
                    .skipUntil(timestamp -> timestamp >= startTime)
                    .take(1)
                    .flatMap(timestamp -> {
                        // Record how long the caller had to wait for the cache to be in sync.
                        syncDelayMetric.withTags(endpoint).ifPresent(m -> {
                            long delayMs = titusRuntime.getClock().wallTime() - timestamp;
                            logger.debug("Cache sync delay: method={}, delaysMs={}", endpoint, delayMs);
                            m.recordLevel(delayMs);
                        });
                        return Mono.empty();
                    });
        });
        return ReactorExt.toObservable(fluxSync);
    }
private Set<String> newFieldsFilter(List<String> fields, Set<String> minimumFieldSet) {
return fields.isEmpty()
? Collections.emptySet()
: CollectionsExt.merge(new HashSet<>(fields), minimumFieldSet);
}
private List<com.netflix.titus.api.jobmanager.model.job.Job> findMatchingJob(JobQueryCriteria<TaskStatus.TaskState, JobDescriptor.JobSpecCase> queryCriteria) {
JobSnapshot jobSnapshot = jobDataReplicator.getCurrent();
Map<String, Job<?>> jobsById = jobSnapshot.getJobMap();
V3JobQueryCriteriaEvaluator queryFilter = new V3JobQueryCriteriaEvaluator(queryCriteria, titusRuntime);
List<com.netflix.titus.api.jobmanager.model.job.Job> matchingJobs = new ArrayList<>();
jobsById.forEach((jobId, job) -> {
List<com.netflix.titus.api.jobmanager.model.job.Task> tasks = new ArrayList<>(jobSnapshot.getTasks(jobId).values());
Pair<Job<?>, List<com.netflix.titus.api.jobmanager.model.job.Task>> jobTaskPair = Pair.of(job, tasks);
if (queryFilter.test(jobTaskPair)) {
matchingJobs.add(job);
}
});
return matchingJobs;
}
private List<com.netflix.titus.api.jobmanager.model.job.Task> findMatchingTasks(JobQueryCriteria<TaskStatus.TaskState, JobDescriptor.JobSpecCase> queryCriteria) {
JobSnapshot jobSnapshot = jobDataReplicator.getCurrent();
Map<String, Job<?>> jobsById = jobSnapshot.getJobMap();
V3TaskQueryCriteriaEvaluator queryFilter = new V3TaskQueryCriteriaEvaluator(queryCriteria, titusRuntime);
List<com.netflix.titus.api.jobmanager.model.job.Task> matchingTasks = new ArrayList<>();
jobsById.forEach((jobId, job) -> {
Map<String, com.netflix.titus.api.jobmanager.model.job.Task> tasks = jobSnapshot.getTasks(jobId);
if (!CollectionsExt.isNullOrEmpty(tasks)) {
tasks.forEach((taskId, task) -> {
Pair<Job<?>, com.netflix.titus.api.jobmanager.model.job.Task> jobTaskPair = Pair.of(job, task);
if (queryFilter.test(jobTaskPair)) {
matchingTasks.add(task);
}
});
}
});
return matchingTasks;
}
private List<JobChangeNotification> buildSnapshot(JobSnapshot snapshot,
long now,
V3JobQueryCriteriaEvaluator jobsPredicate,
V3TaskQueryCriteriaEvaluator tasksPredicate,
Set<String> jobFields,
Set<String> taskFields) {
List<JobChangeNotification> result = new ArrayList<>();
Map<String, Job<?>> allJobsMap = snapshot.getJobMap();
allJobsMap.forEach((jobId, job) -> {
List<com.netflix.titus.api.jobmanager.model.job.Task> tasks = new ArrayList<>(snapshot.getTasks(jobId).values());
if (jobsPredicate.test(Pair.of(job, tasks))) {
result.add(toGrpcJobEvent(job, now, jobFields));
}
});
snapshot.getTaskMap().forEach((taskId, task) -> {
Job<?> job = allJobsMap.get(task.getJobId());
if (job != null && tasksPredicate.test(Pair.of(job, task))) {
result.add(toGrpcTaskEvent(task, false, now, taskFields));
}
});
result.add(SNAPSHOT_END_MARKER);
return result;
}
private Optional<JobChangeNotification> toObserveJobsEvent(JobSnapshot snapshot,
JobManagerEvent<?> event,
long now,
V3JobQueryCriteriaEvaluator jobsPredicate,
V3TaskQueryCriteriaEvaluator tasksPredicate,
Set<String> jobFields,
Set<String> taskFields) {
if (event instanceof JobUpdateEvent) {
JobUpdateEvent jobUpdateEvent = (JobUpdateEvent) event;
Job<?> job = jobUpdateEvent.getCurrent();
List<com.netflix.titus.api.jobmanager.model.job.Task> tasks = new ArrayList<>(snapshot.getTasks(job.getId()).values());
return jobsPredicate.test(Pair.of(job, tasks)) ? Optional.of(toGrpcJobEvent(job, now, jobFields)) : Optional.empty();
}
if (event instanceof TaskUpdateEvent) {
TaskUpdateEvent taskUpdateEvent = (TaskUpdateEvent) event;
Job<?> job = taskUpdateEvent.getCurrentJob();
com.netflix.titus.api.jobmanager.model.job.Task task = taskUpdateEvent.getCurrentTask();
return tasksPredicate.test(Pair.of(job, task))
? Optional.of(toGrpcTaskEvent(task, taskUpdateEvent.isMovedFromAnotherJob(), now, taskFields))
: Optional.empty();
}
return Optional.empty();
}
private JobChangeNotification toGrpcJobEvent(Job<?> job, long now, Set<String> jobFields) {
com.netflix.titus.grpc.protogen.Job grpcJob = GrpcJobManagementModelConverters.toGrpcJob(job);
if (!jobFields.isEmpty()) {
grpcJob = ProtobufExt.copy(grpcJob, jobFields);
}
return JobChangeNotification.newBuilder()
.setJobUpdate(JobChangeNotification.JobUpdate.newBuilder().setJob(grpcJob))
.setTimestamp(now)
.build();
}
private JobChangeNotification toGrpcTaskEvent(com.netflix.titus.api.jobmanager.model.job.Task task,
boolean movedFromAnotherJob,
long now,
Set<String> taskFields) {
Task grpcTask = GrpcJobManagementModelConverters.toGrpcTask(task, logStorageInfo);
if (!taskFields.isEmpty()) {
grpcTask = ProtobufExt.copy(grpcTask, taskFields);
}
return JobChangeNotification.newBuilder()
.setTaskUpdate(JobChangeNotification.TaskUpdate.newBuilder().setTask(grpcTask).setMovedFromAnotherJob(movedFromAnotherJob))
.setTimestamp(now)
.build();
}
}
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import javax.inject.Singleton;
import com.netflix.titus.gateway.service.v3.TitusManagementService;
@Singleton
public class DefaultTitusManagementService implements TitusManagementService {
    // Intentionally empty placeholder implementation: the TitusManagementService interface declares no
    // members here. NOTE(review): confirm whether this class is still required by the injection wiring.
}
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import javax.inject.Inject;
import javax.inject.Singleton;
import com.netflix.titus.gateway.service.v3.SchedulerService;
import com.netflix.titus.grpc.protogen.SchedulerServiceGrpc.SchedulerServiceStub;
import com.netflix.titus.grpc.protogen.SchedulingResultEvent;
import com.netflix.titus.grpc.protogen.SchedulingResultRequest;
import com.netflix.titus.runtime.connector.GrpcClientConfiguration;
import com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil;
import com.netflix.titus.runtime.endpoint.metadata.CallMetadataResolver;
import io.grpc.stub.StreamObserver;
import rx.Observable;
import static com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil.createRequestObservable;
import static com.netflix.titus.runtime.endpoint.common.grpc.GrpcUtil.createSimpleClientResponseObserver;
/**
 * GRPC-backed implementation of {@link SchedulerService}. Delegates both operations to the scheduler
 * service stub, attaching caller metadata resolved per request. One-shot queries carry the configured
 * request timeout; the long-lived observe stream does not.
 */
@Singleton
public class DefaultSchedulerService implements SchedulerService {

    private final GrpcClientConfiguration configuration;
    private final SchedulerServiceStub client;
    private final CallMetadataResolver callMetadataResolver;

    @Inject
    public DefaultSchedulerService(GrpcClientConfiguration configuration,
                                   SchedulerServiceStub client,
                                   CallMetadataResolver callMetadataResolver) {
        this.configuration = configuration;
        this.client = client;
        this.callMetadataResolver = callMetadataResolver;
    }

    /** Fetches the last scheduling result for a task as a single-value observable with a request timeout. */
    @Override
    public Observable<SchedulingResultEvent> findLastSchedulingResult(String taskId) {
        return createRequestObservable(emitter -> {
            SchedulingResultRequest request = SchedulingResultRequest.newBuilder().setTaskId(taskId).build();
            StreamObserver<SchedulingResultEvent> responseObserver = createSimpleClientResponseObserver(emitter);
            GrpcUtil.createWrappedStubWithResolver(client, callMetadataResolver, configuration.getRequestTimeout())
                    .getSchedulingResult(request, responseObserver);
        }, configuration.getRequestTimeout());
    }

    /** Streams scheduling results for a task; no timeout is applied as the stream is long-lived. */
    @Override
    public Observable<SchedulingResultEvent> observeSchedulingResults(String taskId) {
        return createRequestObservable(emitter -> {
            SchedulingResultRequest request = SchedulingResultRequest.newBuilder().setTaskId(taskId).build();
            StreamObserver<SchedulingResultEvent> responseObserver = createSimpleClientResponseObserver(emitter);
            GrpcUtil.createWrappedStubWithResolver(client, callMetadataResolver)
                    .observeSchedulingResults(request, responseObserver);
        });
    }
}
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.gateway.service.v3.internal;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
/**
 * Archaius-backed configuration for Titus Gateway request handling and local-cache based queries.
 * All properties live under the {@code titusGateway} prefix; values shown are defaults.
 */
@Configuration(prefix = "titusGateway")
public interface GatewayConfiguration {

    /**
     * Maximum number of tasks in a page. This limit exists due to the GRPC buffer limits.
     */
    @DefaultValue("5000")
    int getMaxTaskPageSize();

    /**
     * If the cache staleness is above this threshold, it will not be used for query requests.
     */
    @DefaultValue("5000")
    long getMaxAcceptableCacheStalenessMs();

    /**
     * If the cache staleness is above this threshold, disconnect all observeJob(s) sourced from this cache.
     */
    @DefaultValue("5000")
    long getObserveJobsStalenessDisconnectMs();

    /**
     * Configure callers whose queries should be handled from the local cache. The value is used as a regular
     * expression matched against the caller id (the default {@code NONE} presumably matches no real caller id
     * — confirm against {@code LocalCacheQueryProcessor} usage).
     */
    @DefaultValue("NONE")
    String getQueryFromCacheCallerId();

    /**
     * Thread pool size handling client requests from the local cache. The {@link LocalCacheQueryProcessor} sync method
     * most of the time completes with TJC GRPC callback when handling the keep alive message. Using that GRPC thread
     * for further processing would cause a bottleneck on both sides.
     */
    @DefaultValue("100")
    int getLocalCacheSchedulerThreadPoolSize();
}
| 9,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.