index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/servo/servo-core/src/main/java/com/netflix/servo | Create_ds/servo/servo-core/src/main/java/com/netflix/servo/publish/package-info.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
 */
/**
* Interfaces for collecting metrics and publishing them to observers.
*/
package com.netflix.servo.publish;
| 3,000 |
0 | Create_ds/servo/servo-core/src/main/java/com/netflix/servo | Create_ds/servo/servo-core/src/main/java/com/netflix/servo/publish/JmxConnector.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.publish;
import javax.management.MBeanServerConnection;
/**
 * Used to get a connection to a JMX mbean server.
 *
 * NOTE(review): no checked exception is declared, so implementations must
 * report connection failures as unchecked exceptions — confirm with callers.
 */
public interface JmxConnector {

  /**
   * Returns a connection to an mbean server that can be used to poll metrics
   * from JMX.
   */
  MBeanServerConnection getConnection();
}
| 3,001 |
0 | Create_ds/servo/servo-core/src/main/java/com/netflix/servo | Create_ds/servo/servo-core/src/main/java/com/netflix/servo/publish/MonitorRegistryMetricPoller.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.publish;
import com.netflix.servo.DefaultMonitorRegistry;
import com.netflix.servo.Metric;
import com.netflix.servo.MonitorRegistry;
import com.netflix.servo.monitor.BasicCounter;
import com.netflix.servo.monitor.CompositeMonitor;
import com.netflix.servo.monitor.Counter;
import com.netflix.servo.monitor.DynamicCounter;
import com.netflix.servo.monitor.Monitor;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.servo.util.Clock;
import com.netflix.servo.util.ClockWithOffset;
import com.netflix.servo.util.ThreadFactories;
import com.netflix.servo.util.TimeLimiter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
/**
 * Poller for fetching {@link com.netflix.servo.annotations.Monitor} metrics
 * from a monitor registry.
 *
 * <p>The list of monitors matching a filter is cached for {@code cacheTTL}
 * milliseconds. Individual {@code getValue} calls can optionally be bounded by
 * a one second time limit so a misbehaving monitor cannot stall a poll.
 */
public final class MonitorRegistryMetricPoller implements MetricPoller {

  private static final Logger LOGGER = LoggerFactory.getLogger(MonitorRegistryMetricPoller.class);

  private static final String GET_VALUE_ERROR = "servo.getValueError";

  /** Counts monitors whose getValue call exceeded the time limit. */
  private static final Counter TIMEOUT_ERROR = new BasicCounter(MonitorConfig
      .builder(GET_VALUE_ERROR)
      .withTag("id", "timeout")
      .build());

  static {
    DefaultMonitorRegistry.getInstance().register(TIMEOUT_ERROR);
  }

  private final MonitorRegistry registry;

  /** TTL for the cached monitor list, in milliseconds. */
  private final long cacheTTL;

  private final AtomicReference<List<Monitor<?>>> cachedMonitors =
      new AtomicReference<>();

  /** Timestamp ({@code clock.now()}) of the last cache refresh. */
  private final AtomicLong cacheLastUpdateTime = new AtomicLong(0L);

  // Put limit on fetching the monitor value in-case someone does something silly like call a
  // remote service inline
  private final TimeLimiter limiter;
  private final ExecutorService service;

  private final Clock clock;

  /**
   * Creates a new instance using {@link com.netflix.servo.DefaultMonitorRegistry}.
   */
  public MonitorRegistryMetricPoller() {
    this(DefaultMonitorRegistry.getInstance(), 0L, TimeUnit.MILLISECONDS, true);
  }

  /**
   * Creates a new instance using the specified registry.
   *
   * @param registry registry to query for annotated objects
   */
  public MonitorRegistryMetricPoller(MonitorRegistry registry) {
    this(registry, 0L, TimeUnit.MILLISECONDS, true, ClockWithOffset.INSTANCE);
  }

  /**
   * Creates a new instance using the specified registry and a time limiter.
   *
   * @param registry registry to query for annotated objects
   * @param cacheTTL how long to cache the filtered monitor list from the registry
   * @param unit     time unit for the cache ttl
   */
  public MonitorRegistryMetricPoller(MonitorRegistry registry, long cacheTTL, TimeUnit unit) {
    this(registry, cacheTTL, unit, true);
  }

  /**
   * Creates a new instance using the specified registry.
   *
   * @param registry   registry to query for annotated objects
   * @param cacheTTL   how long to cache the filtered monitor list from the registry
   * @param unit       time unit for the cache ttl
   * @param useLimiter whether to use a time limiter for getting the values from the monitors
   */
  public MonitorRegistryMetricPoller(MonitorRegistry registry, long cacheTTL, TimeUnit unit,
                                     boolean useLimiter) {
    this(registry, cacheTTL, unit, useLimiter, ClockWithOffset.INSTANCE);
  }

  /**
   * Creates a new instance using the specified registry.
   *
   * @param registry   registry to query for annotated objects
   * @param cacheTTL   how long to cache the filtered monitor list from the registry
   * @param unit       time unit for the cache ttl
   * @param useLimiter whether to use a time limiter for getting the values from the monitors
   * @param clock      clock instance to use to get the time
   */
  public MonitorRegistryMetricPoller(
      MonitorRegistry registry, long cacheTTL, TimeUnit unit, boolean useLimiter,
      Clock clock) {
    this.registry = registry;
    this.cacheTTL = TimeUnit.MILLISECONDS.convert(cacheTTL, unit);
    this.clock = clock;
    if (useLimiter) {
      final ThreadFactory factory =
          ThreadFactories.withName("ServoMonitorGetValueLimiter-%d");
      service = Executors.newSingleThreadExecutor(factory);
      limiter = new TimeLimiter(service);
    } else {
      service = null;
      limiter = null;
    }
  }

  /**
   * Fetch the value of a monitor, bounded by a one second timeout when the
   * limiter is enabled. Returns {@code null} if the value could not be
   * retrieved; failures are counted rather than propagated.
   */
  private Object getValue(Monitor<?> monitor) {
    try {
      if (limiter != null) {
        final MonitorValueCallable c = new MonitorValueCallable(monitor);
        return limiter.callWithTimeout(c, 1, TimeUnit.SECONDS);
      } else {
        return monitor.getValue();
      }
    } catch (TimeLimiter.UncheckedTimeoutException e) {
      LOGGER.warn("timeout trying to get value for {}", monitor.getConfig());
      TIMEOUT_ERROR.increment();
    } catch (Exception e) {
      LOGGER.warn("failed to get value for {}", monitor.getConfig(), e);
      DynamicCounter.increment(GET_VALUE_ERROR, "id", e.getClass().getSimpleName());
    }
    return null;
  }

  /**
   * Recursively expand composite monitors and collect the leaf monitors
   * matching the filter into {@code monitors}.
   */
  private void getMonitors(List<Monitor<?>> monitors, MetricFilter filter, Monitor<?> monitor) {
    if (monitor instanceof CompositeMonitor<?>) {
      for (Monitor<?> m : ((CompositeMonitor<?>) monitor).getMonitors()) {
        getMonitors(monitors, filter, m);
      }
    } else if (filter.matches(monitor.getConfig())) {
      monitors.add(monitor);
    }
  }

  /**
   * Rebuild the cached list of filtered monitors if it is older than the TTL
   * or has never been populated.
   */
  private void refreshMonitorCache(MetricFilter filter) {
    // BUGFIX: the age was previously computed with System.currentTimeMillis()
    // while the last-update timestamp is recorded with the injected clock.
    // Use the same clock for both so a custom Clock (e.g. in tests, or one
    // with an offset) yields a consistent age. Also refresh unconditionally
    // if the cache has never been populated.
    final long age = clock.now() - cacheLastUpdateTime.get();
    if (cachedMonitors.get() == null || age > cacheTTL) {
      List<Monitor<?>> monitors = new ArrayList<>();
      for (Monitor<?> monitor : registry.getRegisteredMonitors()) {
        try {
          getMonitors(monitors, filter, monitor);
        } catch (Exception e) {
          LOGGER.warn("failed to get monitors for composite {}", monitor.getConfig(), e);
        }
      }
      cacheLastUpdateTime.set(clock.now());
      cachedMonitors.set(monitors);
      LOGGER.debug("cache refreshed, {} monitors matched filter, previous age {} seconds",
          monitors.size(), age / 1000);
    } else {
      LOGGER.debug("cache age of {} seconds is within ttl of {} seconds",
          age / 1000, cacheTTL / 1000);
    }
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public List<Metric> poll(MetricFilter filter) {
    return poll(filter, false);
  }

  /**
   * {@inheritDoc}
   *
   * <p>NOTE(review): the {@code reset} flag is not consulted here; monitors
   * are polled identically either way — confirm against the
   * {@code MetricPoller} contract.
   */
  @Override
  public List<Metric> poll(MetricFilter filter, boolean reset) {
    refreshMonitorCache(filter);
    List<Monitor<?>> monitors = cachedMonitors.get();
    List<Metric> metrics = new ArrayList<>(monitors.size());
    for (Monitor<?> monitor : monitors) {
      Object v = getValue(monitor);
      if (v != null) {
        metrics.add(new Metric(monitor.getConfig(), clock.now(), v));
      }
    }
    return metrics;
  }

  /**
   * Shutsdown the thread executor used for time limiting the get value calls. It is a good idea
   * to call this and explicitly cleanup the thread. In most cases the threads will be cleaned
   * up when the executor is garbage collected if shutdown is not called explicitly.
   */
  public void shutdown() {
    if (service != null) {
      service.shutdownNow();
    }
  }

  /** Adapts a monitor's getValue call to a Callable for the time limiter. */
  private static class MonitorValueCallable implements Callable<Object> {

    private final Monitor<?> monitor;

    MonitorValueCallable(Monitor<?> monitor) {
      this.monitor = monitor;
    }

    @Override
    public Object call() throws Exception {
      return monitor.getValue();
    }
  }
}
| 3,002 |
0 | Create_ds/servo/servo-core/src/main/java/com/netflix/servo | Create_ds/servo/servo-core/src/main/java/com/netflix/servo/tag/ThreadLocalTaggingContext.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.tag;
/**
 * A {@link TaggingContext} backed by a thread-local variable. Tags stored here
 * apply to work done on the current thread, which allows per-request tagging
 * on servers that dedicate a thread to each request.
 */
public final class ThreadLocalTaggingContext implements TaggingContext {

  private static final ThreadLocalTaggingContext INSTANCE = new ThreadLocalTaggingContext();

  /** Tags associated with the calling thread; null when none have been set. */
  private final ThreadLocal<TagList> tagsForThread = new ThreadLocal<>();

  private ThreadLocalTaggingContext() {
  }

  /**
   * Get the instance.
   */
  public static ThreadLocalTaggingContext getInstance() {
    return INSTANCE;
  }

  /**
   * Set the tags to be associated with the current thread.
   */
  public void setTags(TagList tags) {
    tagsForThread.set(tags);
  }

  /**
   * Get the tags associated with the current thread.
   */
  @Override
  public TagList getTags() {
    return tagsForThread.get();
  }

  /**
   * Remove the tags associated with the current thread.
   */
  public void reset() {
    tagsForThread.remove();
  }
}
| 3,003 |
0 | Create_ds/servo/servo-core/src/main/java/com/netflix/servo | Create_ds/servo/servo-core/src/main/java/com/netflix/servo/tag/SmallTagMap.java | /**
* Copyright 2014 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.tag;
import com.netflix.servo.util.Preconditions;
import com.netflix.servo.util.Strings;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Set;
/**
 * This class is not intended to be used by 3rd parties and should be considered an implementation
 * detail.
 *
 * <p>An immutable, memory-efficient collection of tags keyed by tag key.
 * Instances are created with the open-addressed {@link Builder} and store the
 * final entries in a key-sorted array so lookups can use binary search.
 */
public class SmallTagMap implements Iterable<Tag> {

  /**
   * Max number of tags supported in a tag map. Attempting to add additional tags
   * will result in a warning logged.
   */
  public static final int MAX_TAGS = 32;

  /**
   * Initial size for the map.
   */
  public static final int INITIAL_TAG_SIZE = 8;

  private static final Logger LOGGER = LoggerFactory.getLogger(SmallTagMap.class);

  /**
   * Return a new builder to assist in creating a new SmallTagMap using the default tag size (8).
   */
  public static Builder builder() {
    return new Builder(INITIAL_TAG_SIZE);
  }

  /**
   * Helper class to build the immutable map.
   *
   * <p>Internally a linear-probing open-addressed hash table stored in a flat
   * {@code Object[]}: slot {@code 2*i} holds the key string and slot
   * {@code 2*i + 1} holds the {@link Tag}. Not thread-safe.
   */
  public static class Builder {
    // Number of distinct keys currently stored.
    private int actualSize = 0;
    // Capacity in entries; buf.length == size * 2.
    private int size;
    private Object[] buf;

    // (Re)allocate the backing array for the given capacity and reset the count.
    private void init(int size) {
      this.size = size;
      buf = new Object[size * 2];
      actualSize = 0;
    }

    /**
     * Create a new builder with the specified capacity.
     *
     * @param size Size of the underlying array.
     */
    public Builder(int size) {
      init(size);
    }

    /**
     * Get the number of entries in this map..
     */
    public int size() {
      return actualSize;
    }

    /**
     * True if this builder does not have any tags added to it.
     */
    public boolean isEmpty() {
      return actualSize == 0;
    }

    // Called when probing wrapped around (table full): double the capacity and
    // rehash all existing entries, then add the pending tag. If the capacity
    // has already reached MAX_TAGS the tag is dropped and an error is logged.
    private void resizeIfPossible(Tag tag) {
      if (size < MAX_TAGS) {
        Object[] prevBuf = buf;
        init(size * 2);
        // Odd slots hold the Tag objects; re-adding rehashes each entry.
        for (int i = 1; i < prevBuf.length; i += 2) {
          Tag t = (Tag) prevBuf[i];
          if (t != null) {
            add(t);
          }
        }
        add(tag);
      } else {
        final String msg = String.format(
            "Cannot add Tag %s - Maximum number of tags (%d) reached.",
            tag, MAX_TAGS);
        LOGGER.error(msg);
      }
    }

    /**
     * Adds a new tag to this builder. If a tag with the same key is already
     * present its entry is replaced; otherwise the entry count grows by one.
     */
    public Builder add(Tag tag) {
      String k = tag.getKey();
      // Initial probe slot from the key hash; abs over long avoids the
      // Math.abs(Integer.MIN_VALUE) overflow corner case.
      int pos = (int) (Math.abs((long) k.hashCode()) % size);
      int i = pos;
      Object ki = buf[i * 2];
      // Linear probing: advance until an empty slot or a matching key.
      while (ki != null && !ki.equals(k)) {
        i = (i + 1) % size;
        if (i == pos) {
          // Wrapped around without finding a free slot — the table is full.
          resizeIfPossible(tag);
          return this;
        }
        ki = buf[i * 2];
      }
      if (ki != null) {
        // Key already present: overwrite the existing entry in place.
        buf[i * 2] = k;
        buf[i * 2 + 1] = tag;
      } else {
        if (buf[i * 2] != null) {
          throw new IllegalStateException("position has already been filled");
        }
        buf[i * 2] = k;
        buf[i * 2 + 1] = tag;
        actualSize += 1;
      }
      return this;
    }

    /**
     * Adds all tags from the {@link Iterable} tags to this builder.
     */
    public Builder addAll(Iterable<Tag> tags) {
      for (Tag tag : tags) {
        add(tag);
      }
      return this;
    }

    /**
     * Get the resulting SmallTagMap. Entries are copied out of the hash table
     * and sorted by key, then value, so the map can use binary search.
     */
    public SmallTagMap result() {
      Tag[] tagArray = new Tag[actualSize];
      int tagIdx = 0;
      for (int i = 1; i < buf.length; i += 2) {
        Object o = buf[i];
        if (o != null) {
          tagArray[tagIdx++] = (Tag) o;
        }
      }
      Arrays.sort(tagArray, (o1, o2) -> {
        int keyCmp = o1.getKey().compareTo(o2.getKey());
        if (keyCmp != 0) {
          return keyCmp;
        }
        return o1.getValue().compareTo(o2.getValue());
      });
      assert (tagIdx == actualSize);
      return new SmallTagMap(tagArray);
    }
  }

  // Iterator over the sorted tag array; remove() is unsupported because the
  // map is immutable.
  private class SmallTagIterator implements Iterator<Tag> {
    private int i = 0;

    @Override
    public boolean hasNext() {
      return i < tagArray.length;
    }

    @Override
    public Tag next() {
      if (i < tagArray.length) {
        return tagArray[i++];
      }
      throw new NoSuchElementException();
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException("SmallTagMaps are immutable");
    }
  }

  @Override
  public Iterator<Tag> iterator() {
    return new SmallTagIterator();
  }

  // Lazily computed; 0 means "not yet computed". Benign race: concurrent
  // callers may compute it more than once, but always to the same value.
  private int cachedHashCode = 0;
  private final Tag[] tagArray;

  /**
   * Create a new SmallTagMap using the given array and size.
   *
   * @param tagArray sorted array of tags
   */
  SmallTagMap(Tag[] tagArray) {
    this.tagArray = Preconditions.checkNotNull(tagArray, "tagArray");
  }

  // Standard binary search over the key-sorted tag array. Returns the index of
  // the entry with the given key, or (-(insertionPoint) - 1) when absent.
  static int binarySearch(Tag[] a, String key) {
    int low = 0;
    int high = a.length - 1;
    while (low <= high) {
      final int mid = (low + high) >>> 1;
      final Tag midValTag = a[mid];
      final String midVal = midValTag.getKey();
      final int cmp = midVal.compareTo(key);
      if (cmp < 0) {
        low = mid + 1;
      } else if (cmp > 0) {
        high = mid - 1;
      } else {
        return mid; // tag key found
      }
    }
    return -(low + 1); // tag key not found.
  }

  /**
   * Get the tag associated with a given key, or {@code null} if not present.
   */
  public Tag get(String key) {
    int idx = binarySearch(tagArray, key);
    if (idx < 0) {
      return null;
    } else {
      return tagArray[idx];
    }
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public int hashCode() {
    if (cachedHashCode == 0) {
      cachedHashCode = Arrays.hashCode(tagArray);
    }
    return cachedHashCode;
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public String toString() {
    return "SmallTagMap{" + Strings.join(",", iterator()) + "}";
  }

  /**
   * Returns true whether this map contains a Tag with the given key.
   */
  public boolean containsKey(String k) {
    return get(k) != null;
  }

  /**
   * Returns true if this map has no entries.
   */
  public boolean isEmpty() {
    return tagArray.length == 0;
  }

  /**
   * Returns the number of Tags stored in this map.
   */
  public int size() {
    return tagArray.length;
  }

  /**
   * Returns the {@link Set} of tags.
   *
   * @deprecated This method will be removed in the next version. This is an expensive method
   * and not in the spirit of this class which is to be more efficient than the standard
   * collections library.
   */
  @Deprecated
  public Set<Tag> tagSet() {
    return new HashSet<>(Arrays.asList(tagArray));
  }

  @Override
  /** {@inheritDoc} */
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null || !(obj instanceof SmallTagMap)) {
      return false;
    }
    SmallTagMap that = (SmallTagMap) obj;
    return Arrays.equals(tagArray, that.tagArray);
  }
}
| 3,004 |
0 | Create_ds/servo/servo-core/src/main/java/com/netflix/servo | Create_ds/servo/servo-core/src/main/java/com/netflix/servo/tag/TaggingContext.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.tag;
/**
 * Returns the set of tags associated with the current execution context.
 * Implementations of this interface are used to provide a common set of tags
 * for all contextual monitors in a given execution flow.
 */
public interface TaggingContext {

  /**
   * Returns the tags for the current execution context.
   *
   * NOTE(review): implementations appear free to return null when no tags are
   * set (see ThreadLocalTaggingContext) — callers should be prepared for that.
   */
  TagList getTags();
}
| 3,005 |
0 | Create_ds/servo/servo-core/src/main/java/com/netflix/servo | Create_ds/servo/servo-core/src/main/java/com/netflix/servo/tag/BasicTag.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.tag;
import com.netflix.servo.util.Preconditions;
/**
 * Immutable tag. A simple key/value pair where both parts are guaranteed to be
 * non-null and non-empty.
 */
public final class BasicTag implements Tag {

  private final String key;
  private final String value;

  /**
   * Creates a new instance with the specified key and value.
   */
  public BasicTag(String key, String value) {
    this.key = checkNotEmpty(key, "key");
    this.value = checkNotEmpty(value, "value");
  }

  /**
   * Verify that the {@code v} is not null or an empty string.
   */
  private static String checkNotEmpty(String v, String name) {
    Preconditions.checkNotNull(v, name);
    Preconditions.checkArgument(!v.isEmpty(), "%s cannot be empty", name);
    return v;
  }

  /**
   * {@inheritDoc}
   */
  public String getKey() {
    return key;
  }

  /**
   * {@inheritDoc}
   */
  public String getValue() {
    return value;
  }

  /**
   * {@inheritDoc}
   *
   * <p>Equality is based on the {@link Tag} interface, so any Tag with the
   * same key and value compares equal.
   */
  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof Tag)) {
      return false;
    }
    final Tag other = (Tag) o;
    return key.equals(other.getKey()) && value.equals(other.getValue());
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public int hashCode() {
    // Same formula as the conventional 31 * key + value accumulation.
    return 31 * key.hashCode() + value.hashCode();
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public String toString() {
    return key + "=" + value;
  }

  /**
   * Parse a string representing a tag. A tag string should have the format {@code key=value}.
   * Whitespace at the ends of the key and value will be removed. Both the key and value must
   * have at least one character.
   *
   * @param tagString string with encoded tag
   * @return tag parsed from the string
   * @deprecated Use Tags.parseTag instead.
   */
  public static BasicTag parseTag(String tagString) {
    return (BasicTag) Tags.parseTag(tagString);
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public String tagString() {
    return toString();
  }
}
| 3,006 |
0 | Create_ds/servo/servo-core/src/main/java/com/netflix/servo | Create_ds/servo/servo-core/src/main/java/com/netflix/servo/tag/Tags.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.tag;
import com.google.common.collect.Interner;
import com.google.common.collect.Interners;
/**
 * Helper functions for working with tags and tag lists.
 *
 * <p>Keys, values, and tags are interned via weak interners so repeated use of
 * the same tag data shares a single canonical instance.
 */
public final class Tags {

  /**
   * Keep track of the strings that have been used for keys and values.
   */
  private static final Interner<String> STR_CACHE = Interners.newWeakInterner();

  /**
   * Keep track of tags that have been seen before and reuse.
   */
  private static final Interner<Tag> TAG_CACHE = Interners.newWeakInterner();

  /**
   * Intern strings used for tag keys or values.
   */
  public static String intern(String v) {
    return STR_CACHE.intern(v);
  }

  /**
   * Returns the canonical representation of a tag.
   */
  public static Tag intern(Tag t) {
    return TAG_CACHE.intern(t);
  }

  /**
   * Interns custom tag types, assumes that basic tags are already interned. This is used to
   * ensure that we have a common view of tags internally. In particular, different subclasses of
   * Tag may not be equal even if they have the same key and value. Tag lists should use this to
   * ensure the equality will work as expected.
   */
  static Tag internCustom(Tag t) {
    return (t instanceof BasicTag) ? t : newTag(t.getKey(), t.getValue());
  }

  /**
   * Create a new tag instance.
   */
  public static Tag newTag(String key, String value) {
    Tag newTag = new BasicTag(intern(key), intern(value));
    return intern(newTag);
  }

  /**
   * Parse a string representing a tag. A tag string should have the format {@code key=value}.
   * Whitespace at the ends of the key and value will be removed. Both the key and value must
   * have at least one character.
   *
   * @param tagString string with encoded tag
   * @return tag parsed from the string
   * @throws IllegalArgumentException if the string contains no '=' separator
   */
  public static Tag parseTag(String tagString) {
    // indexOf(char) avoids the String-search overload for a single character.
    final int eqIndex = tagString.indexOf('=');
    if (eqIndex < 0) {
      throw new IllegalArgumentException("key and value must be separated by '='");
    }
    final String k = tagString.substring(0, eqIndex).trim();
    // Single-arg substring runs to the end of the string.
    final String v = tagString.substring(eqIndex + 1).trim();
    return newTag(k, v);
  }

  /**
   * Utility class.
   */
  private Tags() {
  }
}
| 3,007 |
0 | Create_ds/servo/servo-core/src/main/java/com/netflix/servo | Create_ds/servo/servo-core/src/main/java/com/netflix/servo/tag/SortedTagList.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.tag;
import com.netflix.servo.util.Strings;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
/**
 * A {@link com.netflix.servo.tag.TagList} backed by a {@link SortedMap}.
 * <p/>
 * Prefer the more efficient {@link com.netflix.servo.tag.BasicTagList} implementation which
 * also provides an {@code asMap} method that returns a sorted map of tags.
 */
public final class SortedTagList implements TagList {

  /**
   * An empty {@code SortedTagList}.
   */
  public static final SortedTagList EMPTY = new Builder().build();

  private final SortedMap<String, Tag> tagSortedMap;
  private final int size;

  /**
   * Helper class to construct {@code SortedTagList} objects.
   */
  public static final class Builder {
    // Staging area; later additions with the same key replace earlier ones.
    private final Map<String, Tag> data = new HashMap<>();

    /**
     * Add the collection of tags {@code tagsCollection} to this builder and
     * return self.
     */
    public Builder withTags(Collection<Tag> tagsCollection) {
      for (Tag tag : tagsCollection) {
        final Tag t = Tags.internCustom(tag);
        data.put(t.getKey(), t);
      }
      return this;
    }

    /**
     * Add all tags from the {@link com.netflix.servo.tag.TagList} tags to this builder
     * and return self.
     */
    public Builder withTags(TagList tags) {
      for (Tag tag : tags) {
        final Tag t = Tags.internCustom(tag);
        data.put(t.getKey(), t);
      }
      return this;
    }

    /**
     * Add the {@link Tag} to this builder and return self.
     */
    public Builder withTag(Tag tag) {
      final Tag t = Tags.internCustom(tag);
      data.put(t.getKey(), t);
      return this;
    }

    /**
     * Add the tag specified by {@code key} and {@code value} to this builder and return self.
     */
    public Builder withTag(String key, String value) {
      return withTag(Tags.newTag(key, value));
    }

    /**
     * Construct the {@code SortedTagList}.
     */
    public SortedTagList build() {
      return new SortedTagList(this);
    }
  }

  private SortedTagList(Builder builder) {
    this.tagSortedMap = Collections.unmodifiableSortedMap(
        new TreeMap<>(builder.data));
    this.size = tagSortedMap.size();
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public Tag getTag(String key) {
    return tagSortedMap.get(key);
  }

  /**
   * {@inheritDoc}
   */
  @Override // CONSISTENCY FIX: was the only interface method missing @Override.
  public String getValue(String key) {
    final Tag t = tagSortedMap.get(key);
    return (t == null) ? null : t.getValue();
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public boolean containsKey(String key) {
    return tagSortedMap.containsKey(key);
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public boolean isEmpty() {
    return tagSortedMap.isEmpty();
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public int size() {
    return size;
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public Iterator<Tag> iterator() {
    return tagSortedMap.values().iterator();
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public Map<String, String> asMap() {
    Map<String, String> stringMap = new LinkedHashMap<>(size);
    for (Tag t : tagSortedMap.values()) {
      stringMap.put(t.getKey(), t.getValue());
    }
    return stringMap;
  }

  /**
   * Get a new {@link com.netflix.servo.tag.SortedTagList.Builder}.
   */
  public static Builder builder() {
    return new Builder();
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public boolean equals(Object obj) {
    return (obj instanceof SortedTagList)
        && tagSortedMap.equals(((SortedTagList) obj).tagSortedMap);
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public int hashCode() {
    return tagSortedMap.hashCode();
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public String toString() {
    return Strings.join(",", tagSortedMap.values().iterator());
  }
}
| 3,008 |
0 | Create_ds/servo/servo-core/src/main/java/com/netflix/servo | Create_ds/servo/servo-core/src/main/java/com/netflix/servo/tag/Tag.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.tag;
/**
 * A key-value pair associated with a metric.
 *
 * NOTE(review): BasicTag requires both parts to be non-null and non-empty;
 * other implementations presumably follow the same contract — confirm.
 */
public interface Tag {

  /**
   * Returns the key corresponding to this tag.
   */
  String getKey();

  /**
   * Returns the value corresponding to this tag.
   */
  String getValue();

  /**
   * Returns the string representation of this tag.
   */
  String tagString();
}
| 3,009 |
0 | Create_ds/servo/servo-core/src/main/java/com/netflix/servo | Create_ds/servo/servo-core/src/main/java/com/netflix/servo/tag/TagComparator.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.tag;
import java.io.Serializable;
import java.util.Comparator;
/**
 * Comparator for ordering tags based on the key then the value.
 */
public class TagComparator implements Comparator<Tag>, Serializable {

  /**
   * {@inheritDoc}
   */
  @Override
  public int compare(Tag first, Tag second) {
    // Order by key; fall back to value only when the keys are equal.
    final int byKey = first.getKey().compareTo(second.getKey());
    return (byKey != 0) ? byKey : first.getValue().compareTo(second.getValue());
  }
}
| 3,010 |
0 | Create_ds/servo/servo-core/src/main/java/com/netflix/servo | Create_ds/servo/servo-core/src/main/java/com/netflix/servo/tag/BasicTagList.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.tag;
import com.netflix.servo.util.Iterables;
import com.netflix.servo.util.Preconditions;
import com.netflix.servo.util.Strings;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
/**
* Immutable tag list.
*/
public final class BasicTagList implements TagList {
/**
* An empty tag list.
*/
public static final TagList EMPTY = new BasicTagList(Collections.<Tag>emptySet());
private final SmallTagMap tagMap;
private SortedMap<String, String> sortedTaglist;
/**
* Create a BasicTagList from a {@link SmallTagMap}.
*/
public BasicTagList(SmallTagMap tagMap) {
this.tagMap = tagMap;
}
/**
* Creates a new instance with a fixed set of tags.
*
* @param entries entries to include in this tag list
*/
public BasicTagList(Iterable<Tag> entries) {
SmallTagMap.Builder builder = SmallTagMap.builder();
builder.addAll(entries);
tagMap = builder.result();
}
/**
* {@inheritDoc}
*/
public Tag getTag(String key) {
return tagMap.get(key);
}
/**
* {@inheritDoc}
*/
public String getValue(String key) {
final Tag t = tagMap.get(key);
return (t == null) ? null : t.getValue();
}
/**
* {@inheritDoc}
*/
public boolean containsKey(String key) {
return tagMap.containsKey(key);
}
/**
* {@inheritDoc}
*/
public boolean isEmpty() {
return tagMap.isEmpty();
}
/**
* {@inheritDoc}
*/
public int size() {
return tagMap.size();
}
/**
* {@inheritDoc}
*/
public Iterator<Tag> iterator() {
return tagMap.iterator();
}
/**
* {@inheritDoc}
*/
public Map<String, String> asMap() {
if (sortedTaglist != null) {
return sortedTaglist;
}
SortedMap<String, String> tagMap = new TreeMap<>();
for (Tag tag : this.tagMap) {
tagMap.put(tag.getKey(), tag.getValue());
}
sortedTaglist = Collections.unmodifiableSortedMap(tagMap);
return sortedTaglist;
}
/**
* Returns a new tag list with additional tags from {@code tags}. If there
* is a conflict with tag keys the tag from {@code tags} will be used.
*/
public BasicTagList copy(TagList tags) {
return concat(this, tags);
}
/**
* Returns a new tag list with an additional tag. If {@code key} is
* already present in this tag list the value will be overwritten with
* {@code value}.
*/
public BasicTagList copy(String key, String value) {
return concat(this, Tags.newTag(key, value));
}
/**
* {@inheritDoc}
*/
@Override
public boolean equals(Object obj) {
return this == obj
|| (obj instanceof BasicTagList) && tagMap.equals(((BasicTagList) obj).tagMap);
}
/**
* {@inheritDoc}
*/
@Override
public int hashCode() {
return tagMap.hashCode();
}
/**
* {@inheritDoc}
*/
@Override
public String toString() {
return Strings.join(",", tagMap.iterator());
}
/**
* Returns a tag list containing the union of {@code t1} and {@code t2}.
* If there is a conflict with tag keys, the tag from {@code t2} will be
* used.
*/
public static BasicTagList concat(TagList t1, TagList t2) {
return new BasicTagList(Iterables.concat(t1, t2));
}
/**
* Returns a tag list containing the union of {@code t1} and {@code t2}.
* If there is a conflict with tag keys, the tag from {@code t2} will be
* used.
*/
public static BasicTagList concat(TagList t1, Tag... t2) {
return new BasicTagList(Iterables.concat(t1, Arrays.asList(t2)));
}
/**
* Returns a tag list from the list of key values passed.
* <p/>
* Example:
* <p/>
* <code>
* BasicTagList tagList = BasicTagList.of("id", "someId", "class", "someClass");
* </code>
*/
public static BasicTagList of(String... tags) {
Preconditions.checkArgument(tags.length % 2 == 0,
"tags must be a sequence of key,value pairs");
final SmallTagMap.Builder builder = SmallTagMap.builder();
for (int i = 0; i < tags.length; i += 2) {
Tag t = Tags.newTag(tags[i], tags[i + 1]);
builder.add(t);
}
return new BasicTagList(builder.result());
}
/**
* Returns a tag list from the tags.
*/
public static BasicTagList of(Tag... tags) {
return new BasicTagList(Arrays.asList(tags));
}
/**
* Returns a tag list that has a copy of {@code tags}.
*
* @deprecated Use {@link #of(Tag...)}
*/
@Deprecated
public static BasicTagList copyOf(Tag... tags) {
return new BasicTagList(Arrays.asList(tags));
}
/**
* Returns a tag list that has a copy of {@code tags}. Each tag value
* is expected to be a string parseable using {@link BasicTag#parseTag}.
*
* @deprecated Use {@link #of(String...)} with separate key, values instead.
*/
@Deprecated
public static BasicTagList copyOf(String... tags) {
return copyOf(Arrays.asList(tags));
}
/**
* Returns a tag list that has a copy of {@code tags}. Each tag value
* is expected to be a string parseable using {@link BasicTag#parseTag}.
*/
public static BasicTagList copyOf(Iterable<String> tags) {
SmallTagMap.Builder builder = SmallTagMap.builder();
for (String tag : tags) {
builder.add(Tags.parseTag(tag));
}
return new BasicTagList(builder.result());
}
/**
* Returns a tag list that has a copy of {@code tags}.
*/
public static BasicTagList copyOf(Map<String, String> tags) {
SmallTagMap.Builder builder = SmallTagMap.builder();
for (Map.Entry<String, String> tag : tags.entrySet()) {
builder.add(Tags.newTag(tag.getKey(), tag.getValue()));
}
return new BasicTagList(builder.result());
}
}
| 3,011 |
0 | Create_ds/servo/servo-core/src/main/java/com/netflix/servo | Create_ds/servo/servo-core/src/main/java/com/netflix/servo/tag/StandardTagKeys.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.tag;
/**
 * Standard tag keys that are used within this library.
 */
public enum StandardTagKeys {
/**
 * Canonical name for the class that is providing the metric.
 */
CLASS_NAME("ClassName"),
/**
 * Monitor id if one is provided via the annotation.
 */
MONITOR_ID("MonitorId");
// The string used as the tag key when this constant is attached to a metric.
private final String keyName;
StandardTagKeys(String keyName) {
this.keyName = keyName;
}
/**
 * Returns the tag key string associated with this constant, e.g. "ClassName".
 */
public String getKeyName() {
return keyName;
}
}
| 3,012 |
0 | Create_ds/servo/servo-core/src/main/java/com/netflix/servo | Create_ds/servo/servo-core/src/main/java/com/netflix/servo/tag/InjectableTag.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.tag;
import org.slf4j.LoggerFactory;
import java.net.InetAddress;
import java.net.UnknownHostException;
/**
 * Group of Tags whose values will be dynamically set at runtime
 * based on local calls. The values are resolved once, when the enum
 * constants are initialized.
 */
public enum InjectableTag implements Tag {
  /**
   * The current hostname.
   */
  HOSTNAME("hostname", getHostName()),
  /**
   * The ip for localhost.
   */
  IP("ip", getIp());

  private final String key;
  private final String value;

  InjectableTag(String key, String val) {
    this.key = key;
    this.value = val;
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public String getKey() {
    return key;
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public String getValue() {
    return value;
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public String tagString() {
    return key + "=" + value;
  }

  // Fetch the address once instead of calling loadAddress() twice: the second
  // lookup was redundant, and the fallback string had a typo ("unkownHost").
  private static String getHostName() {
    final InetAddress address = loadAddress();
    return (address != null) ? address.getHostName() : "unknownHost";
  }

  private static String getIp() {
    final InetAddress address = loadAddress();
    return (address != null) ? address.getHostAddress() : "unknownHost";
  }

  // Returns the local host address, or null if resolution fails (the failure
  // is logged rather than propagated so enum initialization never throws).
  private static InetAddress loadAddress() {
    try {
      return InetAddress.getLocalHost();
    } catch (UnknownHostException e) {
      LoggerFactory.getLogger(InjectableTag.class).warn("Unable to load INET info.", e);
      return null;
    }
  }
}
| 3,013 |
0 | Create_ds/servo/servo-core/src/main/java/com/netflix/servo | Create_ds/servo/servo-core/src/main/java/com/netflix/servo/tag/TagList.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.tag;
import java.util.Iterator;
import java.util.Map;
/**
 * Represents a list of tags associated with a metric value.
 */
public interface TagList extends Iterable<Tag> {
/**
 * Returns the tag matching a given key, or null if no match is found.
 */
Tag getTag(String key);
/**
 * Returns the value matching a given key, or null if no match is found.
 */
String getValue(String key);
/**
 * Returns true if this list has a tag with the given key.
 */
boolean containsKey(String key);
/**
 * Returns true if this list is empty.
 */
boolean isEmpty();
/**
 * Returns the number of tags in this list.
 */
int size();
/**
 * Returns an iterator over the tags in this list.
 */
Iterator<Tag> iterator();
/**
 * Returns a map containing a copy of the tags in this list.
 */
Map<String, String> asMap();
}
| 3,014 |
0 | Create_ds/servo/servo-core/src/main/java/com/netflix/servo | Create_ds/servo/servo-core/src/main/java/com/netflix/servo/tag/package-info.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
 * <p/>
 * Code related to Tagging objects with metadata.
 */
/**
* Code related to Tagging objects with metadata.
*/
package com.netflix.servo.tag;
| 3,015 |
0 | Create_ds/servo/servo-core/src/main/java/com/netflix/servo | Create_ds/servo/servo-core/src/main/java/com/netflix/servo/stats/StatsBuffer.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.stats;
import com.netflix.servo.util.Preconditions;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicBoolean;
/**
 * A simple circular buffer that records values, and computes useful stats.
 * This implementation is not thread safe.
 */
public class StatsBuffer {
  private int pos;     // next write position; ever-increasing, mapped into the buffer by unsigned mod
  private int curSize; // number of valid entries recorded, capped at size
  private double mean;
  private double variance;
  private double stddev;
  private long min;
  private long max;
  private long total;
  private final double[] percentiles;      // requested percentiles, e.g. {95.0, 99.0}
  private final double[] percentileValues; // computed values, parallel to 'percentiles'
  private final int size;
  private final long[] values;
  // Ensures stats are computed at most once per window; reset() re-arms it.
  private final AtomicBoolean statsComputed = new AtomicBoolean(false);

  /**
   * Create a circular buffer that will be used to record values and compute useful stats.
   *
   * @param size The capacity of the buffer
   * @param percentiles Array of percentiles to compute. For example { 95.0, 99.0 }.
   *                    If no percentileValues are required pass a 0-sized array.
   */
  public StatsBuffer(int size, double[] percentiles) {
    Preconditions.checkArgument(size > 0, "Size of the buffer must be greater than 0");
    Preconditions.checkArgument(percentiles != null,
        "Percents array must be non-null. Pass a 0-sized array "
            + "if you don't want any percentileValues to be computed.");
    Preconditions.checkArgument(validPercentiles(percentiles),
        "All percentiles should be in the interval (0.0, 100.0]");
    values = new long[size];
    this.size = size;
    this.percentiles = Arrays.copyOf(percentiles, percentiles.length);
    this.percentileValues = new double[percentiles.length];
    reset();
  }

  // Percentiles must lie in (0.0, 100.0].
  private static boolean validPercentiles(double[] percentiles) {
    for (double percentile : percentiles) {
      if (percentile <= 0.0 || percentile > 100.0) {
        return false;
      }
    }
    return true;
  }

  /**
   * Reset our local state: All values are set to 0.
   */
  public void reset() {
    statsComputed.set(false);
    pos = 0;
    curSize = 0;
    total = 0L;
    mean = 0.0;
    variance = 0.0;
    stddev = 0.0;
    min = 0L;
    max = 0L;
    for (int i = 0; i < percentileValues.length; ++i) {
      percentileValues[i] = 0.0;
    }
  }

  /**
   * Record a new value for this buffer, overwriting the oldest entry once
   * the buffer is full.
   */
  public void record(long n) {
    // Unsigned remainder keeps the index valid even after pos overflows
    // Integer.MAX_VALUE; buffers are normally reset long before that point.
    values[Integer.remainderUnsigned(pos++, size)] = n;
    if (curSize < size) {
      ++curSize;
    }
  }

  /**
   * Compute stats for the current set of values. This is a no-op if stats
   * were already computed since the last reset, or if nothing was recorded.
   */
  public void computeStats() {
    if (statsComputed.getAndSet(true)) {
      return;
    }
    if (curSize == 0) {
      return;
    }
    Arrays.sort(values, 0, curSize); // to compute percentileValues
    min = values[0];
    max = values[curSize - 1];
    total = 0L;
    double sumSquares = 0.0;
    for (int i = 0; i < curSize; ++i) {
      // Square in double: values[i] * values[i] as a long silently overflows
      // for |value| greater than ~3e9, corrupting the variance.
      final double v = values[i];
      total += values[i];
      sumSquares += v * v;
    }
    mean = (double) total / curSize;
    if (curSize == 1) {
      variance = 0d;
    } else {
      // Sample variance (n - 1 denominator) via the sum-of-squares identity.
      variance = (sumSquares - ((double) total * total / curSize)) / (curSize - 1);
    }
    stddev = Math.sqrt(variance);
    computePercentiles(curSize);
  }

  private void computePercentiles(int curSize) {
    for (int i = 0; i < percentiles.length; ++i) {
      percentileValues[i] = calcPercentile(curSize, percentiles[i]);
    }
  }

  // Requires values[0..curSize) to be sorted (done by computeStats).
  private double calcPercentile(int curSize, double percent) {
    if (curSize == 0) {
      return 0.0;
    }
    if (curSize == 1) {
      return values[0];
    }
    /*
     * We use the definition from http://cnx.org/content/m10805/latest
     * modified for 0-indexed arrays.
     */
    final double rank = percent * curSize / 100.0; // SUPPRESS CHECKSTYLE MagicNumber
    final int ir = (int) Math.floor(rank);
    final int irNext = ir + 1;
    final double fr = rank - ir;
    if (irNext >= curSize) {
      return values[curSize - 1];
    } else if (fr == 0.0) {
      return values[ir];
    } else {
      // Interpolate between the two bounding values
      final double lower = values[ir];
      final double upper = values[irNext];
      return fr * (upper - lower) + lower;
    }
  }

  /**
   * Get the number of entries recorded up to the size of the buffer.
   */
  public int getCount() {
    return curSize;
  }

  /**
   * Get the average of the values recorded.
   *
   * @return The average of the values recorded, or 0.0 if no values were recorded.
   */
  public double getMean() {
    return mean;
  }

  /**
   * Get the variance for the population of the recorded values present in our buffer.
   *
   * @return The variance.p of the values recorded, or 0.0 if no values were recorded.
   */
  public double getVariance() {
    return variance;
  }

  /**
   * Get the standard deviation for the population of the recorded values present in our buffer.
   *
   * @return The stddev.p of the values recorded, or 0.0 if no values were recorded.
   */
  public double getStdDev() {
    return stddev;
  }

  /**
   * Get the minimum of the values currently in our buffer.
   *
   * @return The min of the values recorded, or 0.0 if no values were recorded.
   */
  public long getMin() {
    return min;
  }

  /**
   * Get the max of the values currently in our buffer.
   *
   * @return The max of the values recorded, or 0.0 if no values were recorded.
   */
  public long getMax() {
    return max;
  }

  /**
   * Get the total sum of the values recorded.
   *
   * @return The sum of the values recorded, or 0.0 if no values were recorded.
   */
  public long getTotalTime() {
    return total;
  }

  /**
   * Get the computed percentileValues. See {@link StatsConfig} for how to request different
   * percentileValues.
   *
   * @return A copy of the array of computed percentileValues, in the same order
   *         as {@link #getPercentiles()}.
   */
  public double[] getPercentileValues() {
    return Arrays.copyOf(percentileValues, percentileValues.length);
  }

  /**
   * Return the percentiles we will compute: For example: 95.0, 99.0.
   */
  public double[] getPercentiles() {
    return Arrays.copyOf(percentiles, percentiles.length);
  }

  /**
   * Return the value for the percentile given an index.
   * @param index If percentiles are [ 95.0, 99.0 ] index must be 0 or 1 to get the 95th
   *              or 99th percentile respectively.
   *
   * @return The value for the percentile requested.
   */
  public double getPercentileValueForIdx(int index) {
    return percentileValues[index];
  }
}
| 3,016 |
0 | Create_ds/servo/servo-core/src/main/java/com/netflix/servo | Create_ds/servo/servo-core/src/main/java/com/netflix/servo/stats/StatsConfig.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.stats;
import java.util.Arrays;
/**
 * Configuration options for a {@link com.netflix.servo.monitor.StatsTimer}
 * <p/>
 * By default we publish count (number of times the timer was executed), totalTime, and
 * 95.0, and 99.0 percentiles.
 * <p/>
 * The size for the buffer used to store samples is controlled using the sampleSize field,
 * and the frequency
 * at which stats are computed is controlled with the computeFrequencyMillis option.
 * By default these are
 * set to 100,000 entries in the buffer, and computation at 60,000 ms (1 minute) intervals.
 */
public final class StatsConfig {
  private static final String CLASS_NAME = StatsConfig.class.getCanonicalName();
  // System properties used to override the default sample size and compute frequency.
  private static final String SIZE_PROP = CLASS_NAME + ".sampleSize";
  private static final String FREQ_PROP = CLASS_NAME + ".computeFreqMillis";

  /**
   * Builder for StatsConfig. By default the configuration includes count,
   * total and 95th and 99th percentiles.
   */
  public static class Builder {
    private boolean publishCount = true;
    private boolean publishTotal = true;
    private boolean publishMin = false;
    private boolean publishMax = false;
    private boolean publishMean = false;
    private boolean publishVariance = false;
    private boolean publishStdDev = false;
    private int sampleSize = Integer.parseInt(System.getProperty(SIZE_PROP, "1000"));
    private long frequencyMillis = Long.parseLong(System.getProperty(FREQ_PROP, "60000"));
    private double[] percentiles = {95.0, 99.0};

    /**
     * Whether to publish count or not.
     */
    public Builder withPublishCount(boolean publishCount) {
      this.publishCount = publishCount;
      return this;
    }

    /**
     * Whether to publish total or not.
     */
    public Builder withPublishTotal(boolean publishTotal) {
      this.publishTotal = publishTotal;
      return this;
    }

    /**
     * Whether to publish min or not.
     */
    public Builder withPublishMin(boolean publishMin) {
      this.publishMin = publishMin;
      return this;
    }

    /**
     * Whether to publish max or not.
     */
    public Builder withPublishMax(boolean publishMax) {
      this.publishMax = publishMax;
      return this;
    }

    /**
     * Whether to publish an average statistic or not. Note that if you plan
     * to aggregate the values reported (for example across a cluster of nodes) you probably do
     * not want to publish the average per node, and instead want to compute it by publishing
     * total and count.
     */
    public Builder withPublishMean(boolean publishMean) {
      this.publishMean = publishMean;
      return this;
    }

    /**
     * Whether to publish variance or not.
     */
    public Builder withPublishVariance(boolean publishVariance) {
      this.publishVariance = publishVariance;
      return this;
    }

    /**
     * Whether to publish standard deviation or not.
     */
    public Builder withPublishStdDev(boolean publishStdDev) {
      this.publishStdDev = publishStdDev;
      return this;
    }

    /**
     * Set the percentiles to compute.
     *
     * @param percentiles An array of doubles describing which percentiles to compute. For
     *                    example {@code {95.0, 99.0}}
     */
    public Builder withPercentiles(double[] percentiles) {
      this.percentiles = Arrays.copyOf(percentiles, percentiles.length);
      return this;
    }

    /**
     * Set the sample size.
     */
    public Builder withSampleSize(int size) {
      this.sampleSize = size;
      return this;
    }

    /**
     * How often to compute the statistics. Usually this will be set to the main
     * poller interval. (Default is 60s.)
     */
    public Builder withComputeFrequencyMillis(long frequencyMillis) {
      this.frequencyMillis = frequencyMillis;
      return this;
    }

    /**
     * Create a new StatsConfig object.
     */
    public StatsConfig build() {
      return new StatsConfig(this);
    }
  }

  private final boolean publishCount;
  private final boolean publishTotal;
  private final boolean publishMin;
  private final boolean publishMax;
  private final boolean publishMean;
  private final boolean publishVariance;
  private final boolean publishStdDev;
  private final double[] percentiles;
  private final int sampleSize;
  private final long frequencyMillis;

  /**
   * Creates a new configuration object for stats gathering.
   */
  public StatsConfig(Builder builder) {
    this.publishCount = builder.publishCount;
    this.publishTotal = builder.publishTotal;
    this.publishMin = builder.publishMin;
    this.publishMax = builder.publishMax;
    this.publishMean = builder.publishMean;
    this.publishVariance = builder.publishVariance;
    this.publishStdDev = builder.publishStdDev;
    this.sampleSize = builder.sampleSize;
    this.frequencyMillis = builder.frequencyMillis;
    this.percentiles = Arrays.copyOf(builder.percentiles, builder.percentiles.length);
  }

  /**
   * Whether we should publish a 'count' statistic.
   */
  public boolean getPublishCount() {
    return publishCount;
  }

  /**
   * Whether we should publish a 'totalTime' statistic.
   */
  public boolean getPublishTotal() {
    return publishTotal;
  }

  /**
   * Whether we should publish a 'min' statistic.
   */
  public boolean getPublishMin() {
    return publishMin;
  }

  /**
   * Whether we should publish a 'max' statistic.
   */
  public boolean getPublishMax() {
    return publishMax;
  }

  /**
   * Whether we should publish an 'avg' statistic.
   */
  public boolean getPublishMean() {
    return publishMean;
  }

  /**
   * Whether we should publish a 'variance' statistic.
   */
  public boolean getPublishVariance() {
    return publishVariance;
  }

  /**
   * Whether we should publish a 'stdDev' statistic.
   */
  public boolean getPublishStdDev() {
    return publishStdDev;
  }

  /**
   * Get the size of the buffer that we should use.
   */
  public int getSampleSize() {
    return sampleSize;
  }

  /**
   * Get the frequency at which we should update all stats.
   */
  public long getFrequencyMillis() {
    return frequencyMillis;
  }

  /**
   * Get a copy of the array that holds which percentiles we should compute. The percentiles
   * are in the interval (0.0, 100.0)
   */
  public double[] getPercentiles() {
    return Arrays.copyOf(percentiles, percentiles.length);
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public String toString() {
    return "StatsConfig{"
        + "publishCount=" + publishCount
        + ", publishTotal=" + publishTotal
        + ", publishMin=" + publishMin
        + ", publishMax=" + publishMax
        + ", publishMean=" + publishMean
        + ", publishVariance=" + publishVariance
        + ", publishStdDev=" + publishStdDev
        + ", percentiles=" + Arrays.toString(percentiles)
        + ", sampleSize=" + sampleSize
        + ", frequencyMillis=" + frequencyMillis
        + '}';
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof StatsConfig)) {
      return false;
    }
    final StatsConfig that = (StatsConfig) o;
    return frequencyMillis == that.frequencyMillis
        && publishCount == that.publishCount
        && publishMax == that.publishMax
        && publishMean == that.publishMean
        && publishMin == that.publishMin
        && publishStdDev == that.publishStdDev
        && publishTotal == that.publishTotal
        && publishVariance == that.publishVariance
        && sampleSize == that.sampleSize
        && Arrays.equals(percentiles, that.percentiles);
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public int hashCode() {
    // Use the standard java.lang hash helpers instead of hand-rolled
    // ternaries and bit-twiddling; same contract, clearer intent.
    int result = Boolean.hashCode(publishCount);
    result = 31 * result + Boolean.hashCode(publishTotal);
    result = 31 * result + Boolean.hashCode(publishMin);
    result = 31 * result + Boolean.hashCode(publishMax);
    result = 31 * result + Boolean.hashCode(publishMean);
    result = 31 * result + Boolean.hashCode(publishVariance);
    result = 31 * result + Boolean.hashCode(publishStdDev);
    result = 31 * result + Arrays.hashCode(percentiles);
    result = 31 * result + sampleSize;
    result = 31 * result + Long.hashCode(frequencyMillis);
    return result;
  }
}
| 3,017 |
0 | Create_ds/servo/servo-aws/src/test/java/com/netflix/servo/publish | Create_ds/servo/servo-aws/src/test/java/com/netflix/servo/publish/cloudwatch/CloudWatchValueTest.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.publish.cloudwatch;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.cloudwatch.AmazonCloudWatch;
import com.amazonaws.services.cloudwatch.AmazonCloudWatchClient;
import com.amazonaws.services.cloudwatch.model.MetricDatum;
import com.amazonaws.services.cloudwatch.model.PutMetricDataRequest;
import java.util.Date;
//CHECKSTYLE.OFF: LineLength
/**
* Test program for exploring the limits for values that can be written to cloudwatch.
*
* <pre>
* ERROR NaN 1024 - com.amazonaws.services.cloudwatch.model.InvalidParameterValueException: The value ? for parameter MetricData.member.1.Value is invalid.
* ERROR -Infinity 1024 - com.amazonaws.services.cloudwatch.model.InvalidParameterValueException: The value -∞ for parameter MetricData.member.1.Value is invalid.
* ERROR Infinity 1024 - com.amazonaws.services.cloudwatch.model.InvalidParameterValueException: The value ∞ for parameter MetricData.member.1.Value is invalid.
* ERROR 4.900000e-324 -1023 - com.amazonaws.services.cloudwatch.model.InvalidParameterValueException: The value 0 for parameter MetricData.member.1.Value is invalid.
* ERROR 1.797693e+308 1023 - com.amazonaws.services.cloudwatch.model.InvalidParameterValueException: The value 179,769,313,486,231,570,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000 for parameter MetricData.member.1.Value is invalid.
* ERROR 4.697085e+108 361 - com.amazonaws.services.cloudwatch.model.InvalidParameterValueException: The value 4,697,085,165,547,666,500,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000 for parameter MetricData.member.1.Value is invalid.
* ERROR 2.128980e-109 -361 - com.amazonaws.services.cloudwatch.model.InvalidParameterValueException: The value 0 for parameter MetricData.member.1.Value is invalid.
* ERROR -4.697085e+108 361 - com.amazonaws.services.cloudwatch.model.InvalidParameterValueException: The value -4,697,085,165,547,666,500,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000 for parameter MetricData.member.1.Value is invalid.
* ERROR -2.128980e-109 -361 - com.amazonaws.services.cloudwatch.model.InvalidParameterValueException: The value -0 for parameter MetricData.member.1.Value is invalid.
* </pre>
*/
//CHECKSTYLE.ON: LineLength
final class CloudWatchValueTest {
  private CloudWatchValueTest() {
  }

  private static final String ACCESS_KEY = "";
  private static final String SECRET_KEY = "";
  private static final AWSCredentials CREDENTIALS = new BasicAWSCredentials(ACCESS_KEY,
      SECRET_KEY);
  private static final AmazonCloudWatch CLIENT = new AmazonCloudWatchClient(CREDENTIALS);

  // Edge-case doubles to probe CloudWatch's accepted value range.
  private static final double[] SPECIAL_VALUES = {
      Double.NaN,
      Double.NEGATIVE_INFINITY,
      Double.POSITIVE_INFINITY,
      Double.MIN_VALUE,
      Double.MAX_VALUE,
      Math.pow(2.0, 360),
      -Math.pow(2.0, 360),
      0.0,
      1.0,
      -1.0
  };

  // Builds a geometric series of n values: start, start*m, start*m^2, ...
  private static double[] getValues(double start, double multiplier, int n) {
    final double[] series = new double[n];
    double current = start;
    for (int i = 0; i < n; ++i) {
      series[i] = current;
      current *= multiplier;
    }
    return series;
  }

  // Sends a single datum; returns false (after logging) if CloudWatch rejects it.
  private static boolean putValue(String name, long timestamp, double value) {
    final MetricDatum datum = new MetricDatum()
        .withMetricName(name)
        .withTimestamp(new Date(timestamp))
        .withValue(value);
    final PutMetricDataRequest request = new PutMetricDataRequest()
        .withNamespace("TEST")
        .withMetricData(datum);
    try {
      CLIENT.putMetricData(request);
      return true;
    } catch (Exception e) {
      System.out.printf("ERROR %e %d - %s: %s%n",
          value, Math.getExponent(value), e.getClass().getName(), e.getMessage());
      return false;
    }
  }

  // Sends one datum per minute starting at 'start'; stops at the first
  // failure unless ignoreFailures is set.
  private static void putValues(String name, long start, double[] values,
                                boolean ignoreFailures) {
    long timestamp = start;
    int i = 0;
    boolean ok = true;
    while (i < values.length && (ok || ignoreFailures)) {
      ok = putValue(name, timestamp, values[i]);
      ++i;
      timestamp += 60000;
    }
  }

  public static void main(String[] args) throws Exception {
    if (args.length != 1) {
      System.err.println("Usage: cwtest <test-name>");
      System.exit(1);
    }
    // Backdate the series so all one-minute samples land in the past.
    final long start = System.currentTimeMillis() - (1000 * 60 * 1000);
    final double[] posLargeValues = getValues(1.0, 2.0, 500);
    final double[] posSmallValues = getValues(1.0, 0.5, 500);
    final double[] negLargeValues = getValues(-1.0, 2.0, 500);
    final double[] negSmallValues = getValues(-1.0, 0.5, 500);
    putValues(args[0] + "_special", start, SPECIAL_VALUES, true);
    putValues(args[0] + "_pos_large", start, posLargeValues, false);
    putValues(args[0] + "_pos_small", start, posSmallValues, false);
    putValues(args[0] + "_neg_large", start, negLargeValues, false);
    putValues(args[0] + "_neg_small", start, negSmallValues, false);
  }
}
| 3,018 |
0 | Create_ds/servo/servo-aws/src/test/java/com/netflix/servo/publish | Create_ds/servo/servo-aws/src/test/java/com/netflix/servo/publish/cloudwatch/CloudWatchMetricObserverTest.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.publish.cloudwatch;
import com.amazonaws.AmazonClientException;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
import com.netflix.servo.Metric;
import com.netflix.servo.tag.BasicTagList;
import org.testng.Assert;
import org.testng.annotations.Test;
import java.util.ArrayList;
import java.util.List;
/**
 * Tests for {@link CloudWatchMetricObserver}. The update test tolerates the
 * absence of real AWS credentials by catching client exceptions.
 */
public class CloudWatchMetricObserverTest {
  private final CloudWatchMetricObserver observer = new CloudWatchMetricObserver(
      "testObserver", "testDomain", new InstanceProfileCredentialsProvider());

  private static final int NUM_METRICS = 33;
  private static final int VALUE = 10;

  /**
   * Update.
   */
  @Test
  public void testUpdate() throws Exception {
    final List<Metric> batch = new ArrayList<>(NUM_METRICS);
    int created = 0;
    while (created < NUM_METRICS) {
      batch.add(new Metric("test", BasicTagList.EMPTY, System.currentTimeMillis(), VALUE));
      created++;
    }
    try {
      observer.update(batch);
    } catch (AmazonClientException e) {
      e.printStackTrace();
    }
  }

  /**
   * create dimensions.
   */
  @Test
  public void testCreateDimensions() throws Exception {
  }

  /**
   * create metric datum.
   */
  @Test
  public void testCreateMetricDatum() throws Exception {
  }

  /**
   * create put request.
   */
  @Test
  public void testCreatePutRequest() throws Exception {
  }

  /**
   * Truncation clamps infinities and extremes to the representable range and
   * maps NaN and denormals to zero; in-range values pass through unchanged.
   */
  @Test
  public void testTruncate() throws Exception {
    observer.withTruncateEnabled(true);
    final double cap = CloudWatchMetricObserver.MAX_VALUE;
    Assert.assertEquals(cap, observer.truncate(Double.POSITIVE_INFINITY));
    Assert.assertEquals(-cap, observer.truncate(Double.NEGATIVE_INFINITY));
    Assert.assertEquals(cap, observer.truncate(Double.MAX_VALUE));
    Assert.assertEquals(-cap, observer.truncate(-Double.MAX_VALUE));
    Assert.assertEquals(0.0, observer.truncate(Double.MIN_VALUE));
    Assert.assertEquals(0.0, observer.truncate(-Double.MIN_VALUE));
    Assert.assertEquals(1.0, observer.truncate(1.0));
    Assert.assertEquals(10000.0, observer.truncate(10000.0));
    Assert.assertEquals(0.0, observer.truncate(0.0));
    Assert.assertEquals(0.0, observer.truncate(Double.NaN));
    observer.withTruncateEnabled(false);
  }
}
| 3,019 |
0 | Create_ds/servo/servo-aws/src/test/java/com/netflix/servo | Create_ds/servo/servo-aws/src/test/java/com/netflix/servo/aws/DataSourceTypeToAwsUnitTest.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.aws;
import com.netflix.servo.annotations.DataSourceType;
import org.testng.annotations.Test;
import static org.testng.Assert.assertEquals;
/**
 * DataSourceTypeToAwsUnit tests.
 * User: gorzell
 * Date: 1/9/12
 * Time: 6:44 PM
 */
public class DataSourceTypeToAwsUnitTest {
  /**
   * GetUnit returns the correct unit.
   */
  @Test
  public void testGetUnit() throws Exception {
    final String countPerSecond = "Count/Second";
    final String none = "None";
    // Counters map to a rate unit; everything else has no CloudWatch unit.
    assertEquals(DataSourceTypeToAwsUnit.getUnit(DataSourceType.COUNTER), countPerSecond);
    assertEquals(DataSourceTypeToAwsUnit.getUnit(DataSourceType.GAUGE), none);
    assertEquals(DataSourceTypeToAwsUnit.getUnit(DataSourceType.INFORMATIONAL), none);
  }
}
| 3,020 |
0 | Create_ds/servo/servo-aws/src/test/java/com/netflix/servo/tag | Create_ds/servo/servo-aws/src/test/java/com/netflix/servo/tag/aws/AwsInjectableTagTest.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.tag.aws;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;
import java.net.URL;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static org.testng.Assert.assertTrue;
/**
 * AwsInjectableTag tests.
 * User: gorzell
 * Date: 1/9/12
 * Time: 9:11 PM
 */
public class AwsInjectableTagTest {

  private static final String IPADDRESS_REGEX = "^([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\."
      + "([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\."
      + "([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\."
      + "([01]?\\d\\d?|2[0-4]\\d|25[0-5])$";
  private static final Pattern IP_PATTERN = Pattern.compile(IPADDRESS_REGEX);

  /**
   * getContent from 169.254.169.254.
   */
  @BeforeTest(groups = {"aws"})
  public void checkEc2() throws Exception {
    // Fails fast when not running on EC2, which skips the "aws" group.
    new URL("http://169.254.169.254/latest/meta-data").getContent();
  }

  /**
   * zone comes from a valid region.
   */
  @Test(groups = {"aws"})
  public void testGetZone() throws Exception {
    final String zone = AwsInjectableTag.getZone();
    final boolean knownRegion = zone.startsWith("us-") || zone.startsWith("eu-");
    assertTrue(knownRegion);
  }

  /**
   * ami-id looks like a valid ami.
   */
  @Test(groups = {"aws"}, enabled = false)
  public void testAmiId() throws Exception {
    assertTrue(AwsInjectableTag.getAmiId().startsWith("ami-"));
  }

  /**
   * check instance type.
   */
  @Test(groups = {"aws"})
  public void testGetInstanceType() throws Exception {
    assertTrue(AwsInjectableTag.getInstanceType() != null);
  }

  /**
   * localHostname is a domU.
   */
  @Test(groups = {"aws"})
  public void testGetLocalHostname() throws Exception {
    assertTrue(AwsInjectableTag.getLocalHostname().startsWith("domU-"));
  }

  /**
   * privateIp.
   */
  @Test(groups = {"aws"})
  public void testGetLocalIpv4() throws Exception {
    assertTrue(looksLikeAnIp(AwsInjectableTag.getLocalIpv4()));
  }

  /**
   * publicHostname.
   */
  @Test(groups = {"aws"})
  public void testGetPublicHostname() throws Exception {
    assertTrue(AwsInjectableTag.getPublicHostname().startsWith("ec2-"));
  }

  /**
   * publicIp.
   */
  @Test(groups = {"aws"})
  public void testGetPublicIpv4() throws Exception {
    assertTrue(looksLikeAnIp(AwsInjectableTag.getPublicIpv4()));
  }

  /**
   * instanceId.
   */
  @Test(groups = {"aws"})
  public void testGetInstanceId() throws Exception {
    assertTrue(AwsInjectableTag.getInstanceId().startsWith("i-"));
  }

  /**
   * Helper function to check whether a string looks like an IP.
   */
  private boolean looksLikeAnIp(String ip) {
    return IP_PATTERN.matcher(ip).matches();
  }
}
| 3,021 |
0 | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo/publish | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo/publish/cloudwatch/CloudWatchMetricObserver.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.publish.cloudwatch;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.cloudwatch.AmazonCloudWatch;
import com.amazonaws.services.cloudwatch.AmazonCloudWatchClient;
import com.amazonaws.services.cloudwatch.model.Dimension;
import com.amazonaws.services.cloudwatch.model.MetricDatum;
import com.amazonaws.services.cloudwatch.model.PutMetricDataRequest;
import com.netflix.servo.DefaultMonitorRegistry;
import com.netflix.servo.Metric;
import com.netflix.servo.aws.AwsServiceClients;
import com.netflix.servo.monitor.BasicTimer;
import com.netflix.servo.monitor.Counter;
import com.netflix.servo.monitor.DynamicCounter;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.servo.monitor.StepCounter;
import com.netflix.servo.monitor.Stopwatch;
import com.netflix.servo.monitor.Timer;
import com.netflix.servo.publish.BaseMetricObserver;
import com.netflix.servo.tag.BasicTag;
import com.netflix.servo.tag.Tag;
import com.netflix.servo.tag.TagList;
import com.netflix.servo.util.Preconditions;
import com.netflix.servo.util.Throwables;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.stream.Collectors;
/**
 * Writes observations to Amazon's CloudWatch. Metrics are sent in batches
 * (CloudWatch limits a single put to 20 datums); publish counts, latency,
 * and errors are tracked via self-monitoring counters.
 */
public class CloudWatchMetricObserver extends BaseMetricObserver {
  private static final Logger LOG = LoggerFactory.getLogger(CloudWatchMetricObserver.class);

  /**
   * Experimentally derived value for the largest exponent that can be sent to cloudwatch
   * without triggering an InvalidParameterValue exception. See CloudWatchValueTest for the test
   * program that was used.
   */
  private static final int MAX_EXPONENT = 360;

  /**
   * Experimentally derived value for the smallest exponent that can be sent to cloudwatch
   * without triggering an InvalidParameterValue exception. See CloudWatchValueTest for the test
   * program that was used.
   */
  private static final int MIN_EXPONENT = -360;

  /**
   * Maximum value that can be represented in cloudwatch.
   */
  static final double MAX_VALUE = Math.pow(2.0, MAX_EXPONENT);

  /**
   * Number of cloudwatch metrics reported.
   */
  private static final Counter METRICS_COUNTER = new StepCounter(
      new MonitorConfig.Builder("servo.cloudwatch.metrics").build());

  /**
   * Number of cloudwatch put calls.
   */
  private static final Timer PUTS_TIMER = new BasicTimer(
      new MonitorConfig.Builder("servo.cloudwatch.puts").build());

  /**
   * Number of cloudwatch errors. Used as a base config; an "error" tag with the
   * failure cause is added when incrementing.
   */
  private static final MonitorConfig ERRORS_COUNTER_ID =
      new MonitorConfig.Builder("servo.cloudwatch.errors").build();

  static {
    // Self-monitoring: expose the publish counter and timer via the default registry.
    DefaultMonitorRegistry.getInstance().register(METRICS_COUNTER);
    DefaultMonitorRegistry.getInstance().register(PUTS_TIMER);
  }

  // Maximum number of metrics sent per putMetricData call; defaults to 20,
  // which is CloudWatch's documented request limit.
  private int batchSize;
  // When true, truncate() clamps values outside CloudWatch's representable range.
  private boolean truncateEnabled = false;

  private final AmazonCloudWatch cloudWatch;
  private final String cloudWatchNamespace;

  /**
   * @param name        Unique name of the observer.
   * @param namespace   Namespace to use in CloudWatch.
   * @param credentials Amazon credentials.
   * @deprecated use equivalent constructor that accepts an AWSCredentialsProvider.
   */
  @Deprecated
  public CloudWatchMetricObserver(String name, String namespace, AWSCredentials credentials) {
    this(name, namespace, new AmazonCloudWatchClient(credentials));
  }

  /**
   * @param name        Unique name of the observer.
   * @param namespace   Namespace to use in CloudWatch.
   * @param credentials Amazon credentials.
   * @param batchSize   Batch size to send to Amazon. They currently enforce a max of 20.
   * @deprecated use equivalent constructor that accepts an AWSCredentialsProvider.
   */
  @Deprecated
  public CloudWatchMetricObserver(String name, String namespace, AWSCredentials credentials,
                                  int batchSize) {
    this(name, namespace, credentials);
    this.batchSize = batchSize;
  }

  /**
   * @param name      Unique name of the observer.
   * @param namespace Namespace to use in CloudWatch.
   * @param provider  Amazon credentials provider
   */
  public CloudWatchMetricObserver(String name, String namespace,
                                  AWSCredentialsProvider provider) {
    this(name, namespace, AwsServiceClients.cloudWatch(provider));
  }

  /**
   * @param name      Unique name of the observer.
   * @param namespace Namespace to use in CloudWatch.
   * @param provider  Amazon credentials provider.
   * @param batchSize Batch size to send to Amazon. They currently enforce a max of 20.
   */
  public CloudWatchMetricObserver(String name, String namespace,
                                  AWSCredentialsProvider provider, int batchSize) {
    this(name, namespace, provider);
    this.batchSize = batchSize;
  }

  /**
   * @param name       Unique name of the observer.
   * @param namespace  Namespace to use in CloudWatch.
   * @param cloudWatch AWS cloudwatch.
   */
  public CloudWatchMetricObserver(String name, String namespace, AmazonCloudWatch cloudWatch) {
    super(name);
    this.cloudWatch = cloudWatch;
    this.cloudWatchNamespace = namespace;
    batchSize = 20;
  }

  /**
   * @param name       Unique name of the observer.
   * @param namespace  Namespace to use in CloudWatch.
   * @param cloudWatch AWS cloudwatch.
   * @param batchSize  Batch size to send to Amazon. They currently enforce a max of 20.
   */
  public CloudWatchMetricObserver(String name, String namespace, AmazonCloudWatch cloudWatch,
                                  int batchSize) {
    this(name, namespace, cloudWatch);
    this.batchSize = batchSize;
  }

  /**
   * Sends the given metrics to CloudWatch in batches of at most batchSize.
   * Metrics without a numeric value are silently skipped.
   *
   * @param metrics The list of metrics you want to send to CloudWatch
   */
  @Override
  public void updateImpl(List<Metric> metrics) {
    Preconditions.checkNotNull(metrics, "metrics");

    List<Metric> batch = new ArrayList<>(batchSize);
    for (final Metric m : metrics) {
      // Only numeric values can be represented as CloudWatch datums.
      if (m.hasNumberValue()) {
        batch.add(m);
        if (batch.size() % batchSize == 0) {
          putMetricData(batch);
          batch.clear();
        }
      }
    }
    // Flush any remaining partial batch.
    if (!batch.isEmpty()) {
      putMetricData(batch);
    }
  }

  /**
   * Sends one batch to CloudWatch, recording the datum count, call latency,
   * and a tagged error counter on failure. Exceptions are logged and swallowed
   * so one failed batch does not abort the rest of the update; Errors are
   * counted and re-thrown.
   */
  private void putMetricData(List<Metric> batch) {
    METRICS_COUNTER.increment(batch.size());
    final Stopwatch s = PUTS_TIMER.start();
    try {
      cloudWatch.putMetricData(createPutRequest(batch));
    } catch (AmazonServiceException e) {
      // Service-side rejection: tag the error counter with the AWS error code.
      final Tag error = new BasicTag("error", e.getErrorCode());
      DynamicCounter.increment(ERRORS_COUNTER_ID.withAdditionalTag(error));
      LOG.error("Error while submitting data for metrics : {}", batch, e);
    } catch (Exception e) {
      // Client-side or transport failure: tag with the exception class name.
      final Tag error = new BasicTag("error", e.getClass().getSimpleName());
      DynamicCounter.increment(ERRORS_COUNTER_ID.withAdditionalTag(error));
      LOG.error("Error while submitting data for metrics : {}", batch, e);
    } catch (Error e) {
      // Errors (e.g. OOM) are counted but still propagated to the caller.
      final Tag error = new BasicTag("error", e.getClass().getSimpleName());
      DynamicCounter.increment(ERRORS_COUNTER_ID.withAdditionalTag(error));
      throw Throwables.propagate(e);
    } finally {
      s.stop();
    }
  }

  /** Builds the PutMetricDataRequest for one batch of metrics. */
  PutMetricDataRequest createPutRequest(List<Metric> batch) {
    List<MetricDatum> datumList = batch.stream().map(this::createMetricDatum)
        .collect(Collectors.toList());
    return new PutMetricDataRequest().withNamespace(cloudWatchNamespace)
        .withMetricData(datumList);
  }

  /** Converts a single servo Metric into a CloudWatch MetricDatum. */
  MetricDatum createMetricDatum(Metric metric) {
    MetricDatum metricDatum = new MetricDatum();

    return metricDatum.withMetricName(metric.getConfig().getName())
        .withDimensions(createDimensions(metric.getConfig().getTags()))
        .withUnit("None")//DataSourceTypeToAwsUnit.getUnit(metric.))
        .withTimestamp(new Date(metric.getTimestamp()))
        .withValue(truncate(metric.getNumberValue()));
    //TODO Need to convert into reasonable units based on DataType
  }

  /**
   * Adjust a double value so it can be successfully written to cloudwatch. This involves capping
   * values with large exponents to an experimentally determined max value and converting values
   * with large negative exponents to 0. In addition, NaN values will be converted to 0.
   * No-op unless truncation has been enabled via {@link #withTruncateEnabled(boolean)}.
   */
  Double truncate(Number numberValue) {
    // http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html
    double doubleValue = numberValue.doubleValue();
    if (truncateEnabled) {
      final int exponent = Math.getExponent(doubleValue);
      if (Double.isNaN(doubleValue)) {
        doubleValue = 0.0;
      } else if (exponent >= MAX_EXPONENT) {
        // Clamp to the largest magnitude CloudWatch accepts, preserving sign.
        doubleValue = (doubleValue < 0.0) ? -MAX_VALUE : MAX_VALUE;
      } else if (exponent <= MIN_EXPONENT) {
        // Values too close to zero are flushed to exactly zero.
        doubleValue = 0.0;
      }
    }
    return doubleValue;
  }

  /** Maps servo tags to CloudWatch dimensions one-to-one (key -> Name, value -> Value). */
  List<Dimension> createDimensions(TagList tags) {
    List<Dimension> dimensionList = new ArrayList<>(tags.size());

    for (Tag tag : tags) {
      dimensionList.add(new Dimension().withName(tag.getKey()).withValue(tag.getValue()));
    }

    return dimensionList;
  }

  /**
   * Enable or disable clamping of out-of-range values before publishing.
   *
   * @param truncateEnabled whether {@link #truncate(Number)} should clamp values
   * @return this observer, for call chaining
   */
  public CloudWatchMetricObserver withTruncateEnabled(boolean truncateEnabled) {
    this.truncateEnabled = truncateEnabled;
    return this;
  }
}
| 3,022 |
0 | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo/publish | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo/publish/cloudwatch/package-info.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* <p/>
* Publishing Metrics to Amazon CloudWatch.
*/
/**
* Publishing Metrics to Amazon CloudWatch.
*/
package com.netflix.servo.publish.cloudwatch;
| 3,023 |
0 | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo/aws/AwsServiceClients.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.aws;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.autoscaling.AmazonAutoScaling;
import com.amazonaws.services.autoscaling.AmazonAutoScalingClient;
import com.amazonaws.services.cloudwatch.AmazonCloudWatch;
import com.amazonaws.services.cloudwatch.AmazonCloudWatchClient;
/**
 * Static helpers for constructing configured AWS service clients.
 */
public final class AwsServiceClients {

  private AwsServiceClients() {
  }

  /**
   * Get a CloudWatch client whose endpoint is configured based on properties.
   */
  public static AmazonCloudWatch cloudWatch(AWSCredentialsProvider credentials) {
    final AmazonCloudWatch client = new AmazonCloudWatchClient(credentials);
    client.setEndpoint(endpointFor(AwsPropertyKeys.AWS_CLOUD_WATCH_END_POINT,
        "monitoring.amazonaws.com"));
    return client;
  }

  /**
   * Get an AutoScaling client whose endpoint is configured based on properties.
   */
  public static AmazonAutoScaling autoScaling(AWSCredentials credentials) {
    final AmazonAutoScaling client = new AmazonAutoScalingClient(credentials);
    client.setEndpoint(endpointFor(AwsPropertyKeys.AWS_AUTO_SCALING_END_POINT,
        "autoscaling.amazonaws.com"));
    return client;
  }

  /** Resolve a service endpoint from its system property, falling back to the default. */
  private static String endpointFor(AwsPropertyKeys key, String defaultEndpoint) {
    return System.getProperty(key.getBundle(), defaultEndpoint);
  }
}
| 3,024 |
0 | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo/aws/DataSourceTypeToAwsUnit.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.aws;
import com.amazonaws.services.cloudwatch.model.StandardUnit;
import com.netflix.servo.annotations.DataSourceType;
/**
 * Conversion from internal data types to Amazon Units.
 */
public final class DataSourceTypeToAwsUnit {

  private DataSourceTypeToAwsUnit() {
  }

  /**
   * Map a servo {@link DataSourceType} to the CloudWatch unit string used
   * when publishing its values.
   *
   * @param dataSourceType the servo data source type; must not be null
   * @return {@code "Count/Second"} for counters, {@code "None"} for all other types
   */
  public static String getUnit(DataSourceType dataSourceType) {
    switch (dataSourceType) {
      case COUNTER:
        return StandardUnit.CountSecond.toString();
      default:
        // Gauges, informational metrics, and any future types have no
        // meaningful CloudWatch unit.
        return StandardUnit.None.toString();
    }
  }
}
| 3,025 |
0 | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo/aws/package-info.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* <p/>
* Code related to Amazon Web Services (AWS).
*/
/**
* Code related to Amazon Web Services (AWS).
*/
package com.netflix.servo.aws;
| 3,026 |
0 | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo/aws/AwsPropertyKeys.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.aws;
/**
 * Enum key values for setting aws related properties.
 */
public enum AwsPropertyKeys {
  /** System property naming a file that holds AWS credentials. */
  AWS_CREDENTIALS_FILE("com.netflix.servo.aws.credentialsFile"),
  /** System property overriding the AutoScaling service endpoint. */
  AWS_AUTO_SCALING_END_POINT("com.netflix.servo.aws.endpoint.autoscaling"),
  /** System property overriding the CloudWatch service endpoint. */
  AWS_CLOUD_WATCH_END_POINT("com.netflix.servo.aws.endpoint.cloudwatch");

  // Fully qualified system-property name for this key.
  private final String bundle;

  /**
   * Constructor responsible to instantiate the type of bundle.
   */
  AwsPropertyKeys(String bundle) {
    this.bundle = bundle;
  }

  /** Returns the system-property name associated with this key. */
  public String getBundle() {
    return bundle;
  }
}
| 3,027 |
0 | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo/aws | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo/aws/constants/Namespace.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.aws.constants;
/**
* Constants for the namespaces aws publish their metrics to cloudwatch under.
*/
public enum Namespace {
AWS_EBS("AWS/EBS"),
AWS_EC2("AWS/EC2"),
AWS_RDS("AWS/RDS"),
AWS_SQS("AWS/SQS"),
AWS_SNS("AWS/SNS"),
AWS_AUTOSCALING("AWS/AutoScaling"),
AWS_ELB("AWS/ELB");
private final String value;
Namespace(String value) {
this.value = value;
}
public String getValue() {
return this.value;
}
}
| 3,028 |
0 | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo/aws | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo/aws/constants/Dimensions.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.aws.constants;
/**
* Constants related to the AWS API, and what the labels they use for Dimensions
* across their services.
* <p/>
* http://docs.amazonwebservices.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html
*/
public enum Dimensions {
//EC2
AMI_IMAGE_ID("ImageId"),
INSTANCE_ID("InstanceId"),
INSTANCE_TYPE("InstanceType"),
//EBS
VOLUME_ID("VolumeId"),
//RDS
DB_INSTANCE_ID("DBInstanceIdentifier"),
DB_CLASS("DatabaseClass"),
ENGINE_NAME("EngineName"),
//SNS
TOPIC_NAME("TopicName"),
//SQS
QUEUE_NAME("QueueName"),
//ASG Also can filter EC2 metrics
AUTOSCALING_GROUP("AutoScalingGroupName"),
//ELB
LOAD_BALANCER_NAME("LoadBalancerName"),
AVAILABILITY_ZONE("AvailabilityZone");
private final String awsString;
Dimensions(String awsString) {
this.awsString = awsString;
}
public String getAwsString() {
return awsString;
}
}
| 3,029 |
0 | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo/aws | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo/aws/constants/package-info.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* <p/>
* Constants related to Amazon Web Services (AWS).
*/
/**
* Constants related to Amazon Web Services (AWS).
*/
package com.netflix.servo.aws.constants;
| 3,030 |
0 | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo/tag | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo/tag/aws/AwsInjectableTag.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.servo.tag.aws;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.auth.PropertiesCredentials;
import com.amazonaws.services.autoscaling.AmazonAutoScaling;
import com.amazonaws.services.autoscaling.model.AutoScalingInstanceDetails;
import com.amazonaws.services.autoscaling.model.DescribeAutoScalingInstancesRequest;
import com.netflix.servo.aws.AwsPropertyKeys;
import com.netflix.servo.aws.AwsServiceClients;
import com.netflix.servo.aws.constants.Dimensions;
import com.netflix.servo.tag.Tag;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URL;
import java.nio.charset.Charset;
import java.util.List;
/**
 * Group of Tags whose values will be dynamically set at runtime
 * based on amazon api calls.
 * <p/>
 * The keys for and values of these Tags are consistent with AWS naming.
 * Values are resolved once, at enum class-initialization time, by querying the
 * EC2 instance metadata service (and, for the ASG name, the AutoScaling API);
 * any failure resolves to the literal "undefined".
 */
public enum AwsInjectableTag implements Tag {
  AUTOSCALE_GROUP(Dimensions.AUTOSCALING_GROUP.getAwsString(), getAutoScaleGroup()),
  INSTANCE_ID(Dimensions.INSTANCE_ID.getAwsString(), getInstanceId()),
  AVAILABILITY_ZONE(Dimensions.AVAILABILITY_ZONE.getAwsString(), getZone()),
  AMI_ID(Dimensions.AMI_IMAGE_ID.getAwsString(), getAmiId()),
  INSTANCE_TYPE(Dimensions.INSTANCE_TYPE.getAwsString(), getInstanceType()),
  LOCAL_HOSTNAME("local-hostname", getLocalHostname()),
  LOCAL_IPV4("local-ipv4", getLocalIpv4()),
  PUBLIC_HOSTNAME("public-hostname", getPublicHostname()),
  PUBLIC_IPV4("public-ipv4", getPublicIpv4());

  // Base URL of the EC2 instance metadata service; "instance-data" only
  // resolves when running on EC2.
  private static final String METADATA_URL = "http://instance-data/latest/meta-data";
  private static final String UNDEFINED = "undefined";

  private static Logger getLogger() {
    return LoggerFactory.getLogger(AwsInjectableTag.class);
  }

  private final String key;
  private final String value;

  AwsInjectableTag(String key, String val) {
    this.key = key;
    this.value = val;
  }

  /**
   * @return Amazon compliant string representation of the key.
   */
  @Override
  public String getKey() {
    return key;
  }

  /**
   * @return value as determined at runtime for the key.
   */
  @Override
  public String getValue() {
    return value;
  }

  /** Returns the tag formatted as {@code key=value}. */
  @Override
  public String tagString() {
    return key + "=" + value;
  }

  /**
   * Looks up the auto scaling group name of the current instance via the
   * AutoScaling API. Credentials come from the file named by the
   * AWS_CREDENTIALS_FILE system property if set, otherwise from the default
   * provider chain. Returns "undefined" on any failure.
   */
  static String getAutoScaleGroup() {
    try {
      String credFileProperty = System.getProperty(
          AwsPropertyKeys.AWS_CREDENTIALS_FILE.getBundle());
      AWSCredentials credentials;
      if (credFileProperty != null) {
        credentials = new PropertiesCredentials(new File(credFileProperty));
      } else {
        credentials = new DefaultAWSCredentialsProviderChain().getCredentials();
      }
      AmazonAutoScaling autoScalingClient = AwsServiceClients.autoScaling(credentials);
      List<AutoScalingInstanceDetails> autoScalingInstances =
          autoScalingClient.describeAutoScalingInstances(
              new DescribeAutoScalingInstancesRequest().withInstanceIds(getInstanceId()))
              .getAutoScalingInstances();
      return autoScalingInstances.isEmpty()
          ? UNDEFINED : autoScalingInstances.get(0).getAutoScalingGroupName();
    } catch (Exception e) {
      getLogger().error("Unable to get ASG name.", e);
      return UNDEFINED;
    }
  }

  static String getInstanceId() {
    return getUrlValue("/instance-id");
  }

  /**
   * Reads a single line from the EC2 instance metadata service at the given
   * path. Returns "undefined" if the metadata host is unreachable or the read
   * fails. The stream is closed via try-with-resources.
   */
  static String getUrlValue(String path) {
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(
        new URL(METADATA_URL + path).openStream(), Charset.forName("UTF-8")))) {
      return reader.readLine();
    } catch (Exception e) {
      getLogger().warn("Unable to read value from AWS metadata URL", e);
      return UNDEFINED;
    }
  }

  static String getZone() {
    return getUrlValue("/placement/availability-zone");
  }

  static String getAmiId() {
    return getUrlValue("/ami-id");
  }

  static String getInstanceType() {
    return getUrlValue("/instance-type");
  }

  static String getLocalHostname() {
    return getUrlValue("/local-hostname");
  }

  static String getLocalIpv4() {
    return getUrlValue("/local-ipv4");
  }

  static String getPublicHostname() {
    return getUrlValue("/public-hostname");
  }

  static String getPublicIpv4() {
    return getUrlValue("/public-ipv4");
  }
}
| 3,031 |
0 | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo/tag | Create_ds/servo/servo-aws/src/main/java/com/netflix/servo/tag/aws/package-info.java | /**
* Copyright 2013 Netflix, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* <p/>
* Amazon Web Services (AWS) related tags.
*/
/**
* Amazon Web Services (AWS) related tags.
*/
package com.netflix.servo.tag.aws;
| 3,032 |
0 | Create_ds/concurrency-limits/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/StringMarshaller.java | package com.netflix.concurrency.limits.grpc;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import com.google.common.base.Charsets;
import com.google.common.io.CharStreams;
import io.grpc.MethodDescriptor.Marshaller;
/**
 * {@link Marshaller} for plain strings that encodes to and decodes from
 * UTF-8 bytes.
 */
public final class StringMarshaller implements Marshaller<String> {
    /** Shared stateless instance. */
    public static final StringMarshaller INSTANCE = new StringMarshaller();

    @Override
    public InputStream stream(String value) {
        byte[] encoded = value.getBytes(Charsets.UTF_8);
        return new ByteArrayInputStream(encoded);
    }

    @Override
    public String parse(InputStream stream) {
        InputStreamReader reader = new InputStreamReader(stream, Charsets.UTF_8);
        try {
            return CharStreams.toString(reader);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
| 3,033 |
0 | Create_ds/concurrency-limits/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc | Create_ds/concurrency-limits/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/ConcurrencyLimitServerInterceptorTest.java | package com.netflix.concurrency.limits.grpc.server;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.Uninterruptibles;
import com.netflix.concurrency.limits.Limiter;
import com.netflix.concurrency.limits.grpc.StringMarshaller;
import com.netflix.concurrency.limits.grpc.mockito.OptionalResultCaptor;
import com.netflix.concurrency.limits.limiter.SimpleLimiter;
import com.netflix.concurrency.limits.spectator.SpectatorMetricRegistry;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.Meter;
import com.netflix.spectator.api.Timer;
import io.grpc.CallOptions;
import io.grpc.Channel;
import io.grpc.ClientCall;
import io.grpc.Metadata;
import io.grpc.MethodDescriptor;
import io.grpc.MethodDescriptor.MethodType;
import io.grpc.Server;
import io.grpc.ServerInterceptors;
import io.grpc.ServerServiceDefinition;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.grpc.netty.NettyChannelBuilder;
import io.grpc.netty.NettyServerBuilder;
import io.grpc.stub.ClientCalls;
import io.grpc.stub.ServerCalls;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import org.mockito.Mockito;
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.TimeUnit;
/**
 * Integration-style tests verifying that {@link ConcurrencyLimitServerInterceptor}
 * releases its limiter permit on success, error, uncaught exception, client
 * cancellation and deadline expiry, and records the expected metric outcome.
 */
public class ConcurrencyLimitServerInterceptorTest {
    @Rule
    public TestName testName = new TestName();

    private static final MethodDescriptor<String, String> METHOD_DESCRIPTOR = MethodDescriptor.<String, String>newBuilder()
            .setType(MethodType.UNARY)
            .setFullMethodName("service/method")
            .setRequestMarshaller(StringMarshaller.INSTANCE)
            .setResponseMarshaller(StringMarshaller.INSTANCE)
            .build();

    private DefaultRegistry registry = new DefaultRegistry();

    private Server server;
    private Channel channel;

    Limiter<GrpcServerRequestContext> limiter;
    OptionalResultCaptor<Limiter.Listener> listener;

    @Before
    public void beforeEachTest() {
        // Spy on a real limiter so acquire() calls can be verified while the
        // returned listeners are captured (and spied) for later verification.
        limiter = Mockito.spy(SimpleLimiter.newBuilder()
                .named(testName.getMethodName())
                .metricRegistry(new SpectatorMetricRegistry(registry, registry.createId("unit.test.limiter")))
                .build());
        listener = OptionalResultCaptor.forClass(Limiter.Listener.class);
        Mockito.doAnswer(listener).when(limiter).acquire(Mockito.any());
    }

    @After
    public void afterEachTest() {
        if (server != null) {
            server.shutdown();
        }
        // Dump registry contents to aid debugging of failed assertions.
        System.out.println("COUNTERS:");
        registry.counters().forEach(t -> System.out.println(" " + t.id() + " " + t.count()));
        System.out.println("DISTRIBUTIONS:");
        registry.distributionSummaries().forEach(t -> System.out.println(" " + t.id() + " " + t.count() + " " + t.totalAmount()));
    }

    /** Starts an in-process server for the given unary handler and wires up the client channel. */
    private void startServer(ServerCalls.UnaryMethod<String, String> method) {
        try {
            server = NettyServerBuilder.forPort(0)
                    .addService(ServerInterceptors.intercept(
                            ServerServiceDefinition.builder("service")
                                    .addMethod(METHOD_DESCRIPTOR, ServerCalls.asyncUnaryCall(method))
                                    .build(),
                            ConcurrencyLimitServerInterceptor.newBuilder(limiter)
                                    .build())
                    )
                    .build()
                    .start();

            channel = NettyChannelBuilder.forAddress("localhost", server.getPort())
                    .usePlaintext(true)
                    .build();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    @Test
    public void releaseOnSuccess() {
        // Setup server
        startServer((req, observer) -> {
            observer.onNext("response");
            observer.onCompleted();
        });

        ClientCalls.blockingUnaryCall(channel, METHOD_DESCRIPTOR, CallOptions.DEFAULT, "foo");
        Mockito.verify(limiter, Mockito.times(1)).acquire(Mockito.isA(GrpcServerRequestContext.class));
        Mockito.verify(listener.getResult().get(), Mockito.timeout(1000).times(1)).onSuccess();

        verifyCounts(0, 0, 1, 0);
    }

    @Test
    public void releaseOnError() {
        // Setup server
        startServer((req, observer) -> {
            observer.onError(Status.INVALID_ARGUMENT.asRuntimeException());
        });

        try {
            ClientCalls.blockingUnaryCall(channel, METHOD_DESCRIPTOR, CallOptions.DEFAULT, "foo");
            // Fixed message: this path expects INVALID_ARGUMENT, not UNKNOWN.
            Assert.fail("Should have failed with INVALID_ARGUMENT error");
        } catch (StatusRuntimeException e) {
            Assert.assertEquals(Status.Code.INVALID_ARGUMENT, e.getStatus().getCode());
        }
        // Verify
        Mockito.verify(limiter, Mockito.times(1)).acquire(Mockito.isA(GrpcServerRequestContext.class));

        verifyCounts(0, 0, 1, 0);
    }

    @Test
    public void releaseOnUncaughtException() throws IOException {
        // Setup server
        startServer((req, observer) -> {
            throw new RuntimeException("failure");
        });

        try {
            ClientCalls.blockingUnaryCall(channel, METHOD_DESCRIPTOR, CallOptions.DEFAULT, "foo");
            Assert.fail("Should have failed with UNKNOWN error");
        } catch (StatusRuntimeException e) {
            Assert.assertEquals(Status.Code.UNKNOWN, e.getStatus().getCode());
        }

        // Verify
        Mockito.verify(limiter, Mockito.times(1)).acquire(Mockito.isA(GrpcServerRequestContext.class));
        Mockito.verify(listener.getResult().get(), Mockito.timeout(1000).times(1)).onIgnore();

        verifyCounts(0, 1, 0, 0);
    }

    @Test
    public void releaseOnCancellation() {
        // Setup server
        startServer((req, observer) -> {
            Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS);
            observer.onNext("delayed_response");
            observer.onCompleted();
        });

        ListenableFuture<String> future = ClientCalls.futureUnaryCall(channel.newCall(METHOD_DESCRIPTOR, CallOptions.DEFAULT), "foo");
        Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
        future.cancel(true);

        // Verify: the permit is released when the (delayed) server call completes.
        Mockito.verify(limiter, Mockito.times(1)).acquire(Mockito.isA(GrpcServerRequestContext.class));
        Mockito.verify(listener.getResult().get(), Mockito.times(0)).onIgnore();
        Mockito.verify(listener.getResult().get(), Mockito.timeout(2000).times(1)).onSuccess();

        verifyCounts(0, 0, 1, 0);
    }

    @Test
    public void releaseOnDeadlineExceeded() {
        // Setup server
        startServer((req, observer) -> {
            Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS);
            observer.onNext("delayed_response");
            observer.onCompleted();
        });

        try {
            ClientCalls.blockingUnaryCall(channel.newCall(METHOD_DESCRIPTOR, CallOptions.DEFAULT.withDeadlineAfter(1, TimeUnit.SECONDS)), "foo");
        } catch (StatusRuntimeException e) {
            Assert.assertEquals(Status.Code.DEADLINE_EXCEEDED, e.getStatus().getCode());
        }
        // Verify: the permit is released when the (delayed) server call completes.
        Mockito.verify(limiter, Mockito.times(1)).acquire(Mockito.isA(GrpcServerRequestContext.class));
        Mockito.verify(listener.getResult().get(), Mockito.times(0)).onIgnore();
        Mockito.verify(listener.getResult().get(), Mockito.timeout(2000).times(1)).onSuccess();

        verifyCounts(0, 0, 1, 0);
    }

    /**
     * Asserts the per-outcome counters recorded by the limiter's metric registry.
     * Sleeps briefly first because metrics are updated asynchronously.
     */
    public void verifyCounts(int dropped, int ignored, int success, int rejected) {
        try {
            TimeUnit.SECONDS.sleep(1);
        } catch (InterruptedException e) {
            // Restore the interrupt flag instead of silently swallowing it.
            Thread.currentThread().interrupt();
        }
        Assert.assertEquals(dropped, registry.counter("unit.test.limiter.call", "id", testName.getMethodName(), "status", "dropped").count());
        Assert.assertEquals(ignored, registry.counter("unit.test.limiter.call", "id", testName.getMethodName(), "status", "ignored").count());
        Assert.assertEquals(success, registry.counter("unit.test.limiter.call", "id", testName.getMethodName(), "status", "success").count());
        Assert.assertEquals(rejected, registry.counter("unit.test.limiter.call", "id", testName.getMethodName(), "status", "rejected").count());
    }
}
| 3,034 |
0 | Create_ds/concurrency-limits/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server | Create_ds/concurrency-limits/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/example/PartitionedExample.java | package com.netflix.concurrency.limits.grpc.server.example;
import com.netflix.concurrency.limits.grpc.server.GrpcServerLimiterBuilder;
import com.netflix.concurrency.limits.limit.Gradient2Limit;
import com.netflix.concurrency.limits.limit.WindowedLimit;
import java.io.IOException;
import java.text.MessageFormat;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Example that partitions a server-side concurrency limit between drivers
 * identified by the {@link Driver#ID_HEADER} gRPC header and prints
 * per-second statistics for each partition.
 */
public class PartitionedExample {
    public static void main(String[] args) throws IOException, ExecutionException, InterruptedException {
        final Gradient2Limit limit = Gradient2Limit.newBuilder()
                .build();

        // Create a server whose limit is split between partitions "1" and "2".
        // The commented-out lines are experiment toggles left for manual runs.
        final TestServer server = TestServer.newBuilder()
                .concurrency(2)
                .lognormal(20, 1, TimeUnit.MINUTES)
                .limiter(
                        new GrpcServerLimiterBuilder()
                                .partitionByHeader(Driver.ID_HEADER)
                                .partition("1", 1.0)
                                .partition("2", 0.0)
//                        .partition("3", 0.0)
//                        .partitionRejectDelay("2", 1000, TimeUnit.MILLISECONDS)
//                        .partitionRejectDelay("3", 1000, TimeUnit.MILLISECONDS)
                                .limit(WindowedLimit.newBuilder()
                                        .minWindowTime(1, TimeUnit.SECONDS)
                                        .windowSize(10)
                                        .build(limit))
                                .build()
                )
                .build();

        final LatencyCollector latency = new LatencyCollector();

        final Driver driver1 = Driver.newBuilder()
                .id("1")
                .exponentialRps(50, 60, TimeUnit.SECONDS)
                .latencyAccumulator(latency)
                .runtime(1, TimeUnit.HOURS)
                .port(server.getPort())
                .build();

        // driver2 has two traffic segments: 50 rps for 60s, then 100 rps for 60s.
        final Driver driver2 = Driver.newBuilder()
                .id("2")
                .exponentialRps(50, 60, TimeUnit.SECONDS)
                .exponentialRps(100, 60, TimeUnit.SECONDS)
                .latencyAccumulator(latency)
                .runtime(1, TimeUnit.HOURS)
                .port(server.getPort())
                .build();

        // driver3 is built but not started (its runAsync() call below is commented out).
        final Driver driver3 = Driver.newBuilder()
                .id("3")
                .exponentialRps(50, 60, TimeUnit.SECONDS)
                .latencyAccumulator(latency)
                .runtime(1, TimeUnit.HOURS)
                .port(server.getPort())
                .build();

        // Report progress once per second as CSV-style columns.
        final AtomicInteger counter = new AtomicInteger(0);
        System.out.println("iteration, limit, live, batch, live, batch, latency, shortRtt, longRtt");
//        System.out.println("iteration, limit, 70%, 20%, 10%, 70%, 20%, 10%, latency, shortRtt, longRtt");
        Executors.newSingleThreadScheduledExecutor().scheduleAtFixedRate(() -> {
            System.out.println(MessageFormat.format(
                    "{0,number,#}, {1,number,#}, {2,number,#}, {3,number,#}, {4,number,#}, {5,number,#}, {6,number,#}, {7,number,#}, {8,number,#}",
                    counter.incrementAndGet(),
                    limit.getLimit(),
                    driver1.getAndResetSuccessCount(),
                    driver2.getAndResetSuccessCount(),
//                    driver3.getAndResetSuccessCount(),
                    driver1.getAndResetDropCount(),
                    driver2.getAndResetDropCount(),
//                    driver3.getAndResetDropCount(),
                    TimeUnit.NANOSECONDS.toMillis(latency.getAndReset()),
                    limit.getLastRtt(TimeUnit.MILLISECONDS),
                    limit.getRttNoLoad(TimeUnit.MILLISECONDS)
                    )) ;
        }, 1, 1, TimeUnit.SECONDS);

        // Block until both drivers finish (nominally 1 hour).
        CompletableFuture.allOf(
                driver1.runAsync()
              , driver2.runAsync()
//              , driver3.runAsync()
                ).get();
    }
}
| 3,035 |
0 | Create_ds/concurrency-limits/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server | Create_ds/concurrency-limits/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/example/LatencyCollector.java | package com.netflix.concurrency.limits.grpc.server.example;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
/**
 * Thread-safe accumulator of latency samples. Each call to {@link #accept}
 * adds one sample; {@link #getAndReset} returns the average of all samples
 * since the previous reset and clears the accumulator atomically.
 */
public class LatencyCollector implements Consumer<Long> {
    /** Immutable snapshot of the sample count and running total. */
    private static class Metrics {
        final long count;
        final long total;

        Metrics() {
            this(0, 0);
        }

        Metrics(long count, long total) {
            this.count = count;
            this.total = total;
        }

        /** Integer average of the samples, or 0 when no samples were recorded. */
        long average() {
            if (count == 0) {
                return 0;
            }
            return total / count;
        }
    }

    // Previously a package-visible mutable field named 'foo'; now private,
    // final and descriptively named. Swapped atomically on every update/reset.
    private final AtomicReference<Metrics> metrics = new AtomicReference<>(new Metrics());

    @Override
    public void accept(Long sample) {
        metrics.getAndUpdate(current -> new Metrics(current.count + 1, current.total + sample));
    }

    /** Returns the average sample value since the last reset and clears the state. */
    public long getAndReset() {
        return metrics.getAndSet(new Metrics()).average();
    }
}
| 3,036 |
0 | Create_ds/concurrency-limits/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server | Create_ds/concurrency-limits/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/example/Driver.java | package com.netflix.concurrency.limits.grpc.server.example;
import com.google.common.util.concurrent.Uninterruptibles;
import com.netflix.concurrency.limits.grpc.client.ConcurrencyLimitClientInterceptor;
import com.netflix.concurrency.limits.limit.FixedLimit;
import com.netflix.concurrency.limits.limiter.SimpleLimiter;
import io.grpc.CallOptions;
import io.grpc.Channel;
import io.grpc.ClientInterceptors;
import io.grpc.Metadata;
import io.grpc.netty.NettyChannelBuilder;
import io.grpc.stub.ClientCalls;
import io.grpc.stub.MetadataUtils;
import io.grpc.stub.StreamObserver;
import org.apache.commons.math3.distribution.ExponentialDistribution;
import org.apache.commons.math3.distribution.NormalDistribution;
import org.apache.commons.math3.distribution.UniformRealDistribution;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
import java.util.function.Supplier;
/**
 * Load-generating gRPC client used by the server examples. Traffic is shaped
 * by an ordered list of "segments", each supplying inter-request delays
 * (in milliseconds) drawn from a statistical distribution for a fixed duration.
 */
public class Driver {
    // Header attached to every call so the server can partition limits per driver.
    public static final Metadata.Key<String> ID_HEADER = Metadata.Key.of("id", Metadata.ASCII_STRING_MARSHALLER);

    /** One traffic phase: a delay distribution active for a fixed duration. */
    private interface Segment {
        long duration();
        long nextDelay();
        String name();
    }

    static public Builder newBuilder() {
        return new Builder();
    }

    public static class Builder {
        private List<Driver.Segment> segments = new ArrayList<>();
        private int port;
        // NOTE(review): despite the name, this stores NANOSECONDS (see runtime()).
        private long runtimeSeconds;
        private Consumer<Long> latencyAccumulator;
        private String id = "";

        /** Segment with normally distributed inter-request delays (millis). */
        public Builder normal(double mean, double sd, long duration, TimeUnit units) {
            final NormalDistribution distribution = new NormalDistribution(mean, sd);
            return add("normal(" + mean + ")", () -> (long)distribution.sample(), duration, units);
        }

        /** Segment with uniformly distributed inter-request delays (millis). */
        public Builder uniform(double lower, double upper, long duration, TimeUnit units) {
            final UniformRealDistribution distribution = new UniformRealDistribution(lower, upper);
            return add("uniform(" + lower + "," + upper + ")", () -> (long)distribution.sample(), duration, units);
        }

        /** Segment with exponentially distributed inter-request delays (millis). */
        public Builder exponential(double mean, long duration, TimeUnit units) {
            final ExponentialDistribution distribution = new ExponentialDistribution(mean);
            return add("exponential(" + mean + ")", () -> (long)distribution.sample(), duration, units);
        }

        /** Convenience: exponential arrivals expressed as requests-per-second. */
        public Builder exponentialRps(double rps, long duration, TimeUnit units) {
            return exponential(1000.0 / rps, duration, units);
        }

        // NOTE(review): 'slience' is a typo for 'silence'; kept as-is because the
        // method is part of the public builder API. Produces a quiet segment that
        // sleeps for the entire duration between requests.
        public Builder slience(long duration, TimeUnit units) {
            return add("slience()", () -> units.toMillis(duration), duration, units);
        }

        /** Value sent in {@link #ID_HEADER} to identify this driver to the server. */
        public Builder id(String id) {
            this.id = id;
            return this;
        }

        public Builder port(int port) {
            this.port = port;
            return this;
        }

        /** Sink receiving the latency, in nanoseconds, of every successful call. */
        public Builder latencyAccumulator(Consumer<Long> consumer) {
            this.latencyAccumulator = consumer;
            return this;
        }

        /** Total wall-clock time the driver should run before stopping. */
        public Builder runtime(long duration, TimeUnit units) {
            this.runtimeSeconds = units.toNanos(duration);
            return this;
        }

        /** Registers a custom segment; delays are interpreted as milliseconds. */
        public Builder add(String name, Supplier<Long> delaySupplier, long duration, TimeUnit units) {
            segments.add(new Segment() {
                @Override
                public long duration() {
                    return units.toNanos(duration);
                }

                @Override
                public long nextDelay() {
                    return delaySupplier.get();
                }

                @Override
                public String name() {
                    return name;
                }
            });
            return this;
        }

        public Driver build() {
            return new Driver(this);
        }
    }

    private final List<Segment> segments;
    private final Channel channel;
    // Total runtime in nanoseconds.
    private final long runtime;
    private final Consumer<Long> latencyAccumulator;
    private final AtomicInteger successCounter = new AtomicInteger(0);
    private final AtomicInteger dropCounter = new AtomicInteger(0);

    public Driver(Builder builder) {
        this.segments = builder.segments;
        this.runtime = builder.runtimeSeconds;
        this.latencyAccumulator = builder.latencyAccumulator;

        // Attach the driver id header to every outgoing call.
        Metadata metadata = new Metadata();
        metadata.put(ID_HEADER, builder.id);

        this.channel = ClientInterceptors.intercept(NettyChannelBuilder.forTarget("localhost:" + builder.port)
                .usePlaintext(true)
                .build(),
                MetadataUtils.newAttachHeadersInterceptor(metadata));
    }

    /** Number of successful calls since the last invocation of this method. */
    public int getAndResetSuccessCount() { return successCounter.getAndSet(0); }

    /** Number of failed calls since the last invocation of this method. */
    public int getAndResetDropCount() { return dropCounter.getAndSet(0); }

    /** Runs {@link #run()} on a dedicated single-thread executor. */
    public CompletableFuture<Void> runAsync() {
        return CompletableFuture.runAsync(this::run, Executors.newSingleThreadExecutor());
    }

    /**
     * Cycles through the segments (repeating the whole list) until the total
     * runtime elapses, issuing one async unary call after each sampled delay.
     */
    public void run() {
        long endTime = System.nanoTime() + this.runtime;
        while (true) {
            for (Driver.Segment segment : segments) {
                long segmentEndTime = System.nanoTime() + segment.duration();
                while (true) {
                    long currentTime = System.nanoTime();
                    if (currentTime > endTime) {
                        return;
                    }

                    if (currentTime > segmentEndTime) {
                        break;
                    }

                    long startTime = System.nanoTime();
                    // Pace requests using the segment's delay distribution
                    // (negative samples are clamped to 0).
                    Uninterruptibles.sleepUninterruptibly(Math.max(0, segment.nextDelay()), TimeUnit.MILLISECONDS);
                    ClientCalls.asyncUnaryCall(channel.newCall(TestServer.METHOD_DESCRIPTOR, CallOptions.DEFAULT.withWaitForReady()), "request",
                            new StreamObserver<String>() {
                                @Override
                                public void onNext(String value) {
                                }

                                @Override
                                public void onError(Throwable t) {
                                    dropCounter.incrementAndGet();
                                }

                                @Override
                                public void onCompleted() {
                                    latencyAccumulator.accept(System.nanoTime() - startTime);
                                    successCounter.incrementAndGet();
                                }
                            });
                }
            }
        }
    }
}
0 | Create_ds/concurrency-limits/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server | Create_ds/concurrency-limits/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/example/Example.java | package com.netflix.concurrency.limits.grpc.server.example;
import com.netflix.concurrency.limits.grpc.server.GrpcServerLimiterBuilder;
import com.netflix.concurrency.limits.limit.Gradient2Limit;
import com.netflix.concurrency.limits.limit.GradientLimit;
import com.netflix.concurrency.limits.limit.WindowedLimit;
import java.io.IOException;
import java.text.MessageFormat;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
/**
 * Single-driver example exercising a test server protected by a windowed
 * {@link Gradient2Limit} and printing per-second statistics as CSV columns.
 */
public class Example {
    public static void main(String[] args) throws IOException {
        final Gradient2Limit limit = Gradient2Limit.newBuilder().build();

        // Create a server with true concurrency 2 and lognormal handler latency.
        final TestServer server = TestServer.newBuilder()
                .concurrency(2)
                .lognormal(20, 1, TimeUnit.MINUTES)
                .limiter(
                        new GrpcServerLimiterBuilder()
                                .limit(WindowedLimit.newBuilder()
                                        .minWindowTime(1, TimeUnit.SECONDS)
                                        .windowSize(10)
                                        .build(limit))
                                .build()
                )
                .build();

        final LatencyCollector latency = new LatencyCollector();

        // Traffic ramps through four arrival rates, 100 seconds each,
        // cycling for up to an hour.
        final Driver driver = Driver.newBuilder()
                .exponentialRps(50, 100, TimeUnit.SECONDS)
                .exponentialRps(90, 100, TimeUnit.SECONDS)
                .exponentialRps(200, 100, TimeUnit.SECONDS)
                .exponentialRps(100, 100, TimeUnit.SECONDS)
                .latencyAccumulator(latency)
                .runtime(1, TimeUnit.HOURS)
                .port(server.getPort())
                .build();

        // Report progress once per second.
        final AtomicInteger counter = new AtomicInteger(0);
        System.out.println("iteration, limit, success, drop, latency, shortRtt, longRtt");
        Executors.newSingleThreadScheduledExecutor().scheduleAtFixedRate(() -> {
            try {
                System.out.println(MessageFormat.format("{0,number,#}, {1,number,#}, {2,number,#}, {3,number,#}, {4,number,#}, {5,number,#}, {6,number,#}",
                        counter.incrementAndGet(),
                        limit.getLimit(),
                        driver.getAndResetSuccessCount(),
                        driver.getAndResetDropCount(),
                        TimeUnit.NANOSECONDS.toMillis(latency.getAndReset()),
                        limit.getLastRtt(TimeUnit.MILLISECONDS),
                        limit.getRttNoLoad(TimeUnit.MILLISECONDS)
                        ));
            } catch (Exception e) {
                e.printStackTrace();
            }
        }, 1, 1, TimeUnit.SECONDS);

        // Create a client and block in its run loop.
        driver.run();
    }
}
| 3,038 |
0 | Create_ds/concurrency-limits/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server | Create_ds/concurrency-limits/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/example/TestServer.java | package com.netflix.concurrency.limits.grpc.server.example;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import org.apache.commons.math3.distribution.ExponentialDistribution;
import org.apache.commons.math3.distribution.LogNormalDistribution;
import com.google.common.util.concurrent.Uninterruptibles;
import com.netflix.concurrency.limits.Limiter;
import com.netflix.concurrency.limits.grpc.StringMarshaller;
import com.netflix.concurrency.limits.grpc.server.ConcurrencyLimitServerInterceptor;
import com.netflix.concurrency.limits.grpc.server.GrpcServerRequestContext;
import io.grpc.MethodDescriptor;
import io.grpc.MethodDescriptor.MethodType;
import io.grpc.Server;
import io.grpc.ServerCallHandler;
import io.grpc.ServerInterceptors;
import io.grpc.ServerServiceDefinition;
import io.grpc.Status;
import io.grpc.netty.NettyServerBuilder;
import io.grpc.stub.ServerCalls;
import io.grpc.stub.ServerCalls.UnaryMethod;
import io.grpc.stub.StreamObserver;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Test gRPC server whose handler latency follows configurable distribution
 * "segments" and whose true concurrency is bounded by a fair {@link Semaphore}.
 * Used by the example drivers to exercise server-side limiters.
 */
public class TestServer {
    private static final Logger LOG = LoggerFactory.getLogger(TestServer.class);

    public static final MethodDescriptor<String, String> METHOD_DESCRIPTOR = MethodDescriptor.<String, String>newBuilder()
            .setType(MethodType.UNARY)
            .setFullMethodName("service/method")
            .setRequestMarshaller(StringMarshaller.INSTANCE)
            .setResponseMarshaller(StringMarshaller.INSTANCE)
            .build();

    /** One simulation phase: a latency distribution active for a fixed duration. */
    private interface Segment {
        long duration();
        long latency();
        String name();
    }

    public static Builder newBuilder() {
        return new Builder();
    }

    public static class Builder {
        private List<TestServer.Segment> segments = new ArrayList<>();
        private int concurrency = 2;
        private Limiter<GrpcServerRequestContext> limiter;

        /** Server-side limiter applied via {@link ConcurrencyLimitServerInterceptor}. */
        public Builder limiter(Limiter<GrpcServerRequestContext> limiter) {
            this.limiter = limiter;
            return this;
        }

        /** True handler concurrency enforced by the semaphore. */
        public Builder concurrency(int concurrency) {
            this.concurrency = concurrency;
            return this;
        }

        /** Segment with exponentially distributed handler latency (millis). */
        public Builder exponential(double mean, long duration, TimeUnit units) {
            final ExponentialDistribution distribution = new ExponentialDistribution(mean);
            return add("exponential(" + mean + ")", () -> (long)distribution.sample(), duration, units);
        }

        /** Segment with lognormal handler latency scaled to the requested mean (millis). */
        public Builder lognormal(long mean, long duration, TimeUnit units) {
            final LogNormalDistribution distribution = new LogNormalDistribution(3.0, 1.0);
            final double distmean = distribution.getNumericalMean();
            return add("lognormal(" + mean + ")", () -> (long)(distribution.sample() * mean / distmean), duration, units);
        }

        // NOTE: 'slience' is a typo for 'silence' but is part of the public API.
        public Builder slience(long duration, TimeUnit units) {
            return add("slience()", () -> units.toMillis(duration), duration, units);
        }

        /** Registers a custom segment; latencies are interpreted as milliseconds. */
        public Builder add(String name, Supplier<Long> latencySupplier, long duration, TimeUnit units) {
            segments.add(new Segment() {
                @Override
                public long duration() {
                    return units.toNanos(duration);
                }

                @Override
                public long latency() {
                    return latencySupplier.get();
                }

                @Override
                public String name() {
                    return name;
                }
            });
            return this;
        }

        public TestServer build() throws IOException {
            return new TestServer(this);
        }
    }

    private final Semaphore semaphore;
    private final Server server;

    private TestServer(final Builder builder) throws IOException {
        this.semaphore = new Semaphore(builder.concurrency, true);

        ServerCallHandler<String, String> handler = ServerCalls.asyncUnaryCall(new UnaryMethod<String, String>() {
            volatile int segment = 0;

            {
                // Background thread that advances through the configured segments.
                Executors.newSingleThreadExecutor().execute(() -> {
                    while (true) {
                        // BUGFIX: previously read segments.get(0) and used
                        // 'segment = segment++ % size', which assigned the old
                        // value back and never advanced past the first segment.
                        Segment s = builder.segments.get(segment);
                        Uninterruptibles.sleepUninterruptibly(s.duration(), TimeUnit.NANOSECONDS);
                        segment = (segment + 1) % builder.segments.size();
                    }
                });
            }

            @Override
            public void invoke(String req, StreamObserver<String> observer) {
                boolean acquired = false;
                try {
                    // Sample latency from the currently active segment
                    // (BUGFIX: was hard-coded to segment 0).
                    long delay = builder.segments.get(segment).latency();
                    semaphore.acquire();
                    acquired = true;
                    TimeUnit.MILLISECONDS.sleep(delay);
                    observer.onNext("response");
                    observer.onCompleted();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    observer.onError(Status.UNKNOWN.asRuntimeException());
                } finally {
                    // BUGFIX: only release a permit that was actually acquired;
                    // previously an interrupted acquire() still released one,
                    // silently growing the semaphore's permit count.
                    if (acquired) {
                        semaphore.release();
                    }
                }
            }
        });

        this.server = NettyServerBuilder.forPort(0)
                .addService(ServerInterceptors.intercept(ServerServiceDefinition.builder("service")
                        .addMethod(METHOD_DESCRIPTOR, handler) // Rate = Limit / Latency = 2 / 0.02 = 100
                        .build(), ConcurrencyLimitServerInterceptor.newBuilder(builder.limiter)
                                .build()
                        ))
                .build()
                .start();
    }

    public int getPort() {
        return server.getPort();
    }
}
| 3,039 |
0 | Create_ds/concurrency-limits/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc | Create_ds/concurrency-limits/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/mockito/OptionalResultCaptor.java | package com.netflix.concurrency.limits.grpc.mockito;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import java.util.Optional;
/**
 * Mockito {@link Answer} that invokes the real method, captures its
 * {@link Optional} result, and wraps any present value in a Mockito spy so
 * later interactions with it can be verified.
 *
 * @param <T> type of the value carried by the captured Optional
 */
public class OptionalResultCaptor<T> implements Answer<Optional<T>> {

    public static <T> OptionalResultCaptor<T> forClass(Class<T> type) {
        return new OptionalResultCaptor<T>();
    }

    // Last captured result; null until the first invocation.
    private Optional<T> result = null;

    public Optional<T> getResult() {
        return result;
    }

    @Override
    @SuppressWarnings("unchecked")
    public Optional<T> answer(InvocationOnMock invocationOnMock) throws Throwable {
        // Cast is safe by construction: this answer is only installed on
        // methods returning Optional<T>.
        result = (Optional<T>) invocationOnMock.callRealMethod();
        // Optional.map is a no-op on an empty Optional, so the previous
        // isPresent() guard was redundant and has been removed.
        result = result.map(Mockito::spy);
        return result;
    }
}
| 3,040 |
0 | Create_ds/concurrency-limits/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc | Create_ds/concurrency-limits/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/client/ConcurrencyLimitClientInterceptorTest.java | package com.netflix.concurrency.limits.grpc.client;
import com.netflix.concurrency.limits.Limiter;
import com.netflix.concurrency.limits.grpc.StringMarshaller;
import io.grpc.CallOptions;
import io.grpc.Channel;
import io.grpc.MethodDescriptor;
import io.grpc.MethodDescriptor.MethodType;
import io.grpc.Server;
import io.grpc.ServerServiceDefinition;
import io.grpc.netty.NettyChannelBuilder;
import io.grpc.netty.NettyServerBuilder;
import io.grpc.stub.ClientCalls;
import io.grpc.stub.ServerCalls;
import java.io.IOException;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.junit.Ignore;
import org.junit.Test;
/**
 * Manual simulation (ignored by default) driving a client-side limiter with
 * {@code blockOnLimit} against a server whose concurrency is capped at 20.
 */
public class ConcurrencyLimitClientInterceptorTest {
    private static final MethodDescriptor<String, String> METHOD_DESCRIPTOR = MethodDescriptor.<String, String>newBuilder()
            .setType(MethodType.UNARY)
            .setFullMethodName("service/method")
            .setRequestMarshaller(StringMarshaller.INSTANCE)
            .setResponseMarshaller(StringMarshaller.INSTANCE)
            .build();

    @Test
    @Ignore
    public void simulation() throws IOException {
        // Fair semaphore capping the server at 20 concurrent requests of ~100ms each.
        Semaphore sem = new Semaphore(20, true);
        Server server = NettyServerBuilder.forPort(0)
                .addService(ServerServiceDefinition.builder("service")
                        .addMethod(METHOD_DESCRIPTOR, ServerCalls.asyncUnaryCall((req, observer) -> {
                            try {
                                sem.acquire();
                                TimeUnit.MILLISECONDS.sleep(100);
                            } catch (InterruptedException e) {
                                // Restore the interrupt flag instead of swallowing it.
                                Thread.currentThread().interrupt();
                            } finally {
                                sem.release();
                            }

                            observer.onNext("response");
                            observer.onCompleted();
                        }))
                        .build())
                .build()
                .start();

        Limiter<GrpcClientRequestContext> limiter = new GrpcClientLimiterBuilder()
                .blockOnLimit(true)
                .build();

        Channel channel = NettyChannelBuilder.forTarget("localhost:" + server.getPort())
                .usePlaintext(true)
                .intercept(new ConcurrencyLimitClientInterceptor(limiter))
                .build();

        // Print per-second throughput and limiter state.
        AtomicLong counter = new AtomicLong();
        Executors.newSingleThreadScheduledExecutor().scheduleAtFixedRate(() -> {
            System.out.println(" " + counter.getAndSet(0) + " : " + limiter.toString());
        }, 1, 1, TimeUnit.SECONDS);

        for (int i = 0 ; i < 10000000; i++) {
            counter.incrementAndGet();
            ClientCalls.futureUnaryCall(channel.newCall(METHOD_DESCRIPTOR, CallOptions.DEFAULT), "request");
        }
    }
}
| 3,041 |
0 | Create_ds/concurrency-limits/concurrency-limits-grpc/src/main/java/com/netflix/concurrency/limits/grpc | Create_ds/concurrency-limits/concurrency-limits-grpc/src/main/java/com/netflix/concurrency/limits/grpc/server/ConcurrencyLimitServerInterceptor.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.grpc.server;
import com.netflix.concurrency.limits.Limiter;
import com.netflix.concurrency.limits.internal.Preconditions;
import io.grpc.ForwardingServerCall;
import io.grpc.ForwardingServerCallListener;
import io.grpc.Metadata;
import io.grpc.ServerCall;
import io.grpc.ServerCall.Listener;
import io.grpc.ServerCallHandler;
import io.grpc.ServerInterceptor;
import io.grpc.Status;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;
import java.util.function.Supplier;
/**
* {@link ServerInterceptor} that enforces per service and/or per method concurrent request limits and returns
* a Status.UNAVAILABLE when that limit has been reached.
*/
public class ConcurrencyLimitServerInterceptor implements ServerInterceptor {
private static final Logger LOG = LoggerFactory.getLogger(ConcurrencyLimitServerInterceptor.class);
private static final Status LIMIT_EXCEEDED_STATUS = Status.UNAVAILABLE.withDescription("Server concurrency limit reached");
private final Limiter<GrpcServerRequestContext> grpcLimiter;
private final Supplier<Status> statusSupplier;
private Supplier<Metadata> trailerSupplier;
/**
 * Builder for {@link ConcurrencyLimitServerInterceptor} allowing the rejection
 * {@link Status} and trailer {@link Metadata} to be customized.
 */
public static class Builder {
    // Status returned on rejection; defaults to UNAVAILABLE (LIMIT_EXCEEDED_STATUS).
    private Supplier<Status> statusSupplier = () -> LIMIT_EXCEEDED_STATUS;
    // Trailers attached to a rejected call; defaults to empty metadata.
    private Supplier<Metadata> trailerSupplier = Metadata::new;
    private final Limiter<GrpcServerRequestContext> grpcLimiter;

    /**
     * @param grpcLimiter limiter consulted for every unary call; must not be null
     */
    public Builder(Limiter<GrpcServerRequestContext> grpcLimiter) {
        Preconditions.checkArgument(grpcLimiter != null, "grpcLimiter cannot be null");
        this.grpcLimiter = grpcLimiter;
    }

    /**
     * Supplier for the Status code to return when the concurrency limit has been reached.
     * A custom supplier could augment the response to include additional information about
     * the server or limit.  The supplier can also be used to trigger additional metrics.
     * By default will return an UNAVAILABLE.
     *
     * @param supplier
     * @return Chainable builder
     */
    public Builder statusSupplier(Supplier<Status> supplier) {
        Preconditions.checkArgument(supplier != null, "statusSupplier cannot be null");
        this.statusSupplier = supplier;
        return this;
    }

    /**
     * Supplier for the Metadata to return when the concurrency limit has been reached.
     * A custom supplier may include additional metadata about the server or limit
     *
     * @param supplier
     * @return Chainable builder
     */
    public Builder trailerSupplier(Supplier<Metadata> supplier) {
        Preconditions.checkArgument(supplier != null, "trailerSupplier cannot be null");
        this.trailerSupplier = supplier;
        return this;
    }

    /** @return a new interceptor configured from this builder */
    public ConcurrencyLimitServerInterceptor build() {
        return new ConcurrencyLimitServerInterceptor(this);
    }
}
public static Builder newBuilder(Limiter<GrpcServerRequestContext> grpcLimiter) {
return new Builder(grpcLimiter);
}
/**
* @deprecated Use {@link ConcurrencyLimitServerInterceptor#newBuilder(Limiter)}
* @param grpcLimiter
*/
@Deprecated
public ConcurrencyLimitServerInterceptor(Limiter<GrpcServerRequestContext> grpcLimiter) {
Preconditions.checkArgument(grpcLimiter != null, "grpcLimiter cannot be null");
this.grpcLimiter = grpcLimiter;
this.statusSupplier = () -> LIMIT_EXCEEDED_STATUS;
this.trailerSupplier = Metadata::new;
}
private ConcurrencyLimitServerInterceptor(Builder builder) {
this.grpcLimiter = builder.grpcLimiter;
this.statusSupplier = builder.statusSupplier;
this.trailerSupplier = builder.trailerSupplier;
}
@Override
public <ReqT, RespT> Listener<ReqT> interceptCall(final ServerCall<ReqT, RespT> call,
final Metadata headers,
final ServerCallHandler<ReqT, RespT> next) {
if (!call.getMethodDescriptor().getType().serverSendsOneMessage() || !call.getMethodDescriptor().getType().clientSendsOneMessage()) {
return next.startCall(call, headers);
}
return grpcLimiter
.acquire(new GrpcServerRequestContext() {
@Override
public ServerCall<?, ?> getCall() {
return call;
}
@Override
public Metadata getHeaders() {
return headers;
}
})
.map(new Function<Limiter.Listener, Listener<ReqT>>() {
final AtomicBoolean done = new AtomicBoolean(false);
void safeComplete(Runnable action) {
if (done.compareAndSet(false, true)) {
try {
action.run();
} catch (Throwable t) {
LOG.error("Critical error releasing limit", t);
}
}
}
@Override
public Listener<ReqT> apply(Limiter.Listener listener) {
final Listener<ReqT> delegate;
try {
delegate = next.startCall(
new ForwardingServerCall.SimpleForwardingServerCall<ReqT, RespT>(call) {
@Override
public void close(Status status, Metadata trailers) {
try {
super.close(status, trailers);
} finally {
safeComplete(() -> {
switch (status.getCode()) {
case DEADLINE_EXCEEDED:
listener.onDropped();
break;
default:
listener.onSuccess();
break;
}
});
}
}
},
headers);
} catch (Exception e) {
LOG.warn("Failed to create call", e);
safeComplete(listener::onIgnore);
throw e;
}
return new ForwardingServerCallListener.SimpleForwardingServerCallListener<ReqT>(delegate) {
@Override
public void onMessage(ReqT message) {
try {
super.onMessage(message);
} catch (Throwable t) {
LOG.error("Uncaught exception. Force releasing limit. ", t);
safeComplete(listener::onIgnore);
throw t;
}
}
@Override
public void onHalfClose() {
try {
super.onHalfClose();
} catch (Throwable t) {
LOG.error("Uncaught exception. Force releasing limit. ", t);
safeComplete(listener::onIgnore);
throw t;
}
}
@Override
public void onCancel() {
try {
super.onCancel();
} finally {
safeComplete(listener::onDropped);
}
}
};
}
})
.orElseGet(() -> {
call.close(statusSupplier.get(), trailerSupplier.get());
return new ServerCall.Listener<ReqT>() {};
});
}
}
| 3,042 |
0 | Create_ds/concurrency-limits/concurrency-limits-grpc/src/main/java/com/netflix/concurrency/limits/grpc | Create_ds/concurrency-limits/concurrency-limits-grpc/src/main/java/com/netflix/concurrency/limits/grpc/server/GrpcServerLimiterBuilder.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.grpc.server;
import com.netflix.concurrency.limits.limiter.AbstractPartitionedLimiter;
import io.grpc.Attributes;
import io.grpc.Metadata;
public class GrpcServerLimiterBuilder extends AbstractPartitionedLimiter.Builder<GrpcServerLimiterBuilder, GrpcServerRequestContext> {
    /**
     * Partition the limit by the full gRPC method name of the call.
     * @return Chainable builder
     */
    public GrpcServerLimiterBuilder partitionByMethod() {
        return partitionResolver(ctx -> ctx.getCall().getMethodDescriptor().getFullMethodName());
    }

    /**
     * Partition the limit by the value of a request header.
     * @param header key of the header whose value identifies the partition
     * @return Chainable builder
     */
    public GrpcServerLimiterBuilder partitionByHeader(Metadata.Key<String> header) {
        return partitionResolver(ctx -> ctx.getHeaders().get(header));
    }

    /**
     * Partition the limit by the value of a call attribute.
     * @param attribute key of the attribute whose value identifies the partition
     * @return Chainable builder
     */
    public GrpcServerLimiterBuilder partitionByAttribute(Attributes.Key<String> attribute) {
        return partitionResolver(ctx -> ctx.getCall().getAttributes().get(attribute));
    }

    @Override
    protected GrpcServerLimiterBuilder self() {
        return this;
    }
}
| 3,043 |
0 | Create_ds/concurrency-limits/concurrency-limits-grpc/src/main/java/com/netflix/concurrency/limits/grpc | Create_ds/concurrency-limits/concurrency-limits-grpc/src/main/java/com/netflix/concurrency/limits/grpc/server/GrpcServerRequestContext.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.grpc.server;
import io.grpc.Metadata;
import io.grpc.ServerCall;
/**
 * Context describing a single inbound gRPC server call, giving limiter
 * implementations access to the call and its headers (e.g. for partitioning).
 */
public interface GrpcServerRequestContext {
    /** @return the server call being intercepted */
    ServerCall<?, ?> getCall();

    /** @return the request headers of the intercepted call */
    Metadata getHeaders();
}
0 | Create_ds/concurrency-limits/concurrency-limits-grpc/src/main/java/com/netflix/concurrency/limits/grpc | Create_ds/concurrency-limits/concurrency-limits-grpc/src/main/java/com/netflix/concurrency/limits/grpc/client/GrpcClientLimiterBuilder.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.grpc.client;
import com.netflix.concurrency.limits.Limiter;
import com.netflix.concurrency.limits.limiter.AbstractPartitionedLimiter;
import com.netflix.concurrency.limits.limiter.BlockingLimiter;
import com.netflix.concurrency.limits.limiter.SimpleLimiter;
import io.grpc.CallOptions;
/**
* Builder to simplify creating a {@link Limiter} specific to GRPC clients.
*/
public final class GrpcClientLimiterBuilder extends AbstractPartitionedLimiter.Builder<GrpcClientLimiterBuilder, GrpcClientRequestContext> {
    /** When true the built limiter blocks callers at the limit instead of failing fast. */
    private boolean blockOnLimit = false;

    /**
     * Partition the limit by the full gRPC method name of the call.
     * @return Chainable builder
     */
    public GrpcClientLimiterBuilder partitionByMethod() {
        return partitionResolver(context -> context.getMethod().getFullMethodName());
    }

    /**
     * Partition the limit by the value of a call option.
     * @param option key of the call option whose value identifies the partition
     * @return Chainable builder
     */
    public GrpcClientLimiterBuilder partitionByCallOption(CallOptions.Key<String> option) {
        return partitionResolver(context -> context.getCallOptions().getOption(option));
    }

    /**
     * When set to true new calls to the channel will block when the limit has been reached instead
     * of failing fast with an UNAVAILABLE status.
     * Fix: removed an unused generic type parameter {@code <T>} that was declared on this
     * method's signature but never referenced.
     * @param blockOnLimit whether to block instead of failing fast
     * @return Chainable builder
     */
    public GrpcClientLimiterBuilder blockOnLimit(boolean blockOnLimit) {
        this.blockOnLimit = blockOnLimit;
        return this;
    }

    @Override
    protected GrpcClientLimiterBuilder self() {
        return this;
    }

    /**
     * @return the configured {@link Limiter}, wrapped in a {@link BlockingLimiter} when
     *         {@link #blockOnLimit(boolean)} was enabled
     */
    public Limiter<GrpcClientRequestContext> build() {
        Limiter<GrpcClientRequestContext> limiter = super.build();

        if (blockOnLimit) {
            limiter = BlockingLimiter.wrap(limiter);
        }
        return limiter;
    }
}
| 3,045 |
0 | Create_ds/concurrency-limits/concurrency-limits-grpc/src/main/java/com/netflix/concurrency/limits/grpc | Create_ds/concurrency-limits/concurrency-limits-grpc/src/main/java/com/netflix/concurrency/limits/grpc/client/ConcurrencyLimitClientInterceptor.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.grpc.client;
import com.google.common.base.Preconditions;
import com.netflix.concurrency.limits.Limiter;
import io.grpc.CallOptions;
import io.grpc.Channel;
import io.grpc.ClientCall;
import io.grpc.ClientInterceptor;
import io.grpc.ForwardingClientCall;
import io.grpc.ForwardingClientCallListener;
import io.grpc.Metadata;
import io.grpc.MethodDescriptor;
import io.grpc.Status;
import io.grpc.Status.Code;
import javax.annotation.Nullable;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* ClientInterceptor that enforces per service and/or per method concurrent request limits and returns
* a Status.UNAVAILABLE when that limit has been reached.
*/
public class ConcurrencyLimitClientInterceptor implements ClientInterceptor {
private static final Status LIMIT_EXCEEDED_STATUS = Status.UNAVAILABLE.withDescription("Client concurrency limit reached");
private final Limiter<GrpcClientRequestContext> grpcLimiter;
public ConcurrencyLimitClientInterceptor(final Limiter<GrpcClientRequestContext> grpcLimiter) {
Preconditions.checkArgument(grpcLimiter != null, "GrpcLimiter cannot not be null");
this.grpcLimiter = grpcLimiter;
}
@Override
public <ReqT, RespT> ClientCall<ReqT, RespT> interceptCall(final MethodDescriptor<ReqT, RespT> method,
final CallOptions callOptions, final Channel next) {
if (!method.getType().serverSendsOneMessage() || !method.getType().clientSendsOneMessage()) {
return next.newCall(method, callOptions);
}
return grpcLimiter
.acquire(new GrpcClientRequestContext() {
@Override
public MethodDescriptor<?, ?> getMethod() {
return method;
}
@Override
public CallOptions getCallOptions() {
return callOptions;
}
})
// Perform the operation and release the limiter once done.
.map(listener -> (ClientCall<ReqT, RespT>) new ForwardingClientCall.SimpleForwardingClientCall<ReqT, RespT>(next.newCall(method, callOptions)) {
final AtomicBoolean done = new AtomicBoolean(false);
@Override
public void start(final Listener<RespT> responseListener, final Metadata headers) {
super.start(new ForwardingClientCallListener.SimpleForwardingClientCallListener<RespT>(responseListener) {
@Override
public void onClose(final Status status, final Metadata trailers) {
try {
super.onClose(status, trailers);
} finally {
if (done.compareAndSet(false, true)) {
if (status.isOk()) {
listener.onSuccess();
} else if (Code.UNAVAILABLE == status.getCode()) {
listener.onDropped();
} else {
listener.onIgnore();
}
}
}
}
}, headers);
}
@Override
public void cancel(final @Nullable String message, final @Nullable Throwable cause) {
try {
super.cancel(message, cause);
} finally {
if (done.compareAndSet(false, true)) {
listener.onIgnore();
}
}
}
}
)
.orElseGet(() -> new ClientCall<ReqT, RespT>() {
private Listener<RespT> responseListener;
@Override
public void start(io.grpc.ClientCall.Listener<RespT> responseListener, Metadata headers) {
this.responseListener = responseListener;
}
@Override
public void request(int numMessages) {
}
@Override
public void cancel(String message, Throwable cause) {
}
@Override
public void halfClose() {
responseListener.onClose(LIMIT_EXCEEDED_STATUS, new Metadata());
}
@Override
public void sendMessage(ReqT message) {
}
}
);
}
}
| 3,046 |
0 | Create_ds/concurrency-limits/concurrency-limits-grpc/src/main/java/com/netflix/concurrency/limits/grpc | Create_ds/concurrency-limits/concurrency-limits-grpc/src/main/java/com/netflix/concurrency/limits/grpc/client/GrpcClientRequestContext.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.grpc.client;
import io.grpc.CallOptions;
import io.grpc.MethodDescriptor;
/**
 * Context describing a single outbound gRPC client call, giving limiter
 * implementations access to the method and call options (e.g. for partitioning).
 */
public interface GrpcClientRequestContext {
    /** @return descriptor of the method being invoked */
    MethodDescriptor<?, ?> getMethod();

    /** @return options the call was created with */
    CallOptions getCallOptions();
}
| 3,047 |
0 | Create_ds/concurrency-limits/concurrency-limits-spectator/src/test/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-spectator/src/test/java/com/netflix/concurrency/limits/spectator/SpectatorMetricRegistryTest.java | package com.netflix.concurrency.limits.spectator;
import org.junit.Assert;
import org.junit.Test;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.patterns.PolledMeter;
public class SpectatorMetricRegistryTest {
    /**
     * A registered gauge is exposed under "&lt;base&gt;.&lt;id&gt;" once polled meters are updated.
     * Fix: method renamed from the typo {@code testGuage}.
     */
    @Test
    public void testGauge() {
        DefaultRegistry registry = new DefaultRegistry();
        SpectatorMetricRegistry metricRegistry = new SpectatorMetricRegistry(registry, registry.createId("foo"));
        metricRegistry.gauge("bar", () -> 10);

        PolledMeter.update(registry);
        Assert.assertEquals(10.0, registry.gauge(registry.createId("foo.bar")).value(), 0);
    }

    /** Re-registering the same gauge id replaces the previous supplier rather than keeping both. */
    @Test
    public void testUnregister() {
        DefaultRegistry registry = new DefaultRegistry();
        SpectatorMetricRegistry metricRegistry = new SpectatorMetricRegistry(registry, registry.createId("foo"));
        metricRegistry.gauge("bar", () -> 10);
        metricRegistry.gauge("bar", () -> 20);

        PolledMeter.update(registry);
        Assert.assertEquals(20.0, registry.gauge(registry.createId("foo.bar")).value(), 0);
    }
}
| 3,048 |
0 | Create_ds/concurrency-limits/concurrency-limits-spectator/src/main/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-spectator/src/main/java/com/netflix/concurrency/limits/spectator/SpectatorMetricRegistry.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.spectator;
import java.util.function.Supplier;
import com.netflix.concurrency.limits.MetricRegistry;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.DistributionSummary;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.patterns.PolledMeter;
/**
 * {@link MetricRegistry} implementation backed by a Spectator {@link Registry}.
 * Every metric id is prefixed with the base id's name and carries the base id's tags.
 */
public final class SpectatorMetricRegistry implements MetricRegistry {
    private final Registry registry;
    // Base id: its name becomes the "<name>." prefix and its tags are applied to every metric.
    private final Id baseId;

    public SpectatorMetricRegistry(Registry registry, Id baseId) {
        this.registry = registry;
        this.baseId = baseId;
    }

    /** Records samples into a Spectator distribution summary, truncating values to long. */
    @Override
    public SampleListener distribution(String id, String... tagNameValuePairs) {
        DistributionSummary summary = registry.distributionSummary(suffixBaseId(id).withTags(tagNameValuePairs));
        return value -> summary.record(value.longValue());
    }

    /** Registers a polled gauge; re-registering the same id replaces the previous supplier. */
    @Override
    public void gauge(String id, Supplier<Number> supplier, String... tagNameValuePairs) {
        Id metricId = suffixBaseId(id).withTags(tagNameValuePairs);
        // Remove any previously registered meter for this id so that re-registration
        // replaces the old supplier instead of aggregating both values.
        PolledMeter.remove(registry, metricId);
        PolledMeter.using(registry)
                .withId(metricId)
                .monitorValue(supplier, ignore -> supplier.get().doubleValue());
    }

    /** Adapts a Spectator counter to this library's counter abstraction. */
    @Override
    public Counter counter(String id, String... tagNameValuePairs) {
        Id metricId = suffixBaseId(id).withTags(tagNameValuePairs);
        com.netflix.spectator.api.Counter spectatorCounter = registry.counter(metricId);
        return () -> spectatorCounter.increment();
    }

    // Builds "<baseName>.<suffix>" carrying over the base id's tags.
    private Id suffixBaseId(String suffix) {
        return registry.createId(this.baseId.name() + "." + suffix).withTags(this.baseId.tags());
    }
}
| 3,049 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/ExpAvgMeasurementTest.java | package com.netflix.concurrency.limits.limit;
import com.netflix.concurrency.limits.limit.measurement.ExpAvgMeasurement;
import org.junit.Assert;
import org.junit.Test;
import java.util.Arrays;
import java.util.List;
public class ExpAvgMeasurementTest {
    @Test
    public void testWarmup() {
        ExpAvgMeasurement avg = new ExpAvgMeasurement(100, 10);

        // During the 10-sample warmup the measurement tracks the plain running
        // average of the samples 10..19.
        // Fix: use Java-style array declaration (was `double expected[]`).
        double[] expected = {10.0, 10.5, 11, 11.5, 12, 12.5, 13, 13.5, 14, 14.5};
        for (int i = 0; i < 10; i++) {
            // Fix: add()'s return value was captured in an unused local.
            avg.add(i + 10);
            Assert.assertEquals(expected[i], avg.get().doubleValue(), 0.01);
        }

        // After warmup an outlier only nudges the exponentially weighted average.
        avg.add(100);
        Assert.assertEquals(16.2, avg.get().doubleValue(), 0.1);
    }
}
| 3,050 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/VegasLimitTest.java | package com.netflix.concurrency.limits.limit;
import junit.framework.Assert;
import org.junit.Test;
import java.util.concurrent.TimeUnit;
// NOTE(review): this class uses the deprecated junit.framework.Assert; consider
// migrating to org.junit.Assert.
public class VegasLimitTest {
    // Shared limit configuration used by most tests below (alpha=3, beta=6).
    public static VegasLimit create() {
        return VegasLimit.newBuilder()
                .alpha(3)
                .beta(6)
                .smoothing(1.0)
                .initialLimit(10)
                .maxConcurrency(20)
                .build();
    }

    @Test
    public void largeLimitIncrease() {
        VegasLimit limit = VegasLimit.newBuilder()
                .initialLimit(10000)
                .maxConcurrency(20000)
                .build();
        // First sample only establishes the baseline RTT; the limit is unchanged.
        limit.onSample(0, TimeUnit.SECONDS.toNanos(10), 5000, false);
        Assert.assertEquals(10000, limit.getLimit());
        // A second sample at the same RTT grows the limit by a small step relative
        // to its size (10000 -> 10024 — presumably the default log-scaled increase
        // function; confirm against VegasLimit's defaults).
        limit.onSample(0, TimeUnit.SECONDS.toNanos(10), 6000, false);
        Assert.assertEquals(10024, limit.getLimit());
    }

    @Test
    public void increaseLimit() {
        VegasLimit limit = create();
        // Baseline sample: no change yet.
        limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(10), 10, false);
        Assert.assertEquals(10, limit.getLimit());
        // RTT equal to the observed minimum implies no queueing, so the limit grows.
        limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(10), 11, false);
        Assert.assertEquals(16, limit.getLimit());
    }

    @Test
    public void decreaseLimit() {
        VegasLimit limit = create();
        limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(10), 10, false);
        Assert.assertEquals(10, limit.getLimit());
        // A 5x slower RTT implies significant queueing, so the limit shrinks.
        limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(50), 11, false);
        Assert.assertEquals(9, limit.getLimit());
    }

    @Test
    public void noChangeIfWithinThresholds() {
        VegasLimit limit = create();
        limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(10), 10, false);
        Assert.assertEquals(10, limit.getLimit());
        // A moderately slower RTT keeps the queue estimate between alpha and beta,
        // leaving the limit untouched.
        limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(14), 14, false);
        Assert.assertEquals(10, limit.getLimit());
    }

    @Test
    public void decreaseSmoothing() {
        VegasLimit limit = VegasLimit.newBuilder()
                .decrease(current -> current / 2)
                .smoothing(0.5)
                .initialLimit(100)
                .maxConcurrency(200)
                .build();
        // Pick up first min-rtt
        limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(10), 100, false);
        Assert.assertEquals(100, limit.getLimit());
        // First decrease: smoothing 0.5 blends the halved limit (50) with the
        // current one (100), yielding 75.
        limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(20), 100, false);
        Assert.assertEquals(75, limit.getLimit());
        // Second decrease: blend of 75 and 37 -> 56.
        limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(20), 100, false);
        Assert.assertEquals(56, limit.getLimit());
    }

    @Test
    public void decreaseWithoutSmoothing() {
        VegasLimit limit = VegasLimit.newBuilder()
                .decrease(current -> current / 2)
                .initialLimit(100)
                .maxConcurrency(200)
                .build();
        // Pick up first min-rtt
        limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(10), 100, false);
        Assert.assertEquals(100, limit.getLimit());
        // First decrease: without smoothing the halved limit applies directly.
        limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(20), 100, false);
        Assert.assertEquals(50, limit.getLimit());
        // Second decrease
        limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(20), 100, false);
        Assert.assertEquals(25, limit.getLimit());
    }
}
| 3,051 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/AIMDLimitTest.java | package com.netflix.concurrency.limits.limit;
import junit.framework.Assert;
import org.junit.Test;
import java.util.concurrent.TimeUnit;
// NOTE(review): this class uses the deprecated junit.framework.Assert; consider
// migrating to org.junit.Assert.
public class AIMDLimitTest {
    @Test
    public void testDefault() {
        // The limit starts at the configured initial value.
        AIMDLimit limiter = AIMDLimit.newBuilder().initialLimit(10).build();
        Assert.assertEquals(10, limiter.getLimit());
    }

    @Test
    public void increaseOnSuccess() {
        // A successful, non-dropped sample grows the limit additively by 1.
        AIMDLimit limiter = AIMDLimit.newBuilder().initialLimit(20).build();
        limiter.onSample(0, TimeUnit.MILLISECONDS.toNanos(1), 10, false);
        Assert.assertEquals(21, limiter.getLimit());
    }

    @Test
    public void decreaseOnDrops() {
        // A dropped sample backs the limit off multiplicatively: 30 -> 27
        // (a 0.9 backoff ratio with the builder defaults — confirm against AIMDLimit).
        AIMDLimit limiter = AIMDLimit.newBuilder().initialLimit(30).build();
        limiter.onSample(0, 0, 0, true);
        Assert.assertEquals(27, limiter.getLimit());
    }

    @Test
    public void successOverflow() {
        AIMDLimit limiter = AIMDLimit.newBuilder().initialLimit(21).maxLimit(21).minLimit(0).build();
        limiter.onSample(0, TimeUnit.MILLISECONDS.toNanos(1), 10, false);
        // after success limit should still be at the max.
        Assert.assertEquals(21, limiter.getLimit());
    }
}
| 3,052 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/window/ImmutableAverageSampleWindowTest.java | /**
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit.window;
import org.junit.Assert;
import org.junit.Test;
public class ImmutableAverageSampleWindowTest {
    private static final long BIG_RTT = 5000;
    private static final long MODERATE_RTT = 500;
    private static final long LOW_RTT = 10;

    @Test
    public void calculateAverage() {
        // The tracked RTT is the integer mean of every sample added to the window.
        SampleWindow window = new ImmutableAverageSampleWindow()
                .addSample(BIG_RTT, 1, false)
                .addSample(MODERATE_RTT, 1, false)
                .addSample(LOW_RTT, 1, false);
        Assert.assertEquals((BIG_RTT + MODERATE_RTT + LOW_RTT) / 3, window.getTrackedRttNanos());
    }

    @Test
    public void droppedSampleShouldChangeTrackedAverage() {
        // Dropped samples still contribute their RTT to the running average.
        SampleWindow window = new ImmutableAverageSampleWindow()
                .addSample(BIG_RTT, 1, false)
                .addSample(MODERATE_RTT, 1, false)
                .addSample(LOW_RTT, 1, false)
                .addSample(BIG_RTT, 1, true);
        Assert.assertEquals((BIG_RTT + MODERATE_RTT + LOW_RTT + BIG_RTT) / 4, window.getTrackedRttNanos());
    }
}
| 3,053 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/window/ImmutablePercentileSampleWindowTest.java | /**
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit.window;
import org.junit.Assert;
import org.junit.Test;
public class ImmutablePercentileSampleWindowTest {
    // Representative RTT samples (units follow getTrackedRttNanos, i.e. nanoseconds).
    private final long bigRtt = 5000;
    private final long moderateRtt = 500;
    private final long lowRtt = 10;

    @Test
    public void calculateP50() {
        // With three distinct samples the p50 is the middle value.
        SampleWindow window = new ImmutablePercentileSampleWindow(0.5, 10);
        window = window.addSample(bigRtt, 0, false);
        window = window.addSample(moderateRtt, 0, false);
        window = window.addSample(lowRtt, 0, false);
        Assert.assertEquals(moderateRtt, window.getTrackedRttNanos());
    }

    @Test
    public void droppedSampleShouldChangeTrackedRtt() {
        // Dropped samples are still recorded: with two of three samples at bigRtt
        // the p50 moves up to bigRtt.
        ImmutablePercentileSampleWindow window = new ImmutablePercentileSampleWindow(0.5, 10);
        window = window.addSample(lowRtt, 1, false);
        window = window.addSample(bigRtt, 1, true);
        window = window.addSample(bigRtt, 1, true);
        Assert.assertEquals(bigRtt, window.getTrackedRttNanos());
    }

    @Test
    public void p999ReturnsSlowestObservedRtt() {
        // At p99.9 with only three samples the tracked RTT is the slowest one seen.
        SampleWindow window = new ImmutablePercentileSampleWindow(0.999, 10);
        window = window.addSample(bigRtt, 1, false);
        window = window.addSample(moderateRtt, 1, false);
        window = window.addSample(lowRtt, 1, false);
        Assert.assertEquals(bigRtt, window.getTrackedRttNanos());
    }

    @Test
    public void rttObservationOrderDoesntAffectResultValue() {
        // Insertion order must not change the computed percentile.
        SampleWindow window = new ImmutablePercentileSampleWindow(0.999, 10);
        window = window.addSample(moderateRtt, 1, false);
        window = window.addSample(lowRtt, 1, false);
        window = window.addSample(bigRtt, 1, false);
        window = window.addSample(lowRtt, 1, false);
        Assert.assertEquals(bigRtt, window.getTrackedRttNanos());
    }
}
| 3,054 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/functions/SquareRootFunctionTest.java | package com.netflix.concurrency.limits.limit.functions;
import java.util.function.Function;
import org.junit.Assert;
import org.junit.Test;
public class SquareRootFunctionTest {
    /** Baseline value used to seed the function in every test. */
    private static final int BASELINE = 4;

    @Test
    public void confirm0Index() {
        // Index 0 yields the baseline value itself.
        Function<Integer, Integer> sqrt = SquareRootFunction.create(BASELINE);
        Assert.assertEquals(4, sqrt.apply(0).intValue());
    }

    @Test
    public void confirmMaxIndex() {
        // Largest index exercised against the lookup-backed implementation.
        Function<Integer, Integer> sqrt = SquareRootFunction.create(BASELINE);
        Assert.assertEquals(31, sqrt.apply(999).intValue());
    }

    @Test
    public void confirmOutofLookupRange() {
        // Values beyond the lookup range still resolve: sqrt(2500) == 50.
        Function<Integer, Integer> sqrt = SquareRootFunction.create(BASELINE);
        Assert.assertEquals(50, sqrt.apply(2500).intValue());
    }
}
| 3,055 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/functions/Log10RootFunctionTest.java | package com.netflix.concurrency.limits.limit.functions;
import org.junit.Assert;
import org.junit.Test;
import java.util.function.Function;
public class Log10RootFunctionTest {
@Test
public void test0Index() {
Function<Integer, Integer> func = Log10RootFunction.create(0);
Assert.assertEquals(1, func.apply(0).intValue());
}
@Test
public void testInRange() {
Function<Integer, Integer> func = Log10RootFunction.create(0);
Assert.assertEquals(2, func.apply(100).intValue());
}
@Test
public void testOutofLookupRange() {
Function<Integer, Integer> func = Log10RootFunction.create(0);
Assert.assertEquals(4, func.apply(10000).intValue());
}
} | 3,056 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limiter/LifoBlockingLimiterTest.java | package com.netflix.concurrency.limits.limiter;
import com.netflix.concurrency.limits.Limiter;
import com.netflix.concurrency.limits.limit.SettableLimit;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
public class LifoBlockingLimiterTest {
private static final Logger LOGGER = LoggerFactory.getLogger(LifoBlockingLimiterTest.class);
final ExecutorService executor = Executors.newCachedThreadPool();
final SettableLimit limit = SettableLimit.startingAt(4);
final SimpleLimiter<Void> simpleLimiter = SimpleLimiter
.newBuilder()
.limit(limit)
.build();
final LifoBlockingLimiter<Void> blockingLimiter = LifoBlockingLimiter.newBuilder(simpleLimiter)
.backlogSize(10)
.backlogTimeout(1, TimeUnit.SECONDS)
.build();
    // Once all 4 tokens are held, a further acquire should block for the full
    // 1 second backlog timeout and then return empty.
    @Test
    public void blockWhenFullAndTimeout() {
        // Acquire all 4 available tokens
        for (int i = 0; i < 4; i++) {
            Optional<Limiter.Listener> listener = blockingLimiter.acquire(null);
            Assert.assertTrue(listener.isPresent());
        }
        // Next acquire will block for 1 second
        long start = System.nanoTime();
        Optional<Limiter.Listener> listener = blockingLimiter.acquire(null);
        long duration = TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - start);
        Assert.assertTrue(duration >= 1);
        Assert.assertFalse(listener.isPresent());
    }
@Test
public void unblockWhenFullBeforeTimeout() {
// Acquire all 4 available tokens
List<Optional<Limiter.Listener>> listeners = acquireN(blockingLimiter, 4);
// Schedule one to release in 250 msec
Executors.newSingleThreadScheduledExecutor().schedule(() -> listeners.get(0).get().onSuccess(), 250, TimeUnit.MILLISECONDS);
// Next acquire will block for 1 second
long start = System.nanoTime();
Optional<Limiter.Listener> listener = blockingLimiter.acquire(null);
long duration = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
Assert.assertTrue("Duration = " + duration, duration >= 200);
Assert.assertTrue(listener.isPresent());
}
    // With 4 tokens held and 10 more acquires queued (the configured backlogSize),
    // the next attempt should be rejected immediately rather than queued.
    @Test
    public void rejectWhenBacklogSizeReached() throws InterruptedException {
        acquireNAsync(blockingLimiter, 14);
        // Small delay to make sure all acquire() calls have been made
        TimeUnit.MILLISECONDS.sleep(250);
        // Next acquire will reject with no delay
        long start = System.nanoTime();
        Optional<Limiter.Listener> listener = blockingLimiter.acquire(null);
        long duration = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
        Assert.assertTrue("Duration = " + duration, duration < 100);
        Assert.assertFalse(listener.isPresent());
    }
    // Raising the limit while saturated should immediately free capacity.
    @Test
    public void adaptWhenLimitIncreases() {
        acquireN(blockingLimiter, 4);
        limit.setLimit(5);
        // Next acquire will succeed with no delay
        long start = System.nanoTime();
        Optional<Limiter.Listener> listener = blockingLimiter.acquire(null);
        long duration = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
        Assert.assertTrue("Duration = " + duration, duration < 100);
        Assert.assertTrue(listener.isPresent());
    }
/**
 * Verifies that lowering the delegate limit at runtime is honored: after the
 * limit drops from 4 to 3, releasing one of the 4 outstanding tokens still
 * leaves the limiter saturated (3 inflight == new limit), so the next
 * acquire() blocks for the full backlog timeout and is then rejected.
 */
@Test
public void adaptWhenLimitDecreases() {
    List<Optional<Limiter.Listener>> listeners = acquireN(blockingLimiter, 4);
    limit.setLimit(3);
    // Release one token; inflight (3) still equals the new limit, so no capacity frees up
    listeners.get(0).get().onSuccess();
    // Next acquire will reject and block
    long start = System.nanoTime();
    // BUGFIX: was TimeUnit.SECONDS.toMillis(elapsedNanos), which multiplied the elapsed
    // nanoseconds by 1000 and made "duration >= 1" vacuously true. Convert the elapsed
    // nanoseconds to seconds, matching the other timeout assertions in this class.
    Optional<Limiter.Listener> listener = blockingLimiter.acquire(null);
    long duration = TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - start);
    Assert.assertTrue("Duration = " + duration, duration >= 1);
    Assert.assertFalse(listener.isPresent());
}
/**
 * Verifies the LIFO ordering of blocked acquirers: requests submitted in order
 * 0..4 while the limiter is saturated are granted tokens in reverse order 4..0
 * as tokens are released one at a time.
 *
 * NOTE(review): the method name says "Fifo" but the assertion checks reverse
 * (LIFO) order, consistent with a LIFO-blocking limiter — the name is presumably
 * historical; confirm before renaming.
 */
@Test
public void verifyFifoOrder() {
    // Make sure all tokens are acquired
    List<Optional<Limiter.Listener>> firstBatch = acquireN(blockingLimiter, 4);
    // Kick off 5 requests with a small delay to ensure futures are created in the correct order
    List<Integer> values = new CopyOnWriteArrayList<>();
    List<CompletableFuture<Void>> futures = IntStream.range(0, 5)
            .peek(i -> {
                try {
                    TimeUnit.MILLISECONDS.sleep(50);
                } catch (InterruptedException e) {
                    // best-effort pacing only; safe to ignore in this test
                }
            })
            .mapToObj(i -> CompletableFuture.<Void>supplyAsync(() -> {
                Optional<Limiter.Listener> listener = blockingLimiter.acquire(null);
                if (!listener.isPresent()) {
                    // -1 marks an unexpected rejection and will break the final assertion
                    // NOTE(review): execution still falls through to listener.get() below,
                    // which would throw NoSuchElementException in that case — the test
                    // would fail either way, but the failure mode is indirect.
                    values.add(-1);
                }
                try {
                    values.add(i);
                } finally {
                    listener.get().onSuccess();
                }
                return null;
            }, executor))
            .collect(Collectors.toList());
    // Release the first batch of tokens
    firstBatch.forEach(listener -> {
        try {
            // spacing the releases keeps one waiter unblocking at a time
            TimeUnit.MILLISECONDS.sleep(100);
        } catch (InterruptedException e) {
        }
        listener.get().onSuccess();
    });
    // Make sure all requests finished
    futures.forEach(future -> {
        try {
            future.get();
        } catch (Exception e) {
        }
    });
    // Verify that results are in reverse order
    Assert.assertEquals(Arrays.asList(4, 3, 2, 1, 0), values);
}
// this test reproduces the condition where a thread acquires a token just as it is timing out.
// before that was fixed, it would lead to a token getting lost.
@Test
public void timeoutAcquireRaceCondition() throws InterruptedException, ExecutionException {
    // a limiter with a short timeout, and large backlog (we don't want it to hit that limit)
    LifoBlockingLimiter<Void> limiter = LifoBlockingLimiter.newBuilder(simpleLimiter)
            .backlogSize(1000)
            .backlogTimeout(10, TimeUnit.MILLISECONDS)
            .build();
    // acquire all except one token
    acquireN(limiter, 3);
    // try to reproduce the problem a couple of times
    for (int round = 0; round < 10; round++) {
        // indicates if there has already been a timeout
        AtomicBoolean firstTimeout = new AtomicBoolean(false);
        // take the last token
        Limiter.Listener one = limiter.acquire(null).get();
        // in a bunch of threads in parallel, try to take one more. all of these will start to
        // time out at around the same time
        List<Future<?>> futures = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            futures.add(executor.submit(() -> {
                Optional<Limiter.Listener> listener = limiter.acquire(null);
                if (listener.isPresent()) {
                    // if we got the last token, release it again. this might give it to a
                    // thread that is in the process of timing out
                    listener.get().onSuccess();
                } else if (firstTimeout.compareAndSet(false, true)) {
                    // if this is the first one that times out, then other threads are going to
                    // start timing out soon too, so it's time to release a token
                    one.onSuccess();
                }
                return null;
            }));
        }
        // wait for this round to finish
        for (Future<?> future : futures) {
            future.get();
        }
        // if a token was lost to the race, inflight would not return to 3 here
        Assert.assertEquals(3, simpleLimiter.getInflight());
    }
}
/**
 * Acquires {@code N} tokens from the limiter, asserting each acquisition
 * succeeds, and returns the listeners in acquisition order.
 */
private List<Optional<Limiter.Listener>> acquireN(Limiter<Void> limiter, int N) {
    return IntStream.range(0, N)
            .mapToObj(i -> {
                Optional<Limiter.Listener> acquired = limiter.acquire(null);
                Assert.assertTrue(acquired.isPresent());
                return acquired;
            })
            .collect(Collectors.toList());
}
/**
 * Submits {@code N} acquire() calls to the shared executor without waiting for
 * them, returning the futures. Used to fill the backlog with blocked waiters.
 */
private List<CompletableFuture<Optional<Limiter.Listener>>> acquireNAsync(Limiter<Void> limiter, int N) {
    return IntStream.range(0, N)
            .mapToObj(i -> {
                CompletableFuture<Optional<Limiter.Listener>> pending =
                        CompletableFuture.supplyAsync(() -> limiter.acquire(null), executor);
                return pending;
            })
            .collect(Collectors.toList());
}
}
| 3,057 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limiter/BlockingLimiterTest.java | package com.netflix.concurrency.limits.limiter;
import com.netflix.concurrency.limits.Limiter;
import com.netflix.concurrency.limits.limit.SettableLimit;
import org.junit.Assert;
import org.junit.Test;
import java.time.Duration;
import java.time.Instant;
import java.util.LinkedList;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static org.junit.Assert.assertTrue;
/**
 * Tests for {@code BlockingLimiter}, which wraps a delegate limiter and blocks
 * acquire() callers (optionally up to a timeout) instead of rejecting them.
 */
public class BlockingLimiterTest {
    /**
     * Acquire/release cycling across a limit decrease must not deadlock.
     */
    @Test
    public void test() {
        SettableLimit limit = SettableLimit.startingAt(10);
        BlockingLimiter<Void> limiter = BlockingLimiter.wrap(SimpleLimiter.newBuilder().limit(limit).build());
        LinkedList<Limiter.Listener> listeners = new LinkedList<>();
        for (int i = 0; i < 10; i++) {
            limiter.acquire(null).ifPresent(listeners::add);
        }
        limit.setLimit(1);
        while (!listeners.isEmpty()) {
            listeners.remove().onSuccess();
        }
        limiter.acquire(null);
    }

    /**
     * With a limit of 1 and 8 competing threads, each thread must eventually
     * acquire and release; a lost wakeup would make future.get() time out.
     */
    @Test
    public void testMultipleBlockedThreads() throws InterruptedException, ExecutionException, TimeoutException {
        int numThreads = 8;
        SettableLimit limit = SettableLimit.startingAt(1);
        BlockingLimiter<Void> limiter = BlockingLimiter.wrap(SimpleLimiter.newBuilder().limit(limit).build());
        ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
        try {
            for (Future<?> future : IntStream.range(0, numThreads)
                    .mapToObj(x -> executorService.submit(() -> limiter.acquire(null).get().onSuccess()))
                    .collect(Collectors.toList())) {
                future.get(1, TimeUnit.SECONDS);
            }
        } finally {
            executorService.shutdown();
        }
    }

    /**
     * A blocked acquire() must return empty after at least the configured timeout.
     */
    @Test
    public void testTimeout() {
        Duration timeout = Duration.ofMillis(50);
        SettableLimit limit = SettableLimit.startingAt(1);
        BlockingLimiter<Void> limiter = BlockingLimiter.wrap(SimpleLimiter.newBuilder().limit(limit).build(), timeout);
        // Acquire first, will succeed and not block
        limiter.acquire(null);
        // Second acquire should time out after at least 50 millis
        Instant before = Instant.now();
        Assert.assertFalse(limiter.acquire(null).isPresent());
        Instant after = Instant.now();
        Duration delay = Duration.between(before, after);
        assertTrue("Delay was " + delay.toMillis() + " millis", delay.compareTo(timeout) >= 0);
    }

    /**
     * Without a timeout a saturated limiter blocks indefinitely; the outer
     * future.get(1s) timing out is the expected outcome.
     */
    @Test(expected=TimeoutException.class)
    public void testNoTimeout() throws InterruptedException, ExecutionException, TimeoutException {
        SettableLimit limit = SettableLimit.startingAt(1);
        BlockingLimiter<Void> limiter = BlockingLimiter.wrap(SimpleLimiter.newBuilder().limit(limit).build());
        limiter.acquire(null);
        CompletableFuture<Optional<Limiter.Listener>> future = CompletableFuture.supplyAsync(() -> limiter.acquire(null));
        future.get(1, TimeUnit.SECONDS);
    }

    /**
     * Excessively large timeouts (here 1 day) are rejected at construction time.
     */
    @Test(expected = IllegalArgumentException.class)
    public void failOnHighTimeout() {
        SettableLimit limit = SettableLimit.startingAt(1);
        BlockingLimiter<Void> limiter = BlockingLimiter.wrap(SimpleLimiter.newBuilder().limit(limit).build(), Duration.ofDays(1));
    }
}
| 3,058 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limiter/AbstractPartitionedLimiterTest.java | package com.netflix.concurrency.limits.limiter;
import com.netflix.concurrency.limits.Limiter;
import com.netflix.concurrency.limits.limit.FixedLimit;
import com.netflix.concurrency.limits.limit.SettableLimit;
import org.junit.Assert;
import org.junit.Test;
import java.util.Optional;
import java.util.function.Function;
/**
 * Tests for {@code AbstractPartitionedLimiter}: a limiter that splits a total
 * concurrency limit across named partitions by percentage (e.g. 30% "batch",
 * 70% "live"), while allowing a partition to borrow unused capacity.
 */
public class AbstractPartitionedLimiterTest {
    /** Minimal concrete subclass exposing the protected builder for tests. */
    public static class TestPartitionedLimiter extends AbstractPartitionedLimiter<String> {
        public static class Builder extends AbstractPartitionedLimiter.Builder<Builder, String> {
            @Override
            protected Builder self() {
                return this;
            }
        }

        public static Builder newBuilder() {
            return new Builder();
        }

        public TestPartitionedLimiter(Builder builder) {
            super(builder);
        }
    }

    /** Partition percentages of a limit of 10 yield per-partition limits 3 and 7. */
    @Test
    public void limitAllocatedToBins() {
        AbstractPartitionedLimiter<String> limiter = (AbstractPartitionedLimiter<String>) TestPartitionedLimiter.newBuilder()
                .partitionResolver(Function.identity())
                .partition("batch", 0.3)
                .partition("live", 0.7)
                .limit(FixedLimit.of(10))
                .build();
        Assert.assertEquals(3, limiter.getPartition("batch").getLimit());
        Assert.assertEquals(7, limiter.getPartition("live").getLimit());
    }

    /** A single partition may borrow idle capacity up to the total limit (10), not just its share (3). */
    @Test
    public void useExcessCapacityUntilTotalLimit() {
        AbstractPartitionedLimiter<String> limiter = (AbstractPartitionedLimiter<String>) TestPartitionedLimiter.newBuilder()
                .partitionResolver(Function.identity())
                .partition("batch", 0.3)
                .partition("live", 0.7)
                .limit(FixedLimit.of(10))
                .build();
        for (int i = 0; i < 10; i++) {
            Assert.assertTrue(limiter.acquire("batch").isPresent());
            Assert.assertEquals(i+1, limiter.getPartition("batch").getInflight());
        }
        Assert.assertFalse(limiter.acquire("batch").isPresent());
    }

    /**
     * Even when one partition has borrowed the full total limit, the other
     * partition can still use its own guaranteed share.
     */
    @Test
    public void exceedTotalLimitForUnusedBin() {
        AbstractPartitionedLimiter<String> limiter = (AbstractPartitionedLimiter<String>) TestPartitionedLimiter.newBuilder()
                .partitionResolver(Function.identity())
                .partition("batch", 0.3)
                .partition("live", 0.7)
                .limit(FixedLimit.of(10))
                .build();
        for (int i = 0; i < 10; i++) {
            Assert.assertTrue(limiter.acquire("batch").isPresent());
            Assert.assertEquals(i+1, limiter.getPartition("batch").getInflight());
        }
        Assert.assertFalse(limiter.acquire("batch").isPresent());
        for (int i = 0; i < 7; i++) {
            Assert.assertTrue(limiter.acquire("live").isPresent());
            Assert.assertEquals(i+1, limiter.getPartition("live").getInflight());
        }
        Assert.assertFalse(limiter.acquire("live").isPresent());
    }

    /** When each partition is exactly at its share, all further acquires are rejected. */
    @Test
    public void rejectOnceAllLimitsReached() {
        AbstractPartitionedLimiter<String> limiter = (AbstractPartitionedLimiter<String>) TestPartitionedLimiter.newBuilder()
                .partitionResolver(Function.identity())
                .partition("batch", 0.3)
                .partition("live", 0.7)
                .limit(FixedLimit.of(10))
                .build();
        for (int i = 0; i < 3; i++) {
            Assert.assertTrue(limiter.acquire("batch").isPresent());
            Assert.assertEquals(i+1, limiter.getPartition("batch").getInflight());
            Assert.assertEquals(i+1, limiter.getInflight());
        }
        for (int i = 0; i < 7; i++) {
            Assert.assertTrue(limiter.acquire("live").isPresent());
            Assert.assertEquals(i+1, limiter.getPartition("live").getInflight());
            Assert.assertEquals(i+4, limiter.getInflight());
        }
        Assert.assertFalse(limiter.acquire("batch").isPresent());
        Assert.assertFalse(limiter.acquire("live").isPresent());
    }

    /** Releasing a token (onSuccess) frees capacity in both the partition and the total. */
    @Test
    public void releaseLimit() {
        AbstractPartitionedLimiter<String> limiter = (AbstractPartitionedLimiter<String>) TestPartitionedLimiter.newBuilder()
                .partitionResolver(Function.identity())
                .partition("batch", 0.3)
                .partition("live", 0.7)
                .limit(FixedLimit.of(10))
                .build();
        Optional<Limiter.Listener> completion = limiter.acquire("batch");
        for (int i = 1; i < 10; i++) {
            Assert.assertTrue(limiter.acquire("batch").isPresent());
            Assert.assertEquals(i+1, limiter.getPartition("batch").getInflight());
        }
        Assert.assertEquals(10, limiter.getInflight());
        Assert.assertFalse(limiter.acquire("batch").isPresent());
        completion.get().onSuccess();
        Assert.assertEquals(9, limiter.getPartition("batch").getInflight());
        Assert.assertEquals(9, limiter.getInflight());
        Assert.assertTrue(limiter.acquire("batch").isPresent());
        Assert.assertEquals(10, limiter.getPartition("batch").getInflight());
        Assert.assertEquals(10, limiter.getInflight());
    }

    /** Changing the total limit rescales partition limits while preserving inflight counts. */
    @Test
    public void setLimitReservesBusy() {
        SettableLimit limit = SettableLimit.startingAt(10);
        AbstractPartitionedLimiter<String> limiter = (AbstractPartitionedLimiter<String>) TestPartitionedLimiter.newBuilder()
                .partitionResolver(Function.identity())
                .partition("batch", 0.3)
                .partition("live", 0.7)
                .limit(limit)
                .build();
        limit.setLimit(10);
        Assert.assertEquals(3, limiter.getPartition("batch").getLimit());
        Assert.assertTrue(limiter.acquire("batch").isPresent());
        Assert.assertEquals(1, limiter.getPartition("batch").getInflight());
        Assert.assertEquals(1, limiter.getInflight());
        limit.setLimit(20);
        Assert.assertEquals(6, limiter.getPartition("batch").getLimit());
        Assert.assertEquals(1, limiter.getPartition("batch").getInflight());
        Assert.assertEquals(1, limiter.getInflight());
    }
}
| 3,059 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/executor/BlockingAdaptiveExecutorSimulation.java | package com.netflix.concurrency.limits.executor;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;
import com.netflix.concurrency.limits.Limiter;
import com.netflix.concurrency.limits.limiter.SimpleLimiter;
import org.junit.Ignore;
import org.junit.Test;
import com.netflix.concurrency.limits.executors.BlockingAdaptiveExecutor;
import com.netflix.concurrency.limits.limit.AIMDLimit;
import com.netflix.concurrency.limits.limit.GradientLimit;
import com.netflix.concurrency.limits.limit.TracingLimitDecorator;
import com.netflix.concurrency.limits.limit.VegasLimit;
@Ignore("These are simulations and not tests")
public class BlockingAdaptiveExecutorSimulation {
@Test
public void test() {
Limiter<Void> limiter = SimpleLimiter.newBuilder().limit(AIMDLimit.newBuilder().initialLimit(10).build()).build();
Executor executor = BlockingAdaptiveExecutor.newBuilder().limiter(limiter).build();
run(10000, 20, executor, randomLatency(50, 150));
}
@Test
public void testVegas() {
Limiter<Void> limiter = SimpleLimiter.newBuilder()
.limit(TracingLimitDecorator.wrap(VegasLimit.newBuilder()
.initialLimit(100)
.build()))
.build();
Executor executor = BlockingAdaptiveExecutor.newBuilder().limiter(limiter).build();
run(10000, 50, executor, randomLatency(50, 150));
}
@Test
public void testGradient() {
Limiter<Void> limiter = SimpleLimiter.newBuilder()
.limit(TracingLimitDecorator.wrap(GradientLimit.newBuilder()
.initialLimit(100)
.build()))
.build();
Executor executor = BlockingAdaptiveExecutor.newBuilder().limiter(limiter).build();
run(100000, 50, executor, randomLatency(50, 150));
}
public void run(int iterations, int limit, Executor executor, Supplier<Long> latency) {
AtomicInteger requests = new AtomicInteger();
AtomicInteger busy = new AtomicInteger();
AtomicInteger counter = new AtomicInteger();
Executors.newSingleThreadScheduledExecutor().scheduleAtFixedRate(() -> {
System.out.println("" + counter.incrementAndGet() + " total=" + requests.getAndSet(0) + " busy=" + busy.get());
}, 1, 1, TimeUnit.SECONDS);
Semaphore sem = new Semaphore(limit, true);
for (int i = 0; i < iterations; i++) {
requests.incrementAndGet();
busy.incrementAndGet();
executor.execute(() -> {
try {
sem.acquire();
TimeUnit.MILLISECONDS.sleep(latency.get());
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} finally {
sem.release();
busy.decrementAndGet();
}
});
}
}
public Supplier<Long> randomLatency(int min, int max) {
return () -> min + ThreadLocalRandom.current().nextLong(max - min);
}
}
| 3,060 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/MetricRegistry.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits;
import java.util.function.Supplier;
/**
* Simple abstraction for tracking metrics in the limiters.
*
*/
/**
 * Simple abstraction for tracking metrics in the limiters.
 *
 * All methods have default implementations so implementers can opt in to only
 * the metrics they care about; the deprecated register* defaults throw
 * {@link UnsupportedOperationException} to flag stale integrations.
 */
public interface MetricRegistry {
    /**
     * Listener to receive samples for a distribution
     */
    interface SampleListener {
        void addSample(Number value);
    }

    /**
     * Monotonically increasing event counter.
     */
    interface Counter {
        void increment();
    }

    /**
     * @deprecated Use {@link #distribution(String, String...)}
     */
    @Deprecated
    default SampleListener registerDistribution(String id, String... tagNameValuePairs) {
        throw new UnsupportedOperationException("registerDistribution is deprecated");
    }

    /**
     * Register a sample distribution. Samples are added to the distribution via the returned
     * {@link SampleListener}. Will reuse an existing {@link SampleListener} if the distribution already
     * exists.
     *
     * @param id
     * @param tagNameValuePairs Pairs of tag name and tag value. Number of parameters must be a multiple of 2.
     * @return SampleListener for the caller to add samples
     */
    default SampleListener distribution(String id, String... tagNameValuePairs) {
        // Delegates to the deprecated name for backwards compatibility with
        // implementations that only override registerDistribution.
        return registerDistribution(id, tagNameValuePairs);
    }

    /**
     * @deprecated Use {@link #gauge(String, Supplier, String...)}
     */
    @Deprecated
    default void registerGauge(String id, Supplier<Number> supplier, String... tagNameValuePairs) {
        // BUGFIX: message previously said "registerDistribution is deprecated",
        // which misreported which method the caller actually invoked.
        throw new UnsupportedOperationException("registerGauge is deprecated");
    }

    /**
     * Register a gauge using the provided supplier. The supplier will be polled whenever the gauge
     * value is flushed by the registry.
     *
     * @param id
     * @param tagNameValuePairs Pairs of tag name and tag value. Number of parameters must be a multiple of 2.
     * @param supplier
     */
    default void gauge(String id, Supplier<Number> supplier, String... tagNameValuePairs) {
        // Delegates to the deprecated name for backwards compatibility.
        registerGauge(id, supplier, tagNameValuePairs);
    }

    /**
     * Create a counter that will be incremented when an event occurs. Counters normally translate
     * into an action-per-second metric.
     *
     * @param id
     * @param tagNameValuePairs Pairs of tag name and tag value. Number of parameters must be a multiple of 2.
     * @return Counter to be incremented by the caller; the default is a no-op.
     */
    default Counter counter(String id, String... tagNameValuePairs) {
        return () -> {};
    }

    /**
     * @deprecated Call MetricRegistry#registerGauge
     */
    @Deprecated
    default void registerGuage(String id, Supplier<Number> supplier, String... tagNameValuePairs) {
        // Misspelled legacy alias kept for binary/source compatibility.
        gauge(id, supplier, tagNameValuePairs);
    }
}
| 3,061 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/Limiter.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits;
import java.util.Optional;
/**
* Contract for a concurrency limiter. The caller is expected to call acquire() for each request
* and must also release the returned listener when the operation completes. Releasing the Listener
* may trigger an update to the concurrency limit based on error rate or latency measurement.
*
* @param <ContextT> Some limiters take a context to perform more fine grained limits.
*/
/**
 * Contract for a concurrency limiter. The caller is expected to call acquire() for each request
 * and must also release the returned listener when the operation completes. Releasing the Listener
 * may trigger an update to the concurrency limit based on error rate or latency measurement.
 *
 * Exactly one of the Listener callbacks must be invoked per successful acquire.
 *
 * @param <ContextT> Some limiters take a context to perform more fine grained limits.
 */
@FunctionalInterface
public interface Limiter<ContextT> {
    /**
     * Completion callback for a single acquired permit.
     */
    interface Listener {
        /**
         * Notification that the operation succeeded and internally measured latency should be
         * used as an RTT sample
         */
        void onSuccess();

        /**
         * The operation failed before any meaningful RTT measurement could be made and should
         * be ignored to not introduce an artificially low RTT
         */
        void onIgnore();

        /**
         * The request failed and was dropped due to being rejected by an external limit or hitting
         * a timeout. Loss based {@link Limit} implementations will likely do an aggressive
         * reducing in limit when this happens.
         */
        void onDropped();
    }

    /**
     * Acquire a token from the limiter. Returns an Optional.empty() if the limit has been exceeded.
     * If acquired the caller must call one of the Listener methods when the operation has been completed
     * to release the count.
     *
     * @param context Context for the request
     * @return Optional.empty() if limit exceeded.
     */
    Optional<Listener> acquire(ContextT context);
}
| 3,062 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/MetricIds.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits;
/**
* Common metric ids
*/
/**
 * Well-known metric identifiers shared by the limiter and limit implementations.
 * Non-instantiable constant holder.
 */
public final class MetricIds {
    /** Current concurrency limit. */
    public static final String LIMIT_NAME = "limit";
    /** Per-call measurements. */
    public static final String CALL_NAME = "call";
    /** Number of requests currently in flight. */
    public static final String INFLIGHT_NAME = "inflight";
    /** Limit allocated to a single partition of a partitioned limiter. */
    public static final String PARTITION_LIMIT_NAME = "limit.partition";
    /** Minimum observed round-trip time. */
    public static final String MIN_RTT_NAME = "min_rtt";
    /** Minimum round-trip time within the current sampling window. */
    public static final String WINDOW_MIN_RTT_NAME = "min_window_rtt";
    /** Estimated queue size within the current sampling window. */
    public static final String WINDOW_QUEUE_SIZE_NAME = "queue_size";

    private MetricIds() {
        // constants only; never instantiated
    }
}
| 3,063 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/Limit.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits;
import java.util.function.Consumer;
/**
* Contract for an algorithm that calculates a concurrency limit based on
* rtt measurements
*/
/**
 * Contract for an algorithm that calculates a concurrency limit based on
 * rtt measurements
 */
public interface Limit {
    /**
     * @return Current estimated limit
     */
    int getLimit();

    /**
     * Register a callback to receive notification whenever the limit is updated to a new value
     * @param consumer receives the new limit value on each change
     */
    void notifyOnChange(Consumer<Integer> consumer);

    /**
     * Update the limiter with a sample
     * @param startTime time the operation started (units are implementation-defined; TODO confirm nanos)
     * @param rtt measured round-trip time for the operation
     * @param inflight number of concurrent requests at the time of the sample
     * @param didDrop true if the request was dropped (timed out or rejected downstream)
     */
    void onSample(long startTime, long rtt, int inflight, boolean didDrop);
}
| 3,064 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/LimiterRegistry.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits;
/**
* {@link Limiter} lookup for integrations that support multiple Limiters, i.e. one per RPC method.
*
* @param <ContextT>
*/
/**
 * {@link Limiter} lookup for integrations that support multiple Limiters, i.e. one per RPC method.
 *
 * @param <ContextT> context type passed to the resolved limiters
 */
public interface LimiterRegistry<ContextT> {
    /**
     * @param key lookup key, e.g. a fully qualified RPC method name
     * @return the limiter for this key
     */
    Limiter<ContextT> get(String key);

    /**
     * @return a registry that resolves every key to the same single limiter
     */
    static <ContextT> LimiterRegistry<ContextT> single(Limiter<ContextT> limiter) {
        return key -> limiter;
    }
}
| 3,065 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/VegasLimit.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit;
import com.netflix.concurrency.limits.MetricIds;
import com.netflix.concurrency.limits.MetricRegistry;
import com.netflix.concurrency.limits.MetricRegistry.SampleListener;
import com.netflix.concurrency.limits.internal.EmptyMetricRegistry;
import com.netflix.concurrency.limits.internal.Preconditions;
import com.netflix.concurrency.limits.limit.functions.Log10RootFunction;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
/**
* Limiter based on TCP Vegas where the limit increases by alpha if the queue_use is small ({@literal <} alpha)
* and decreases by alpha if the queue_use is large ({@literal >} beta).
*
* Queue size is calculated using the formula,
* queue_use = limit − BWE×RTTnoLoad = limit × (1 − RTTnoLoad/RTTactual)
*
* For traditional TCP Vegas alpha is typically 2-3 and beta is typically 4-6. To allow for better growth and
* stability at higher limits we set alpha=Max(3, 10% of the current limit) and beta=Max(6, 20% of the current limit)
*/
public class VegasLimit extends AbstractLimit {
    private static final Logger LOG = LoggerFactory.getLogger(VegasLimit.class);

    // Log10-root lookup used to scale alpha/beta/threshold and the default
    // increase/decrease step sizes with the current limit.
    private static final Function<Integer, Integer> LOG10 = Log10RootFunction.create(0);

    public static class Builder {
        private int initialLimit = 20;
        private int maxConcurrency = 1000;
        private MetricRegistry registry = EmptyMetricRegistry.INSTANCE;
        private double smoothing = 1.0;
        // Default alpha/beta grow with the limit: alpha = 3*log10(limit), beta = 6*log10(limit)
        private Function<Integer, Integer> alphaFunc = (limit) -> 3 * LOG10.apply(limit.intValue());
        private Function<Integer, Integer> betaFunc = (limit) -> 6 * LOG10.apply(limit.intValue());
        private Function<Integer, Integer> thresholdFunc = (limit) -> LOG10.apply(limit.intValue());
        private Function<Double, Double> increaseFunc = (limit) -> limit + LOG10.apply(limit.intValue());
        private Function<Double, Double> decreaseFunc = (limit) -> limit - LOG10.apply(limit.intValue());
        private int probeMultiplier = 30;

        private Builder() {
        }

        /**
         * The limiter will probe for a new noload RTT every probeMultiplier * current limit
         * iterations. Default value is 30.
         * @param probeMultiplier
         * @return Chainable builder
         */
        public Builder probeMultiplier(int probeMultiplier) {
            this.probeMultiplier = probeMultiplier;
            return this;
        }

        /** Fixed alpha (queue-size threshold below which the limit may grow). */
        public Builder alpha(int alpha) {
            this.alphaFunc = (ignore) -> alpha;
            return this;
        }

        /** Queue-size threshold (as a function of limit) for aggressive growth. */
        public Builder threshold(Function<Integer, Integer> threshold) {
            this.thresholdFunc = threshold;
            return this;
        }

        /** Alpha as a function of the current limit. */
        public Builder alpha(Function<Integer, Integer> alpha) {
            this.alphaFunc = alpha;
            return this;
        }

        /** Fixed beta (queue-size threshold above which the limit is decreased). */
        public Builder beta(int beta) {
            this.betaFunc = (ignore) -> beta;
            return this;
        }

        /** Beta as a function of the current limit. */
        public Builder beta(Function<Integer, Integer> beta) {
            this.betaFunc = beta;
            return this;
        }

        /** Function producing the new (larger) limit when increasing. */
        public Builder increase(Function<Double, Double> increase) {
            this.increaseFunc = increase;
            return this;
        }

        /** Function producing the new (smaller) limit when decreasing. */
        public Builder decrease(Function<Double, Double> decrease) {
            this.decreaseFunc = decrease;
            return this;
        }

        /** Smoothing factor in (0, 1]; 1.0 (default) applies new limits immediately. */
        public Builder smoothing(double smoothing) {
            this.smoothing = smoothing;
            return this;
        }

        /** Initial limit estimate before any samples are observed. */
        public Builder initialLimit(int initialLimit) {
            this.initialLimit = initialLimit;
            return this;
        }

        /** @deprecated No longer used; retained for source compatibility. */
        @Deprecated
        public Builder tolerance(double tolerance) {
            return this;
        }

        /** Hard upper bound on the estimated limit. */
        public Builder maxConcurrency(int maxConcurrency) {
            this.maxConcurrency = maxConcurrency;
            return this;
        }

        /** @deprecated No longer used; retained for source compatibility. */
        @Deprecated
        public Builder backoffRatio(double ratio) {
            return this;
        }

        /** Registry used to publish the min-RTT distribution. */
        public Builder metricRegistry(MetricRegistry registry) {
            this.registry = registry;
            return this;
        }

        public VegasLimit build() {
            return new VegasLimit(this);
        }
    }

    public static Builder newBuilder() {
        return new Builder();
    }

    public static VegasLimit newDefault() {
        return newBuilder().build();
    }

    /**
     * Estimated concurrency limit based on our algorithm
     */
    private volatile double estimatedLimit;

    // Best (lowest) observed RTT, treated as the no-load RTT; 0 means "not yet observed"
    private volatile long rtt_noload = 0;

    /**
     * Maximum allowed limit providing an upper bound failsafe
     */
    private final int maxLimit;

    private final double smoothing;

    private final Function<Integer, Integer> alphaFunc;
    private final Function<Integer, Integer> betaFunc;
    private final Function<Integer, Integer> thresholdFunc;
    private final Function<Double, Double> increaseFunc;
    private final Function<Double, Double> decreaseFunc;
    private final SampleListener rttSampleListener;

    // Probe cadence: a fresh no-load RTT is adopted roughly every
    // probeJitter * probeMultiplier * estimatedLimit samples.
    private final int probeMultiplier;
    private int probeCount = 0;
    private double probeJitter;

    private VegasLimit(Builder builder) {
        super(builder.initialLimit);
        this.estimatedLimit = builder.initialLimit;
        this.maxLimit = builder.maxConcurrency;
        this.alphaFunc = builder.alphaFunc;
        this.betaFunc = builder.betaFunc;
        this.increaseFunc = builder.increaseFunc;
        this.decreaseFunc = builder.decreaseFunc;
        this.thresholdFunc = builder.thresholdFunc;
        this.smoothing = builder.smoothing;
        this.probeMultiplier = builder.probeMultiplier;

        resetProbeJitter();

        this.rttSampleListener = builder.registry.distribution(MetricIds.MIN_RTT_NAME);
    }

    // Randomize the probe interval by 50-100% so multiple instances don't probe in lockstep
    private void resetProbeJitter() {
        probeJitter = ThreadLocalRandom.current().nextDouble(0.5, 1);
    }

    private boolean shouldProbe() {
        return probeJitter * probeMultiplier * estimatedLimit <= probeCount;
    }

    @Override
    protected int _update(long startTime, long rtt, int inflight, boolean didDrop) {
        Preconditions.checkArgument(rtt > 0, "rtt must be >0 but got " + rtt);
        probeCount++;
        if (shouldProbe()) {
            // Periodically re-adopt the current RTT as the no-load baseline so the
            // estimate can recover if conditions changed (e.g. topology or load shift)
            LOG.debug("Probe MinRTT {}", TimeUnit.NANOSECONDS.toMicros(rtt) / 1000.0);
            resetProbeJitter();
            probeCount = 0;
            rtt_noload = rtt;
            return (int)estimatedLimit;
        }

        if (rtt_noload == 0 || rtt < rtt_noload) {
            // A new minimum RTT resets the baseline without adjusting the limit
            LOG.debug("New MinRTT {}", TimeUnit.NANOSECONDS.toMicros(rtt) / 1000.0);
            rtt_noload = rtt;
            return (int)estimatedLimit;
        }

        rttSampleListener.addSample(rtt_noload);

        return updateEstimatedLimit(rtt, inflight, didDrop);
    }

    private int updateEstimatedLimit(long rtt, int inflight, boolean didDrop) {
        // Vegas queue estimate: queue_use = limit * (1 - RTTnoLoad / RTTactual)
        final int queueSize = (int) Math.ceil(estimatedLimit * (1 - (double)rtt_noload / rtt));

        double newLimit;
        // Treat any drop (i.e timeout) as needing to reduce the limit
        if (didDrop) {
            newLimit = decreaseFunc.apply(estimatedLimit);
        // Prevent upward drift if not close to the limit
        } else if (inflight * 2 < estimatedLimit) {
            return (int)estimatedLimit;
        } else {
            int alpha = alphaFunc.apply((int)estimatedLimit);
            int beta = betaFunc.apply((int)estimatedLimit);
            int threshold = this.thresholdFunc.apply((int)estimatedLimit);

            // Aggressive increase when no queuing
            if (queueSize <= threshold) {
                newLimit = estimatedLimit + beta;
            // Increase the limit if queue is still manageable
            } else if (queueSize < alpha) {
                newLimit = increaseFunc.apply(estimatedLimit);
            // Detecting latency so decrease
            } else if (queueSize > beta) {
                newLimit = decreaseFunc.apply(estimatedLimit);
            // We're within the sweet spot so nothing to do
            } else {
                return (int)estimatedLimit;
            }
        }

        // Clamp to [1, maxLimit], then blend with the old estimate per the smoothing factor
        newLimit = Math.max(1, Math.min(maxLimit, newLimit));
        newLimit = (1 - smoothing) * estimatedLimit + smoothing * newLimit;
        if ((int)newLimit != (int)estimatedLimit && LOG.isDebugEnabled()) {
            LOG.debug("New limit={} minRtt={} ms winRtt={} ms queueSize={}",
                    (int)newLimit,
                    TimeUnit.NANOSECONDS.toMicros(rtt_noload) / 1000.0,
                    TimeUnit.NANOSECONDS.toMicros(rtt) / 1000.0,
                    queueSize);
        }
        estimatedLimit = newLimit;
        return (int)estimatedLimit;
    }

    @Override
    public String toString() {
        return "VegasLimit [limit=" + getLimit() +
                ", rtt_noload=" + TimeUnit.NANOSECONDS.toMicros(rtt_noload) / 1000.0 +
                " ms]";
    }
}
| 3,066 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/WindowedLimit.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit;
import com.netflix.concurrency.limits.Limit;
import com.netflix.concurrency.limits.internal.Preconditions;
import com.netflix.concurrency.limits.limit.window.AverageSampleWindowFactory;
import com.netflix.concurrency.limits.limit.window.SampleWindow;
import com.netflix.concurrency.limits.limit.window.SampleWindowFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
/**
 * {@link Limit} decorator that aggregates raw samples into time-based windows
 * and forwards one summarized sample per window to the delegate algorithm.
 * This smooths per-request noise and bounds how often the delegate's
 * estimate is recomputed.
 */
public class WindowedLimit implements Limit {
    private static final long DEFAULT_MIN_WINDOW_TIME = TimeUnit.SECONDS.toNanos(1);
    private static final long DEFAULT_MAX_WINDOW_TIME = TimeUnit.SECONDS.toNanos(1);
    // Samples faster than this carry little queuing signal and are discarded.
    private static final long DEFAULT_MIN_RTT_THRESHOLD = TimeUnit.MICROSECONDS.toNanos(100);
    /**
     * Minimum observed samples to filter out sample windows with not enough significant samples
     */
    private static final int DEFAULT_WINDOW_SIZE = 10;
    public static Builder newBuilder() {
        return new Builder();
    }
    /** Fluent configuration for {@link WindowedLimit}. */
    public static class Builder {
        private long maxWindowTime = DEFAULT_MAX_WINDOW_TIME;
        private long minWindowTime = DEFAULT_MIN_WINDOW_TIME;
        private int windowSize = DEFAULT_WINDOW_SIZE;
        private long minRttThreshold = DEFAULT_MIN_RTT_THRESHOLD;
        private SampleWindowFactory sampleWindowFactory = AverageSampleWindowFactory.create();
        /**
         * Minimum window duration for sampling a new minRtt
         */
        public Builder minWindowTime(long minWindowTime, TimeUnit units) {
            Preconditions.checkArgument(units.toMillis(minWindowTime) >= 100, "minWindowTime must be >= 100 ms");
            this.minWindowTime = units.toNanos(minWindowTime);
            return this;
        }
        /**
         * Maximum window duration for sampling a new minRtt
         */
        public Builder maxWindowTime(long maxWindowTime, TimeUnit units) {
            Preconditions.checkArgument(units.toMillis(maxWindowTime) >= 100, "maxWindowTime must be >= 100 ms");
            this.maxWindowTime = units.toNanos(maxWindowTime);
            return this;
        }
        /**
         * Minimum sampling window size for finding a new minimum rtt
         */
        public Builder windowSize(int windowSize) {
            Preconditions.checkArgument(windowSize >= 10, "Window size must be >= 10");
            this.windowSize = windowSize;
            return this;
        }
        /** RTTs below this threshold are ignored entirely (treated as noise). */
        public Builder minRttThreshold(long threshold, TimeUnit units) {
            this.minRttThreshold = units.toNanos(threshold);
            return this;
        }
        /** Strategy used to accumulate samples within a window (e.g. average or percentile). */
        public Builder sampleWindowFactory(SampleWindowFactory sampleWindowFactory) {
            this.sampleWindowFactory = sampleWindowFactory;
            return this;
        }
        public WindowedLimit build(Limit delegate) {
            return new WindowedLimit(this, delegate);
        }
    }
    // Underlying limit algorithm fed with one aggregated sample per window.
    private final Limit delegate;
    /**
     * End time for the sampling window at which point the limit should be updated
     */
    private volatile long nextUpdateTime = 0;
    private final long minWindowTime;
    private final long maxWindowTime;
    private final int windowSize;
    private final long minRttThreshold;
    // Guards the window-rotation critical section in onSample.
    private final Object lock = new Object();
    private final SampleWindowFactory sampleWindowFactory;
    /**
     * Object tracking stats for the current sample window
     */
    private final AtomicReference<SampleWindow> sample;
    private WindowedLimit(Builder builder, Limit delegate) {
        this.delegate = delegate;
        this.minWindowTime = builder.minWindowTime;
        this.maxWindowTime = builder.maxWindowTime;
        this.windowSize = builder.windowSize;
        this.minRttThreshold = builder.minRttThreshold;
        this.sampleWindowFactory = builder.sampleWindowFactory;
        this.sample = new AtomicReference<>(sampleWindowFactory.newInstance());
    }
    @Override
    public void notifyOnChange(Consumer<Integer> consumer) {
        delegate.notifyOnChange(consumer);
    }
    /**
     * Accumulate one sample into the current window; when the window's end
     * time has passed, atomically rotate the window and forward the
     * aggregated stats to the delegate (if the window had enough samples).
     */
    @Override
    public void onSample(long startTime, long rtt, int inflight, boolean didDrop) {
        long endTime = startTime + rtt;
        if (rtt < minRttThreshold) {
            return;
        }
        sample.updateAndGet(current -> current.addSample(rtt, inflight, didDrop));
        if (endTime > nextUpdateTime) {
            synchronized (lock) {
                // Double check under the lock
                if (endTime > nextUpdateTime) {
                    SampleWindow current = sample.getAndSet(sampleWindowFactory.newInstance());
                    // Next window lasts ~2x the observed RTT, clamped to [minWindowTime, maxWindowTime].
                    nextUpdateTime = endTime + Math.min(Math.max(current.getCandidateRttNanos() * 2, minWindowTime), maxWindowTime);
                    if (isWindowReady(current)) {
                        delegate.onSample(startTime, current.getTrackedRttNanos(), current.getMaxInFlight(), current.didDrop());
                    }
                }
            }
        }
    }
    // A window is significant only if it saw at least one RTT and enough samples.
    private boolean isWindowReady(SampleWindow sample) {
        return sample.getCandidateRttNanos() < Long.MAX_VALUE && sample.getSampleCount() >= windowSize;
    }
    @Override
    public int getLimit() {
        return delegate.getLimit();
    }
}
| 3,067 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/Gradient2Limit.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit;
import com.netflix.concurrency.limits.MetricIds;
import com.netflix.concurrency.limits.MetricRegistry;
import com.netflix.concurrency.limits.MetricRegistry.SampleListener;
import com.netflix.concurrency.limits.internal.EmptyMetricRegistry;
import com.netflix.concurrency.limits.internal.Preconditions;
import com.netflix.concurrency.limits.limit.measurement.ExpAvgMeasurement;
import com.netflix.concurrency.limits.limit.measurement.Measurement;
import com.netflix.concurrency.limits.limit.measurement.SingleMeasurement;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
/**
* Concurrency limit algorithm that adjusts the limit based on the gradient of change of the current average RTT and
* a long term exponentially smoothed average RTT. Unlike traditional congestion control algorithms we use average
* instead of minimum since RPC methods can be very bursty due to various factors such as non-homogenous request
* processing complexity as well as a wide distribution of data size. We have also found that using minimum can result
* in an bias towards an impractically low base RTT resulting in excessive load shedding. An exponential decay is
* applied to the base RTT so that the value is kept stable yet is allowed to adapt to long term changes in latency
* characteristics.
*
* The core algorithm re-calculates the limit every sampling window (ex. 1 second) using the formula
*
* // Calculate the gradient limiting to the range [0.5, 1.0] to filter outliers
* gradient = max(0.5, min(1.0, longtermRtt / currentRtt));
*
* // Calculate the new limit by applying the gradient and allowing for some queuing
* newLimit = gradient * currentLimit + queueSize;
*
* // Update the limit using a smoothing factor (default 0.2)
* newLimit = currentLimit * (1-smoothing) + newLimit * smoothing
*
* The limit can be in one of three main states
*
* 1. Steady state
*
* In this state the average RTT is very stable and the current measurement whipsaws around this value, sometimes reducing
* the limit, sometimes increasing it.
*
* 2. Transition from steady state to load
*
* In this state either the RPS to latency has spiked. The gradient is {@literal <} 1.0 due to a growing request queue that
* cannot be handled by the system. Excessive requests and rejected due to the low limit. The baseline RTT grows using
* exponential decay but lags the current measurement, which keeps the gradient {@literal <} 1.0 and limit low.
*
* 3. Transition from load to steady state
*
* In this state the system goes back to steady state after a prolonged period of excessive load. Requests aren't rejected
* and the sample RTT remains low. During this state the long term RTT may take some time to go back to normal and could
* potentially be several multiples higher than the current RTT.
*/
public final class Gradient2Limit extends AbstractLimit {
    private static final Logger LOG = LoggerFactory.getLogger(Gradient2Limit.class);
    /** Fluent configuration for {@link Gradient2Limit}. */
    public static class Builder {
        private int initialLimit = 20;
        private int minLimit = 20;
        private int maxConcurrency = 200;
        private double smoothing = 0.2;
        private Function<Integer, Integer> queueSize = concurrency -> 4;
        private MetricRegistry registry = EmptyMetricRegistry.INSTANCE;
        private int longWindow = 600;
        private double rttTolerance = 1.5;
        /**
         * Initial limit used by the limiter
         * @param initialLimit
         * @return Chainable builder
         */
        public Builder initialLimit(int initialLimit) {
            this.initialLimit = initialLimit;
            return this;
        }
        /**
         * Minimum concurrency limit allowed.  The minimum helps prevent the algorithm from adjusting the limit
         * too far down.  Note that this limit is not desirable when used as backpressure for batch apps.
         *
         * @param minLimit
         * @return Chainable builder
         */
        public Builder minLimit(int minLimit) {
            this.minLimit = minLimit;
            return this;
        }
        /**
         * Maximum allowable concurrency.  Any estimated concurrency will be capped
         * at this value
         * @param maxConcurrency
         * @return Chainable builder
         */
        public Builder maxConcurrency(int maxConcurrency) {
            this.maxConcurrency = maxConcurrency;
            return this;
        }
        /**
         * Fixed amount the estimated limit can grow while latencies remain low
         * @param queueSize
         * @return Chainable builder
         */
        public Builder queueSize(int queueSize) {
            this.queueSize = (ignore) -> queueSize;
            return this;
        }
        /**
         * Function to dynamically determine the amount the estimated limit can grow while
         * latencies remain low as a function of the current limit.
         * @param queueSize
         * @return Chainable builder
         */
        public Builder queueSize(Function<Integer, Integer> queueSize) {
            this.queueSize = queueSize;
            return this;
        }
        /**
         * Tolerance for changes in minimum latency.
         * @param rttTolerance Value {@literal >}= 1.0 indicating how much change in minimum latency is acceptable
         * before reducing the limit.  For example, a value of 2.0 means that a 2x increase in latency is acceptable.
         * @return Chainable builder
         */
        public Builder rttTolerance(double rttTolerance) {
            Preconditions.checkArgument(rttTolerance >= 1.0, "Tolerance must be >= 1.0");
            this.rttTolerance = rttTolerance;
            return this;
        }
        /**
         * Maximum multiple of the fast window after which we need to reset the limiter
         * @param multiplier
         * @return
         */
        @Deprecated
        public Builder driftMultiplier(int multiplier) {
            return this;
        }
        /**
         * Smoothing factor to limit how aggressively the estimated limit can shrink
         * when queuing has been detected.
         * @param smoothing Value of 0.0 to 1.0 where 1.0 means the limit is completely
         * replicated by the new estimate.
         * @return Chainable builder
         */
        public Builder smoothing(double smoothing) {
            this.smoothing = smoothing;
            return this;
        }
        /**
         * Registry for reporting metrics about the limiter's internal state.
         * @param registry
         * @return Chainable builder
         */
        public Builder metricRegistry(MetricRegistry registry) {
            this.registry = registry;
            return this;
        }
        @Deprecated
        public Builder shortWindow(int n) {
            return this;
        }
        public Builder longWindow(int n) {
            this.longWindow = n;
            return this;
        }
        public Gradient2Limit build() {
            return new Gradient2Limit(this);
        }
    }
    public static Builder newBuilder() {
        return new Builder();
    }
    public static Gradient2Limit newDefault() {
        return newBuilder().build();
    }
    /**
     * Estimated concurrency limit based on our algorithm
     */
    private volatile double estimatedLimit;
    /**
     * Tracks a measurement of the short time, and more volatile, RTT meant to represent the current system latency
     */
    private long lastRtt;
    /**
     * Tracks a measurement of the long term, less volatile, RTT meant to represent the baseline latency.  When the
     * system is under load this number is expected to trend higher.
     */
    private final Measurement longRtt;
    /**
     * Maximum allowed limit providing an upper bound failsafe
     */
    private final int maxLimit;
    private final int minLimit;
    private final Function<Integer, Integer> queueSize;
    private final double smoothing;
    private final SampleListener longRttSampleListener;
    private final SampleListener shortRttSampleListener;
    private final SampleListener queueSizeSampleListener;
    private final double tolerance;
    private Gradient2Limit(Builder builder) {
        super(builder.initialLimit);
        this.estimatedLimit = builder.initialLimit;
        this.maxLimit = builder.maxConcurrency;
        this.minLimit = builder.minLimit;
        this.queueSize = builder.queueSize;
        this.smoothing = builder.smoothing;
        this.tolerance = builder.rttTolerance;
        this.lastRtt = 0;
        this.longRtt = new ExpAvgMeasurement(builder.longWindow, 10);
        this.longRttSampleListener = builder.registry.distribution(MetricIds.MIN_RTT_NAME);
        this.shortRttSampleListener = builder.registry.distribution(MetricIds.WINDOW_MIN_RTT_NAME);
        this.queueSizeSampleListener = builder.registry.distribution(MetricIds.WINDOW_QUEUE_SIZE_NAME);
    }
    /**
     * Update the limit using the gradient between the exponentially smoothed
     * long-term RTT and the current sample RTT, allowing for a small queue.
     *
     * @param startTime sample start time (unused here)
     * @param rtt sampled round trip time in nanoseconds
     * @param inflight requests in flight when the sample completed
     * @param didDrop true if the request was dropped or timed out (not used by this algorithm)
     * @return the updated concurrency limit
     */
    @Override
    public int _update(final long startTime, final long rtt, final int inflight, final boolean didDrop) {
        final double queueSize = this.queueSize.apply((int)this.estimatedLimit);
        this.lastRtt = rtt;
        final double shortRtt = (double)rtt;
        final double longRtt = this.longRtt.add(rtt).doubleValue();
        shortRttSampleListener.addSample(shortRtt);
        longRttSampleListener.addSample(longRtt);
        queueSizeSampleListener.addSample(queueSize);
        // If the long RTT is substantially larger than the short RTT then reduce the long RTT measurement.
        // This can happen when latency returns to normal after a prolonged period of excessive load.  Reducing the
        // long RTT without waiting for the exponential smoothing helps bring the system back to steady state.
        if (longRtt / shortRtt > 2) {
            this.longRtt.update(current -> current.doubleValue() * 0.95);
        }
        // Don't grow the limit if we are app limited
        if (inflight < estimatedLimit / 2) {
            return (int) estimatedLimit;
        }
        // Rtt could be higher than rtt_noload because of smoothing rtt noload updates
        // so set to 1.0 to indicate no queuing.  Otherwise calculate the slope and don't
        // allow it to be reduced by more than half to avoid aggressive load-shedding due to
        // outliers.
        final double gradient = Math.max(0.5, Math.min(1.0, tolerance * longRtt / shortRtt));
        double newLimit = estimatedLimit * gradient + queueSize;
        newLimit = estimatedLimit * (1 - smoothing) + newLimit * smoothing;
        newLimit = Math.max(minLimit, Math.min(maxLimit, newLimit));
        // Fix: the previous guard compared the int-cast old limit against the raw double,
        // which is true on virtually every sample (newLimit is rarely integral), so the
        // debug args were boxed and formatted on every update.  Match the sibling limits:
        // log only when the integer limit actually changes and debug logging is enabled.
        if ((int)newLimit != (int)estimatedLimit && LOG.isDebugEnabled()) {
            LOG.debug("New limit={} shortRtt={} ms longRtt={} ms queueSize={} gradient={}",
                    (int)newLimit,
                    getLastRtt(TimeUnit.MICROSECONDS) / 1000.0,
                    getRttNoLoad(TimeUnit.MICROSECONDS) / 1000.0,
                    queueSize,
                    gradient);
        }
        estimatedLimit = newLimit;
        return (int)estimatedLimit;
    }
    /** Most recent RTT sample converted to the requested unit. */
    public long getLastRtt(TimeUnit units) {
        return units.convert(lastRtt, TimeUnit.NANOSECONDS);
    }
    /** Long-term (baseline) RTT estimate converted to the requested unit. */
    public long getRttNoLoad(TimeUnit units) {
        return units.convert(longRtt.get().longValue(), TimeUnit.NANOSECONDS);
    }
    @Override
    public String toString() {
        // Fix: previously reported itself as "GradientLimit", which is a different class.
        return "Gradient2Limit [limit=" + (int)estimatedLimit + "]";
    }
}
| 3,068 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/AbstractLimit.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit;
import com.netflix.concurrency.limits.Limit;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.Consumer;
/**
 * Base class for {@link Limit} implementations: stores the current limit,
 * serializes sample processing, and notifies registered listeners whenever
 * the limit value changes.
 */
public abstract class AbstractLimit implements Limit {
    /** Most recently computed limit; volatile so readers never block. */
    private volatile int limit;
    /** Callbacks invoked when the limit changes; COW list allows lock-free iteration. */
    private final List<Consumer<Integer>> listeners = new CopyOnWriteArrayList<>();

    protected AbstractLimit(int initialLimit) {
        this.limit = initialLimit;
    }

    /** Feed one sample to the algorithm and publish the resulting limit. */
    @Override
    public final synchronized void onSample(long startTime, long rtt, int inflight, boolean didDrop) {
        setLimit(_update(startTime, rtt, inflight, didDrop));
    }

    /**
     * Compute the new limit from a single sample.  Called under the
     * {@code onSample} lock, so implementations need not synchronize.
     */
    protected abstract int _update(long startTime, long rtt, int inflight, boolean didDrop);

    @Override
    public final int getLimit() {
        return limit;
    }

    /** Store the new limit and, if it differs from the old one, notify listeners. */
    protected synchronized void setLimit(int newLimit) {
        if (newLimit == limit) {
            return;
        }
        limit = newLimit;
        for (Consumer<Integer> listener : listeners) {
            listener.accept(newLimit);
        }
    }

    @Override
    public void notifyOnChange(Consumer<Integer> consumer) {
        this.listeners.add(consumer);
    }
}
| 3,069 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/AIMDLimit.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit;
import com.netflix.concurrency.limits.Limit;
import com.netflix.concurrency.limits.internal.Preconditions;
import java.util.concurrent.TimeUnit;
/**
* Loss based dynamic {@link Limit} that does an additive increment as long as
* there are no errors and a multiplicative decrement when there is an error.
*/
public final class AIMDLimit extends AbstractLimit {
    private static final long DEFAULT_TIMEOUT = TimeUnit.SECONDS.toNanos(5);

    /** Fluent configuration for {@link AIMDLimit}. */
    public static class Builder {
        private int minLimit = 20;
        private int initialLimit = 20;
        private int maxLimit = 200;
        private double backoffRatio = 0.9;
        private long timeout = DEFAULT_TIMEOUT;

        /** Starting limit before any samples have been observed. */
        public Builder initialLimit(int initialLimit) {
            this.initialLimit = initialLimit;
            return this;
        }

        /** Floor below which the limit will never be reduced. */
        public Builder minLimit(int minLimit) {
            this.minLimit = minLimit;
            return this;
        }

        /** Ceiling above which the limit will never grow. */
        public Builder maxLimit(int maxLimit) {
            this.maxLimit = maxLimit;
            return this;
        }

        /** Multiplicative factor applied to the limit when a drop is detected. */
        public Builder backoffRatio(double backoffRatio) {
            Preconditions.checkArgument(backoffRatio < 1.0 && backoffRatio >= 0.5, "Backoff ratio must be in the range [0.5, 1.0)");
            this.backoffRatio = backoffRatio;
            return this;
        }

        /**
         * Timeout threshold that when exceeded equates to a drop.
         * @param timeout
         * @param units
         * @return Chainable builder
         */
        public Builder timeout(long timeout, TimeUnit units) {
            Preconditions.checkArgument(timeout > 0, "Timeout must be positive");
            this.timeout = units.toNanos(timeout);
            return this;
        }

        public AIMDLimit build() {
            return new AIMDLimit(this);
        }
    }

    public static Builder newBuilder() {
        return new Builder();
    }

    private final double backoffRatio;
    private final long timeout;
    private final int minLimit;
    private final int maxLimit;

    private AIMDLimit(Builder builder) {
        super(builder.initialLimit);
        this.backoffRatio = builder.backoffRatio;
        this.timeout = builder.timeout;
        this.maxLimit = builder.maxLimit;
        this.minLimit = builder.minLimit;
    }

    /**
     * Additive-increase / multiplicative-decrease: shrink on a drop or a
     * slow sample, grow by one only while operating near the limit.
     */
    @Override
    protected int _update(long startTime, long rtt, int inflight, boolean didDrop) {
        // A drop, or latency above the timeout threshold, is treated as congestion.
        final boolean congested = didDrop || rtt > timeout;
        int next = getLimit();
        if (congested) {
            // Multiplicative decrease.
            next = (int) (next * backoffRatio);
        } else if (inflight * 2 >= next) {
            // Additive increase, but only when actually operating near the limit.
            next += 1;
        }
        // Clamp into the configured band.
        return Math.min(maxLimit, Math.max(minLimit, next));
    }

    @Override
    public String toString() {
        return String.format("AIMDLimit [limit=%d]", getLimit());
    }
}
| 3,070 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/GradientLimit.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit;
import com.netflix.concurrency.limits.MetricIds;
import com.netflix.concurrency.limits.MetricRegistry;
import com.netflix.concurrency.limits.MetricRegistry.SampleListener;
import com.netflix.concurrency.limits.internal.EmptyMetricRegistry;
import com.netflix.concurrency.limits.internal.Preconditions;
import com.netflix.concurrency.limits.limit.functions.SquareRootFunction;
import com.netflix.concurrency.limits.limit.measurement.Measurement;
import com.netflix.concurrency.limits.limit.measurement.MinimumMeasurement;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
/**
* Concurrency limit algorithm that adjust the limits based on the gradient of change in the
* samples minimum RTT and absolute minimum RTT allowing for a queue of square root of the
* current limit. Why square root? Because it's better than a fixed queue size that becomes too
* small for large limits but still prevents the limit from growing too much by slowing down
* growth as the limit grows.
*/
public final class GradientLimit extends AbstractLimit {
    private static final int DISABLED = -1;
    private static final Logger LOG = LoggerFactory.getLogger(GradientLimit.class);
    /** Fluent configuration for {@link GradientLimit}. */
    public static class Builder {
        private int initialLimit = 50;
        private int minLimit = 1;
        private int maxConcurrency = 1000;
        private double smoothing = 0.2;
        private Function<Integer, Integer> queueSize = SquareRootFunction.create(4);
        private MetricRegistry registry = EmptyMetricRegistry.INSTANCE;
        private double rttTolerance = 2.0;
        private int probeInterval = 1000;
        private double backoffRatio = 0.9;
        /**
         * Minimum threshold for accepting a new rtt sample.  Any RTT lower than this threshold
         * will be discarded.
         *
         * @param minRttThreshold
         * @param units
         * @return Chainable builder
         */
        @Deprecated
        public Builder minRttThreshold(long minRttThreshold, TimeUnit units) {
            return this;
        }
        /**
         * Initial limit used by the limiter
         * @param initialLimit
         * @return Chainable builder
         */
        public Builder initialLimit(int initialLimit) {
            this.initialLimit = initialLimit;
            return this;
        }
        /**
         * Minimum concurrency limit allowed.  The minimum helps prevent the algorithm from adjusting the limit
         * too far down.  Note that this limit is not desirable when used as backpressure for batch apps.
         *
         * @param minLimit
         * @return Chainable builder
         */
        public Builder minLimit(int minLimit) {
            this.minLimit = minLimit;
            return this;
        }
        /**
         * Tolerance for changes in minimum latency.
         * @param rttTolerance Value {@literal >}= 1.0 indicating how much change in minimum latency is acceptable
         * before reducing the limit.  For example, a value of 2.0 means that a 2x increase in latency is acceptable.
         * @return Chainable builder
         */
        public Builder rttTolerance(double rttTolerance) {
            Preconditions.checkArgument(rttTolerance >= 1.0, "Tolerance must be >= 1.0");
            this.rttTolerance = rttTolerance;
            return this;
        }
        /**
         * Maximum allowable concurrency.  Any estimated concurrency will be capped
         * at this value
         * @param maxConcurrency
         * @return Chainable builder
         */
        public Builder maxConcurrency(int maxConcurrency) {
            this.maxConcurrency = maxConcurrency;
            return this;
        }
        /**
         * Fixed amount the estimated limit can grow while latencies remain low
         * @param queueSize
         * @return Chainable builder
         */
        public Builder queueSize(int queueSize) {
            this.queueSize = (ignore) -> queueSize;
            return this;
        }
        /**
         * Function to dynamically determine the amount the estimated limit can grow while
         * latencies remain low as a function of the current limit.
         * @param queueSize
         * @return Chainable builder
         */
        public Builder queueSize(Function<Integer, Integer> queueSize) {
            this.queueSize = queueSize;
            return this;
        }
        /**
         * Smoothing factor to limit how aggressively the estimated limit can shrink
         * when queuing has been detected.
         * @param smoothing Value of 0.0 to 1.0 where 1.0 means the limit is completely
         * replicated by the new estimate.
         * @return Chainable builder
         */
        public Builder smoothing(double smoothing) {
            this.smoothing = smoothing;
            return this;
        }
        /**
         * Registry for reporting metrics about the limiter's internal state.
         * @param registry
         * @return Chainable builder
         */
        public Builder metricRegistry(MetricRegistry registry) {
            this.registry = registry;
            return this;
        }
        /**
         * Ratio applied to the limit when a timeout was identified within the sampling window.  The default value is
         * 0.9.  A value of 1.0 means no backoff.
         * @param backoffRatio
         * @return
         */
        public Builder backoffRatio(double backoffRatio) {
            Preconditions.checkArgument(backoffRatio >= 0.5 && backoffRatio <= 1.0, "backoffRatio must be in the range [0.5, 1.0]");
            this.backoffRatio = backoffRatio;
            return this;
        }
        @Deprecated
        public Builder probeMultiplier(int probeMultiplier) {
            return this;
        }
        /**
         * The limiter will probe for a new noload RTT every probeInterval
         * updates.  Default value is 1000.  Set to -1 to disable
         * @param probeInterval
         * @return Chainable builder
         */
        public Builder probeInterval(int probeInterval) {
            this.probeInterval = probeInterval;
            return this;
        }
        public GradientLimit build() {
            return new GradientLimit(this);
        }
    }
    public static Builder newBuilder() {
        return new Builder();
    }
    public static GradientLimit newDefault() {
        return newBuilder().build();
    }
    /**
     * Estimated concurrency limit based on our algorithm
     */
    private volatile double estimatedLimit;
    private long lastRtt = 0;
    // Minimum-tracking measurement of the no-load RTT, in nanoseconds.
    private final Measurement rttNoLoadMeasurement;
    /**
     * Maximum allowed limit providing an upper bound failsafe
     */
    private final int maxLimit;
    private final int minLimit;
    private final Function<Integer, Integer> queueSize;
    private final double smoothing;
    private final double rttTolerance;
    private final double backoffRatio;
    private final SampleListener minRttSampleListener;
    private final SampleListener minWindowRttSampleListener;
    private final SampleListener queueSizeSampleListener;
    private final int probeInterval;
    // Countdown of updates remaining until the next no-load RTT probe.
    private int resetRttCounter;
    private GradientLimit(Builder builder) {
        super(builder.initialLimit);
        this.estimatedLimit = builder.initialLimit;
        this.maxLimit = builder.maxConcurrency;
        this.minLimit = builder.minLimit;
        this.queueSize = builder.queueSize;
        this.smoothing = builder.smoothing;
        this.rttTolerance = builder.rttTolerance;
        this.backoffRatio = builder.backoffRatio;
        this.probeInterval = builder.probeInterval;
        this.resetRttCounter = nextProbeCountdown();
        this.rttNoLoadMeasurement = new MinimumMeasurement();
        this.minRttSampleListener = builder.registry.distribution(MetricIds.MIN_RTT_NAME);
        this.minWindowRttSampleListener = builder.registry.distribution(MetricIds.WINDOW_MIN_RTT_NAME);
        this.queueSizeSampleListener = builder.registry.distribution(MetricIds.WINDOW_QUEUE_SIZE_NAME);
    }
    // Randomized countdown in [probeInterval, 2*probeInterval) to avoid fleet-wide sync.
    private int nextProbeCountdown() {
        if (probeInterval == DISABLED) {
            return DISABLED;
        }
        return probeInterval + ThreadLocalRandom.current().nextInt(probeInterval);
    }
    /**
     * Adjust the limit from the gradient between the tracked no-load RTT and
     * the sampled RTT, periodically resetting the baseline via probing.
     *
     * @param startTime sample start time (unused here)
     * @param rtt sampled round trip time in nanoseconds
     * @param inflight requests in flight when the sample completed
     * @param didDrop true if the request was dropped or timed out
     * @return the updated concurrency limit
     */
    @Override
    public int _update(final long startTime, final long rtt, final int inflight, final boolean didDrop) {
        lastRtt = rtt;
        minWindowRttSampleListener.addSample(rtt);
        final double queueSize = this.queueSize.apply((int)this.estimatedLimit);
        queueSizeSampleListener.addSample(queueSize);
        // Reset or probe for a new noload RTT and a new estimatedLimit.  It's necessary to cut the limit
        // in half to avoid having the limit drift upwards when the RTT is probed during heavy load.
        // To avoid decreasing the limit too much we don't allow it to go lower than the queueSize.
        if (probeInterval != DISABLED && resetRttCounter-- <= 0) {
            resetRttCounter = nextProbeCountdown();
            estimatedLimit = Math.max(minLimit, queueSize);
            rttNoLoadMeasurement.reset();
            lastRtt = 0;
            LOG.debug("Probe MinRTT limit={}", getLimit());
            return (int)estimatedLimit;
        }
        final long rttNoLoad = rttNoLoadMeasurement.add(rtt).longValue();
        minRttSampleListener.addSample(rttNoLoad);
        // Rtt could be higher than rtt_noload because of smoothing rtt noload updates
        // so set to 1.0 to indicate no queuing.  Otherwise calculate the slope and don't
        // allow it to be reduced by more than half to avoid aggressive load-shedding due to
        // outliers.
        final double gradient = Math.max(0.5, Math.min(1.0, rttTolerance * rttNoLoad / rtt));
        double newLimit;
        // Reduce the limit aggressively if there was a drop
        if (didDrop) {
            newLimit = estimatedLimit * backoffRatio;
        // Don't grow the limit if we are app limited
        } else if (inflight < estimatedLimit / 2) {
            return (int)estimatedLimit;
        } else {
            newLimit = estimatedLimit * gradient + queueSize;
        }
        // Smooth only downward moves so growth remains responsive.
        if (newLimit < estimatedLimit) {
            newLimit = Math.max(minLimit, estimatedLimit * (1-smoothing) + smoothing*(newLimit));
        }
        newLimit = Math.max(queueSize, Math.min(maxLimit, newLimit));
        if ((int)newLimit != (int)estimatedLimit && LOG.isDebugEnabled()) {
            LOG.debug("New limit={} minRtt={} ms winRtt={} ms queueSize={} gradient={} resetCounter={}",
                    (int)newLimit,
                    TimeUnit.NANOSECONDS.toMicros(rttNoLoad)/1000.0,
                    TimeUnit.NANOSECONDS.toMicros(rtt)/1000.0,
                    queueSize,
                    gradient,
                    resetRttCounter);
        }
        estimatedLimit = newLimit;
        return (int)estimatedLimit;
    }
    /** Most recent RTT sample converted to the requested unit. */
    public long getLastRtt(TimeUnit units) {
        return units.convert(lastRtt, TimeUnit.NANOSECONDS);
    }
    /** Tracked no-load RTT converted to the requested unit. */
    public long getRttNoLoad(TimeUnit units) {
        return units.convert(rttNoLoadMeasurement.get().longValue(), TimeUnit.NANOSECONDS);
    }
    @Override
    public String toString() {
        // Fix: the measurement holds nanoseconds, so convert nanos -> micros -> ms.
        // (The previous MICROSECONDS.toMillis(...) treated nanos as micros; the numeric
        // result coincided only because both paths divide by 1000 twice.)
        return "GradientLimit [limit=" + (int)estimatedLimit +
                ", rtt_noload=" + TimeUnit.NANOSECONDS.toMicros(rttNoLoadMeasurement.get().longValue()) / 1000.0 +
                " ms]";
    }
}
| 3,071 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/FixedLimit.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit;
/**
 * {@link AbstractLimit} whose value is constant and never reacts to observed
 * samples. Useful as a static concurrency cap.
 */
public final class FixedLimit extends AbstractLimit {
    /**
     * @param limit the constant concurrency limit
     * @return a new limit pinned to {@code limit}
     */
    public static FixedLimit of(int limit) {
        return new FixedLimit(limit);
    }

    private FixedLimit(int limit) {
        super(limit);
    }

    /** Samples are ignored; the configured limit is always returned unchanged. */
    @Override
    public int _update(long startTime, long rtt, int inflight, boolean didDrop) {
        return getLimit();
    }

    @Override
    public String toString() {
        return "FixedLimit [limit=" + getLimit() + "]";
    }
}
| 3,072 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/SettableLimit.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit;
import com.netflix.concurrency.limits.Limit;
/**
 * {@link Limit} to be used mostly for testing where the limit can be manually
 * adjusted via {@link #setLimit(int)} and is never changed by samples.
 */
public class SettableLimit extends AbstractLimit {
    /**
     * @param limit initial limit value
     * @return a new {@link SettableLimit} starting at {@code limit}
     */
    public static SettableLimit startingAt(int limit) {
        return new SettableLimit(limit);
    }

    public SettableLimit(int limit) {
        super(limit);
    }

    /** Samples never move the limit; only {@link #setLimit(int)} does. */
    @Override
    protected int _update(long startTime, long rtt, int inflight, boolean didDrop) {
        return getLimit();
    }

    /**
     * Manually set the current limit.
     *
     * @param limit new limit value
     */
    @Override
    public synchronized void setLimit(int limit) {
        super.setLimit(limit);
    }

    @Override
    public String toString() {
        return "SettableLimit [limit=" + getLimit() + "]";
    }
}
| 3,073 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/TracingLimitDecorator.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit;
import com.netflix.concurrency.limits.Limit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
/**
 * {@link Limit} decorator that logs each sample at DEBUG level before
 * forwarding it to the wrapped limit. All limit behavior is delegated.
 */
public class TracingLimitDecorator implements Limit {
    private static final Logger LOG = LoggerFactory.getLogger(TracingLimitDecorator.class);

    private final Limit delegate;

    /**
     * @param delegate limit to wrap
     * @return a tracing decorator around {@code delegate}
     */
    public static TracingLimitDecorator wrap(Limit delegate) {
        return new TracingLimitDecorator(delegate);
    }

    public TracingLimitDecorator(Limit delegate) {
        this.delegate = delegate;
    }

    @Override
    public int getLimit() {
        return delegate.getLimit();
    }

    @Override
    public void onSample(long startTime, long rtt, int inflight, boolean didDrop) {
        // Convert nanoseconds to fractional milliseconds for readability.
        final double rttMs = TimeUnit.NANOSECONDS.toMicros(rtt) / 1000.0;
        LOG.debug("maxInFlight={} minRtt={} ms", inflight, rttMs);
        delegate.onSample(startTime, rtt, inflight, didDrop);
    }

    @Override
    public void notifyOnChange(Consumer<Integer> consumer) {
        delegate.notifyOnChange(consumer);
    }
}
| 3,074 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/measurement/MinimumMeasurement.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit.measurement;
import java.util.function.Function;
/**
 * {@link Measurement} tracking the minimum of all added samples.
 *
 * Note that 0.0 doubles as the "no samples yet" sentinel, so a literal 0.0
 * sample is treated as "unset" rather than retained as the minimum.
 */
public class MinimumMeasurement implements Measurement {
    // Primitive double avoids repeated autoboxing on the hot add() path
    // (the original used a boxed Double for no benefit).
    private double value = 0.0;

    @Override
    public Number add(Number sample) {
        if (value == 0.0 || sample.doubleValue() < value) {
            value = sample.doubleValue();
        }
        return value;
    }

    @Override
    public Number get() {
        return value;
    }

    @Override
    public void reset() {
        value = 0.0;
    }

    @Override
    public void update(Function<Number, Number> operation) {
        // Intentionally a no-op: an in-place transform has no meaningful
        // semantics for minimum tracking. NOTE(review): confirm no caller
        // relies on update() taking effect here.
    }
}
| 3,075 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/measurement/SingleMeasurement.java | package com.netflix.concurrency.limits.limit.measurement;
import java.util.function.Function;
/**
 * {@link Measurement} that simply remembers the most recent sample.
 */
public class SingleMeasurement implements Measurement {
    // Latest sample, or null when no sample has been recorded yet.
    private Number value = null;

    @Override
    public Number add(Number sample) {
        value = sample;
        return value;
    }

    @Override
    public Number get() {
        return value;
    }

    @Override
    public void reset() {
        value = null;
    }

    @Override
    public void update(Function<Number, Number> operation) {
        value = operation.apply(value);
    }
}
| 3,076 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/measurement/Measurement.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit.measurement;
import java.util.function.Function;
/**
 * Contract for tracking a measurement such as a minimum or average of a sample set
 */
public interface Measurement {
    /**
     * Add a single sample and update the internal state.
     *
     * @param sample sample value to fold into the measurement
     * @return the measurement value after incorporating the sample
     */
    Number add(Number sample);
    /**
     * @return Return the current value
     */
    Number get();
    /**
     * Reset the internal state as if no samples were ever added
     */
    void reset();
    /**
     * Apply a transform to the current value in place. Semantics are
     * implementation specific; some implementations ignore the operation.
     *
     * @param operation transform from the current value to the new value
     */
    void update(Function<Number, Number> operation);
}
| 3,077 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/measurement/ExpAvgMeasurement.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit.measurement;
import java.util.function.BiFunction;
import java.util.function.Function;
/**
 * {@link Measurement} tracking an exponential moving average of samples.
 *
 * The first {@code warmupWindow} samples are averaged arithmetically to seed
 * the estimate; afterwards each sample is folded in with a constant smoothing
 * factor of 2 / (window + 1).
 */
public class ExpAvgMeasurement implements Measurement {
    private final int warmupWindow;
    // The smoothing factor is fixed for a given window size, so compute it
    // once instead of on every sample (the original recomputed it per add()).
    private final double factor;

    // Primitive doubles avoid the autoboxing the original boxed fields caused.
    private double value = 0.0;
    private double sum = 0.0;
    private int count = 0;

    /**
     * @param window window size used to derive the EMA smoothing factor
     * @param warmupWindow number of initial samples averaged arithmetically
     */
    public ExpAvgMeasurement(int window, int warmupWindow) {
        this.warmupWindow = warmupWindow;
        this.factor = 2.0 / (window + 1);
    }

    @Override
    public Number add(Number sample) {
        if (count < warmupWindow) {
            // Arithmetic mean while warming up.
            count++;
            sum += sample.doubleValue();
            value = sum / count;
        } else {
            // Standard exponential moving average update thereafter.
            value = value * (1 - factor) + sample.doubleValue() * factor;
        }
        return value;
    }

    @Override
    public Number get() {
        return value;
    }

    @Override
    public void reset() {
        value = 0.0;
        count = 0;
        sum = 0.0;
    }

    @Override
    public void update(Function<Number, Number> operation) {
        this.value = operation.apply(value).doubleValue();
    }
}
| 3,078 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/window/AverageSampleWindowFactory.java | /**
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit.window;
/**
 * {@link SampleWindowFactory} producing {@link ImmutableAverageSampleWindow}
 * instances. The factory is stateless, hence exposed as a shared singleton.
 */
public class AverageSampleWindowFactory implements SampleWindowFactory {
    private static final AverageSampleWindowFactory INSTANCE = new AverageSampleWindowFactory();

    private AverageSampleWindowFactory() {
    }

    /** @return the shared factory instance */
    public static AverageSampleWindowFactory create() {
        return INSTANCE;
    }

    @Override
    public ImmutableAverageSampleWindow newInstance() {
        return new ImmutableAverageSampleWindow();
    }
}
| 3,079 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/window/ImmutablePercentileSampleWindow.java | /**
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit.window;
import java.util.Arrays;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLongArray;
/**
 * {@link SampleWindow} tracking the minimum RTT, a configurable percentile of
 * observed RTTs, the maximum inflight count, and whether any sample was dropped.
 *
 * NOTE(review): the backing {@link AtomicLongArray} is shared, not copied,
 * between successive instances produced by {@link #addSample}. This is only
 * safe while a single window is appended to at a time — confirm against the
 * windowed limit's AtomicReference usage.
 */
class ImmutablePercentileSampleWindow implements SampleWindow {
    private final long minRtt;
    private final int maxInFlight;
    private final boolean didDrop;
    private final AtomicLongArray observedRtts;
    private final int sampleCount;
    private final double percentile;

    ImmutablePercentileSampleWindow(double percentile, int windowSize) {
        this.minRtt = Long.MAX_VALUE;
        this.maxInFlight = 0;
        this.didDrop = false;
        this.observedRtts = new AtomicLongArray(windowSize);
        this.sampleCount = 0;
        this.percentile = percentile;
    }

    private ImmutablePercentileSampleWindow(
            long minRtt,
            int maxInFlight,
            boolean didDrop,
            AtomicLongArray observedRtts,
            int sampleCount,
            double percentile
    ) {
        this.minRtt = minRtt;
        this.maxInFlight = maxInFlight;
        this.didDrop = didDrop;
        this.observedRtts = observedRtts;
        this.sampleCount = sampleCount;
        this.percentile = percentile;
    }

    @Override
    public ImmutablePercentileSampleWindow addSample(long rtt, int inflight, boolean didDrop) {
        // Once the window is full additional samples are ignored entirely;
        // their rtt/inflight/drop information is intentionally not merged.
        if (sampleCount >= observedRtts.length()) {
            return this;
        }
        observedRtts.set(sampleCount, rtt);
        return new ImmutablePercentileSampleWindow(
                Math.min(minRtt, rtt),
                Math.max(inflight, this.maxInFlight),
                this.didDrop || didDrop,
                observedRtts,
                sampleCount + 1,
                percentile
        );
    }

    @Override
    public long getCandidateRttNanos() {
        return minRtt;
    }

    @Override
    public long getTrackedRttNanos() {
        if (sampleCount == 0) {
            return 0;
        }
        long[] copyOfObservedRtts = new long[sampleCount];
        for (int i = 0; i < sampleCount; i++) {
            copyOfObservedRtts[i] = observedRtts.get(i);
        }
        Arrays.sort(copyOfObservedRtts);
        // Clamp the 1-based rank to [1, sampleCount]: for small windows and a
        // low percentile, round(sampleCount * percentile) can be 0 (e.g.
        // round(1 * 0.4)), which previously indexed the array at -1 and threw
        // an ArrayIndexOutOfBoundsException.
        int rank = (int) Math.round(sampleCount * percentile);
        int clampedRank = Math.min(sampleCount, Math.max(1, rank));
        return copyOfObservedRtts[clampedRank - 1];
    }

    @Override
    public int getMaxInFlight() {
        return maxInFlight;
    }

    @Override
    public int getSampleCount() {
        return sampleCount;
    }

    @Override
    public boolean didDrop() {
        return didDrop;
    }

    @Override
    public String toString() {
        return "ImmutablePercentileSampleWindow ["
                + "minRtt=" + TimeUnit.NANOSECONDS.toMicros(minRtt) / 1000.0
                + ", p" + percentile + " rtt=" + TimeUnit.NANOSECONDS.toMicros(getTrackedRttNanos()) / 1000.0
                + ", maxInFlight=" + maxInFlight
                + ", sampleCount=" + sampleCount
                + ", didDrop=" + didDrop + "]";
    }
}
| 3,080 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/window/SampleWindow.java | /**
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit.window;
/**
* Implementations of this interface are being used to track immutable samples in an AtomicReference
*
* @see com.netflix.concurrency.limits.limit.WindowedLimit
*/
public interface SampleWindow {
SampleWindow addSample(long rtt, int inflight, boolean dropped);
long getCandidateRttNanos();
long getTrackedRttNanos();
int getMaxInFlight();
int getSampleCount();
boolean didDrop();
}
| 3,081 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/window/ImmutableAverageSampleWindow.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit.window;
import java.util.concurrent.TimeUnit;
/**
 * {@link SampleWindow} aggregating samples as a running sum, so the tracked
 * RTT is the arithmetic mean of all samples in the window.
 */
class ImmutableAverageSampleWindow implements SampleWindow {
    private final long minRtt;
    private final long sum;
    private final int maxInFlight;
    private final int sampleCount;
    private final boolean didDrop;

    /** Creates an empty window with no recorded samples. */
    ImmutableAverageSampleWindow() {
        this(Long.MAX_VALUE, 0, 0, 0, false);
    }

    ImmutableAverageSampleWindow(long minRtt, long sum, int maxInFlight, int sampleCount, boolean didDrop) {
        this.minRtt = minRtt;
        this.sum = sum;
        this.maxInFlight = maxInFlight;
        this.sampleCount = sampleCount;
        this.didDrop = didDrop;
    }

    @Override
    public ImmutableAverageSampleWindow addSample(long rtt, int inflight, boolean dropped) {
        final long newMinRtt = Math.min(rtt, minRtt);
        final int newMaxInFlight = Math.max(inflight, maxInFlight);
        return new ImmutableAverageSampleWindow(newMinRtt, sum + rtt, newMaxInFlight, sampleCount + 1, didDrop || dropped);
    }

    @Override
    public long getCandidateRttNanos() {
        return minRtt;
    }

    @Override
    public long getTrackedRttNanos() {
        if (sampleCount == 0) {
            return 0;
        }
        return sum / sampleCount;
    }

    @Override
    public int getMaxInFlight() {
        return maxInFlight;
    }

    @Override
    public int getSampleCount() {
        return sampleCount;
    }

    @Override
    public boolean didDrop() {
        return didDrop;
    }

    @Override
    public String toString() {
        return "ImmutableAverageSampleWindow ["
                + "minRtt=" + TimeUnit.NANOSECONDS.toMicros(minRtt) / 1000.0
                + ", avgRtt=" + TimeUnit.NANOSECONDS.toMicros(getTrackedRttNanos()) / 1000.0
                + ", maxInFlight=" + maxInFlight
                + ", sampleCount=" + sampleCount
                + ", didDrop=" + didDrop + "]";
    }
}
| 3,082 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/window/PercentileSampleWindowFactory.java | /**
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit.window;
import com.netflix.concurrency.limits.internal.Preconditions;
/**
 * {@link SampleWindowFactory} producing {@link ImmutablePercentileSampleWindow}
 * instances that track the given percentile over a fixed-size window.
 */
public class PercentileSampleWindowFactory implements SampleWindowFactory {
    private final double percentile;
    private final int windowSize;

    private PercentileSampleWindowFactory(double percentile, int windowSize) {
        this.percentile = percentile;
        this.windowSize = windowSize;
    }

    /**
     * @param percentile percentile to track, in the open interval (0, 1.0)
     * @param windowSize maximum number of samples per window; must be positive
     * @return a factory for percentile-tracking sample windows
     */
    public static PercentileSampleWindowFactory of(double percentile, int windowSize) {
        Preconditions.checkArgument(percentile > 0 && percentile < 1.0, "Percentile should belong to (0, 1.0)");
        // Guard against a zero/negative window, which would silently create
        // windows that can never record a sample.
        Preconditions.checkArgument(windowSize > 0, "Window size should be positive");
        return new PercentileSampleWindowFactory(percentile, windowSize);
    }

    @Override
    public ImmutablePercentileSampleWindow newInstance() {
        return new ImmutablePercentileSampleWindow(percentile, windowSize);
    }
}
| 3,083 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/window/SampleWindowFactory.java | /**
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit.window;
/**
 * Factory for creating fresh, empty {@link SampleWindow} instances, invoked
 * each time a windowed limit rolls over to a new sampling window.
 */
public interface SampleWindowFactory {
    /** @return a new, empty sample window */
    SampleWindow newInstance();
}
| 3,084 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/functions/Log10RootFunction.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit.functions;
import java.util.function.Function;
import java.util.stream.IntStream;
/**
 * Function used by limiters to calculate thresholds using log10 of the current limit.
 * Here we pre-compute the log10 of numbers up to 1000 as an optimization.
 */
public final class Log10RootFunction implements Function<Integer, Integer> {
    // Pre-computed log10 values for 0..999, floored at 1. Index 0 also yields 1
    // because (int)Math.log10(0) is Integer.MIN_VALUE, clamped by Math.max.
    static final int[] lookup = new int[1000];

    static {
        IntStream.range(0, 1000).forEach(i -> lookup[i] = Math.max(1, (int)Math.log10(i)));
    }

    private static final Log10RootFunction INSTANCE = new Log10RootFunction();

    /**
     * Create an instance of a function that returns : baseline + log10(limit)
     * (the previous javadoc incorrectly said sqrt).
     *
     * @param baseline constant added to the log10 of the input
     * @return function computing log10(t) + baseline, with log10 floored at 1 for t &lt; 1000
     */
    public static Function<Integer, Integer> create(int baseline) {
        return INSTANCE.andThen(t -> t + baseline);
    }

    @Override
    public Integer apply(Integer t) {
        return t < 1000 ? lookup[t] : (int)Math.log10(t);
    }
}
| 3,085 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/functions/SquareRootFunction.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limit.functions;
import java.util.function.Function;
import java.util.stream.IntStream;
/**
 * Specialized utility function used by limiters to calculate thresholds using square root
 * of the current limit. Here we pre-compute the square root of numbers up to 1000 because
 * the square root operation can be slow.
 */
public final class SquareRootFunction implements Function<Integer, Integer> {
    // Pre-computed square roots for 0..999, floored at 1.
    static final int[] lookup = new int[1000];

    static {
        IntStream.range(0, 1000).forEach(i -> lookup[i] = Math.max(1, (int)Math.sqrt(i)));
    }

    private static final SquareRootFunction INSTANCE = new SquareRootFunction();

    /**
     * Create an instance of a function that returns : max(baseline, sqrt(limit))
     * (the previous javadoc incorrectly described this as baseline + sqrt(limit)).
     *
     * @param baseline lower bound for the returned threshold
     * @return function computing max(baseline, sqrt(t))
     */
    public static Function<Integer, Integer> create(int baseline) {
        return INSTANCE.andThen(t -> Math.max(baseline, t));
    }

    @Override
    public Integer apply(Integer t) {
        return t < 1000 ? lookup[t] : (int)Math.sqrt(t);
    }
}
| 3,086 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limiter/AbstractPartitionedLimiter.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limiter;
import com.netflix.concurrency.limits.Limiter;
import com.netflix.concurrency.limits.MetricIds;
import com.netflix.concurrency.limits.MetricRegistry;
import com.netflix.concurrency.limits.internal.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Function;
public abstract class AbstractPartitionedLimiter<ContextT> extends AbstractLimiter<ContextT> {
private static final Logger LOG = LoggerFactory.getLogger(AbstractPartitionedLimiter.class);
private static final String PARTITION_TAG_NAME = "partition";
public abstract static class Builder<BuilderT extends AbstractLimiter.Builder<BuilderT>, ContextT> extends AbstractLimiter.Builder<BuilderT> {
private List<Function<ContextT, String>> partitionResolvers = new ArrayList<>();
private final Map<String, Partition> partitions = new LinkedHashMap<>();
private int maxDelayedThreads = 100;
/**
* Add a resolver from context to a partition name. Multiple resolvers may be added and will be processed in
* order with this first non-null value response used as the partition name. If all resolvers return null then
* the unknown partition is used
* @param contextToPartition
* @return Chainable builder
*/
public BuilderT partitionResolver(Function<ContextT, String> contextToPartition) {
this.partitionResolvers.add(contextToPartition);
return self();
}
/**
* Specify percentage of limit guarantees for a partition. The total sum of partitions must add up to 100%
* @param name
* @param percent
* @return Chainable builder
*/
public BuilderT partition(String name, double percent) {
Preconditions.checkArgument(name != null, "Partition name may not be null");
Preconditions.checkArgument(percent >= 0.0 && percent <= 1.0, "Partition percentage must be in the range [0.0, 1.0]");
partitions.computeIfAbsent(name, Partition::new).setPercent(percent);
return self();
}
/**
* Delay introduced in the form of a sleep to slow down the caller from the server side. Because this can hold
* off RPC threads it is not recommended to set a delay when using a direct executor in an event loop. Also,
* a max of 100 threads may be delayed before immediately returning
* @param name
* @param duration
* @param units
* @return Chainable builder
*/
public BuilderT partitionRejectDelay(String name, long duration, TimeUnit units) {
partitions.computeIfAbsent(name, Partition::new).setBackoffMillis(units.toMillis(duration));
return self();
}
/**
* Set the maximum number of threads that can be held up or delayed when rejecting excessive traffic for a partition.
* The default value is 100.
* @param maxDelayedThreads
* @return Chainable builder
*/
public BuilderT maxDelayedThreads(int maxDelayedThreads) {
this.maxDelayedThreads = maxDelayedThreads;
return self();
}
protected boolean hasPartitions() {
return !partitions.isEmpty();
}
public Limiter<ContextT> build() {
return this.hasPartitions() && !partitionResolvers.isEmpty()
? new AbstractPartitionedLimiter<ContextT>(this) {}
: new SimpleLimiter<>(this);
}
}
static class Partition {
private final String name;
private double percent = 0.0;
private int limit = 0;
private int busy = 0;
private long backoffMillis = 0;
private MetricRegistry.SampleListener inflightDistribution;
Partition(String name) {
this.name = name;
}
Partition setPercent(double percent) {
this.percent = percent;
return this;
}
Partition setBackoffMillis(long backoffMillis) {
this.backoffMillis = backoffMillis;
return this;
}
void updateLimit(int totalLimit) {
// Calculate this bin's limit while rounding up and ensuring the value
// is at least 1. With this technique the sum of bin limits may end up being
// higher than the concurrency limit.
this.limit = (int)Math.max(1, Math.ceil(totalLimit * percent));
}
boolean isLimitExceeded() {
return busy >= limit;
}
void acquire() {
busy++;
inflightDistribution.addSample(busy);
}
void release() {
busy--;
}
int getLimit() {
return limit;
}
public int getInflight() {
return busy;
}
double getPercent() {
return percent;
}
void createMetrics(MetricRegistry registry) {
this.inflightDistribution = registry.distribution(MetricIds.INFLIGHT_NAME, PARTITION_TAG_NAME, name);
registry.gauge(MetricIds.PARTITION_LIMIT_NAME, this::getLimit, PARTITION_TAG_NAME, name);
}
@Override
public String toString() {
return "Partition [pct=" + percent + ", limit=" + limit + ", busy=" + busy + "]";
}
}
private final Map<String, Partition> partitions;
private final Partition unknownPartition;
private final List<Function<ContextT, String>> partitionResolvers;
private final ReentrantLock lock = new ReentrantLock();
private final AtomicInteger delayedThreads = new AtomicInteger();
private final int maxDelayedThreads;
public AbstractPartitionedLimiter(Builder<?, ContextT> builder) {
super(builder);
Preconditions.checkArgument(!builder.partitions.isEmpty(), "No partitions specified");
Preconditions.checkArgument(builder.partitions.values().stream().map(Partition::getPercent).reduce(0.0, Double::sum) <= 1.0,
"Sum of percentages must be <= 1.0");
this.partitions = new HashMap<>(builder.partitions);
this.partitions.forEach((name, partition) -> partition.createMetrics(builder.registry));
this.unknownPartition = new Partition("unknown");
this.unknownPartition.createMetrics(builder.registry);
this.partitionResolvers = builder.partitionResolvers;
this.maxDelayedThreads = builder.maxDelayedThreads;
onNewLimit(getLimit());
}
private Partition resolvePartition(ContextT context) {
for (Function<ContextT, String> resolver : this.partitionResolvers) {
String name = resolver.apply(context);
if (name != null) {
Partition partition = partitions.get(name);
if (partition != null) {
return partition;
}
}
}
return unknownPartition;
}
    @Override
    public Optional<Listener> acquire(ContextT context) {
        final Partition partition = resolvePartition(context);
        try {
            lock.lock();
            // Reject when both the global limit and this partition's share are exhausted.
            if (getInflight() >= getLimit() && partition.isLimitExceeded()) {
                lock.unlock();
                // Optional backpressure: sleep before rejecting, with the number of
                // simultaneously sleeping threads bounded by maxDelayedThreads.
                // NOTE(review): the get()/incrementAndGet() pair is not atomic, so
                // the bound can be slightly exceeded under contention — confirm
                // this best-effort behavior is acceptable.
                if (partition.backoffMillis > 0 && delayedThreads.get() < maxDelayedThreads) {
                    try {
                        delayedThreads.incrementAndGet();
                        TimeUnit.MILLISECONDS.sleep(partition.backoffMillis);
                    } catch (InterruptedException e) {
                        // Preserve the interrupt for the caller; still reject below.
                        Thread.currentThread().interrupt();
                    } finally {
                        delayedThreads.decrementAndGet();
                    }
                }
                return createRejectedListener();
            }
            partition.acquire();
            final Listener listener = createListener();
            // Wrap the delegate listener so the partition's busy count is released
            // on every possible outcome.
            return Optional.of(new Listener() {
                @Override
                public void onSuccess() {
                    listener.onSuccess();
                    releasePartition(partition);
                }
                @Override
                public void onIgnore() {
                    listener.onIgnore();
                    releasePartition(partition);
                }
                @Override
                public void onDropped() {
                    listener.onDropped();
                    releasePartition(partition);
                }
            });
        } finally {
            // The lock is released early on the rejection path (before the backoff
            // sleep); only unlock here if this thread still holds it.
            if (lock.isHeldByCurrentThread())
                lock.unlock();
        }
    }
private void releasePartition(Partition partition) {
try {
lock.lock();
partition.release();
} finally {
lock.unlock();
}
}
@Override
protected void onNewLimit(int newLimit) {
super.onNewLimit(newLimit);
partitions.forEach((name, partition) -> partition.updateLimit(newLimit));
}
    // Package-private accessor (used by tests): returns the configured partition
    // with the given name, or null if no such partition exists.
    Partition getPartition(String name) {
        return partitions.get(name);
    }
}
| 3,087 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limiter/AbstractLimiter.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limiter;
import com.netflix.concurrency.limits.Limit;
import com.netflix.concurrency.limits.Limiter;
import com.netflix.concurrency.limits.MetricIds;
import com.netflix.concurrency.limits.MetricRegistry;
import com.netflix.concurrency.limits.internal.EmptyMetricRegistry;
import com.netflix.concurrency.limits.limit.VegasLimit;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;
/**
 * Base {@link Limiter} implementation that connects a concurrency {@link Limit}
 * algorithm to in-flight request tracking and per-outcome metrics. Subclasses
 * decide how requests are admitted; this class supplies listener creation, the
 * in-flight counter and a cached copy of the algorithm's current limit.
 *
 * @param <ContextT> type of context used to identify a request
 */
public abstract class AbstractLimiter<ContextT> implements Limiter<ContextT> {
    public static final String ID_TAG = "id";
    public static final String STATUS_TAG = "status";
    /**
     * Common builder options shared by all concrete limiter builders.
     */
    public abstract static class Builder<BuilderT extends Builder<BuilderT>> {
        private static final AtomicInteger idCounter = new AtomicInteger();
        private Limit limit = VegasLimit.newDefault();
        private Supplier<Long> clock = System::nanoTime;
        // Default name is unique within the process so metric ids do not collide.
        protected String name = "unnamed-" + idCounter.incrementAndGet();
        protected MetricRegistry registry = EmptyMetricRegistry.INSTANCE;
        /** Set the name used to tag this limiter's metrics. */
        public BuilderT named(String name) {
            this.name = name;
            return self();
        }
        /** Set the limit algorithm (default: {@link VegasLimit#newDefault()}). */
        public BuilderT limit(Limit limit) {
            this.limit = limit;
            return self();
        }
        /** Set the clock used for RTT samples, in nanoseconds (default: {@link System#nanoTime()}). */
        public BuilderT clock(Supplier<Long> clock) {
            this.clock = clock;
            return self();
        }
        /** Set the registry metrics are reported to (default: no-op). */
        public BuilderT metricRegistry(MetricRegistry registry) {
            this.registry = registry;
            return self();
        }
        /** @return {@code this} typed as the concrete builder, for chaining. */
        protected abstract BuilderT self();
    }
    private final AtomicInteger inFlight = new AtomicInteger();
    private final Supplier<Long> clock;
    private final Limit limitAlgorithm;
    private final MetricRegistry.Counter successCounter;
    private final MetricRegistry.Counter droppedCounter;
    private final MetricRegistry.Counter ignoredCounter;
    private final MetricRegistry.Counter rejectedCounter;
    // Cached copy of the algorithm's current limit; refreshed via onNewLimit.
    private volatile int limit;
    protected AbstractLimiter(Builder<?> builder) {
        this.clock = builder.clock;
        this.limitAlgorithm = builder.limit;
        this.limit = limitAlgorithm.getLimit();
        // Keep the cached limit in sync with the algorithm's adjustments.
        this.limitAlgorithm.notifyOnChange(this::onNewLimit);
        builder.registry.gauge(MetricIds.LIMIT_NAME, this::getLimit);
        this.successCounter = builder.registry.counter(MetricIds.CALL_NAME, ID_TAG, builder.name, STATUS_TAG, "success");
        this.droppedCounter = builder.registry.counter(MetricIds.CALL_NAME, ID_TAG, builder.name, STATUS_TAG, "dropped");
        this.ignoredCounter = builder.registry.counter(MetricIds.CALL_NAME, ID_TAG, builder.name, STATUS_TAG, "ignored");
        this.rejectedCounter = builder.registry.counter(MetricIds.CALL_NAME, ID_TAG, builder.name, STATUS_TAG, "rejected");
    }
    /**
     * Record a rejection and return an empty listener, indicating the request
     * was not admitted.
     */
    protected Optional<Listener> createRejectedListener() {
        this.rejectedCounter.increment();
        return Optional.empty();
    }
    /**
     * Admit a request: increments the in-flight count and returns a listener
     * that reports the outcome back to the limit algorithm. Only success and
     * dropped outcomes feed RTT samples to the algorithm; ignored ones do not.
     */
    protected Listener createListener() {
        final long startTime = clock.get();
        final int currentInflight = inFlight.incrementAndGet();
        return new Listener() {
            @Override
            public void onSuccess() {
                inFlight.decrementAndGet();
                successCounter.increment();
                limitAlgorithm.onSample(startTime, clock.get() - startTime, currentInflight, false);
            }
            @Override
            public void onIgnore() {
                inFlight.decrementAndGet();
                ignoredCounter.increment();
            }
            @Override
            public void onDropped() {
                inFlight.decrementAndGet();
                droppedCounter.increment();
                // didDrop=true lets the algorithm back off in response to drops.
                limitAlgorithm.onSample(startTime, clock.get() - startTime, currentInflight, true);
            }
        };
    }
    /** @return the current concurrency limit as last reported by the algorithm. */
    public int getLimit() {
        return limit;
    }
    /** @return the number of requests currently in flight. */
    public int getInflight() { return inFlight.get(); }
    // Subclasses may override to react to limit changes (e.g. resize a semaphore);
    // they must call super to keep the cached value current.
    protected void onNewLimit(int newLimit) {
        limit = newLimit;
    }
}
| 3,088 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limiter/SimpleLimiter.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limiter;
import com.netflix.concurrency.limits.Limiter;
import com.netflix.concurrency.limits.MetricIds;
import com.netflix.concurrency.limits.MetricRegistry;
import java.util.Optional;
import java.util.concurrent.Semaphore;
/**
 * {@link AbstractLimiter} that admits requests using a semaphore whose permit
 * count mirrors the current limit. When no permit is immediately available the
 * request is rejected (no blocking).
 *
 * @param <ContextT> type of context used to identify a request
 */
public class SimpleLimiter<ContextT> extends AbstractLimiter<ContextT> {
    public static class Builder extends AbstractLimiter.Builder<Builder> {
        public <ContextT> SimpleLimiter<ContextT> build() {
            return new SimpleLimiter<>(this);
        }
        @Override
        protected Builder self() {
            return this;
        }
    }
    public static Builder newBuilder() {
        return new Builder();
    }
    // Distribution of in-flight counts sampled at each successful acquire.
    private final MetricRegistry.SampleListener inflightDistribution;
    // Permit count tracks the current limit; resized in onNewLimit.
    private final AdjustableSemaphore semaphore;
    public SimpleLimiter(AbstractLimiter.Builder<?> builder) {
        super(builder);
        this.inflightDistribution = builder.registry.distribution(MetricIds.INFLIGHT_NAME);
        this.semaphore = new AdjustableSemaphore(getLimit());
    }
    @Override
    public Optional<Limiter.Listener> acquire(ContextT context) {
        // Fast-fail when the limit has been reached.
        if (!semaphore.tryAcquire()) {
            return createRejectedListener();
        }
        Listener listener = new Listener(createListener());
        inflightDistribution.addSample(getInflight());
        return Optional.of(listener);
    }
    @Override
    protected void onNewLimit(int newLimit) {
        int oldLimit = this.getLimit();
        super.onNewLimit(newLimit);
        // Grow or shrink the semaphore by the delta. reducePermits may drive the
        // available permit count temporarily negative until in-flight requests
        // complete and release their permits.
        if (newLimit > oldLimit) {
            semaphore.release(newLimit - oldLimit);
        } else {
            semaphore.reducePermits(oldLimit - newLimit);
        }
    }
    /**
     * Simple Semaphore subclass that allows access to its reducePermits method.
     */
    private static final class AdjustableSemaphore extends Semaphore {
        AdjustableSemaphore(int permits) {
            super(permits);
        }
        @Override
        public void reducePermits(int reduction) {
            super.reducePermits(reduction);
        }
    }
    /**
     * Listener decorator that returns the semaphore permit on every outcome.
     */
    private class Listener implements Limiter.Listener {
        private final Limiter.Listener delegate;
        Listener(Limiter.Listener delegate) {
            this.delegate = delegate;
        }
        @Override
        public void onSuccess() {
            delegate.onSuccess();
            semaphore.release();
        }
        @Override
        public void onIgnore() {
            delegate.onIgnore();
            semaphore.release();
        }
        @Override
        public void onDropped() {
            delegate.onDropped();
            semaphore.release();
        }
    }
}
| 3,089 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limiter/LifoBlockingLimiter.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limiter;
import java.util.Deque;
import java.util.LinkedList;
import java.util.Optional;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import com.netflix.concurrency.limits.Limiter;
/**
* {@link Limiter} decorator that blocks the caller when the limit has been reached. This
* strategy ensures the resource is properly protected but favors availability over latency
* by not fast failing requests when the limit has been reached. To help keep success latencies
* low and minimize timeouts any blocked requests are processed in last in/first out order.
*
* Use this limiter only when the threading model allows the limiter to be blocked.
*
* @param <ContextT>
*/
public final class LifoBlockingLimiter<ContextT> implements Limiter<ContextT> {
    public static class Builder<ContextT> {
        private final Limiter<ContextT> delegate;
        private int maxBacklogSize = 100;
        private Function<ContextT, Long> maxBacklogTimeoutMillis = context -> 1_000L;
        private Builder(Limiter<ContextT> delegate) {
            this.delegate = delegate;
        }
        /**
         * Set maximum number of blocked threads
         *
         * @param size New max size. Default is 100.
         * @return Chainable builder
         */
        public Builder<ContextT> backlogSize(int size) {
            this.maxBacklogSize = size;
            return this;
        }
        /**
         * @deprecated Use {@link #backlogSize}
         */
        @Deprecated
        public Builder<ContextT> maxBacklogSize(int size) {
            this.maxBacklogSize = size;
            return this;
        }
        /**
         * Set maximum timeout for threads blocked on the limiter.
         * Default is 1 second.
         *
         * @param timeout
         * @param units
         * @return Chainable builder
         */
        public Builder<ContextT> backlogTimeout(long timeout, TimeUnit units) {
            return backlogTimeoutMillis(units.toMillis(timeout));
        }
        /**
         * Set maximum timeout for threads blocked on the limiter.
         * Default is 1 second.
         *
         * @param timeout
         * @return Chainable builder
         */
        public Builder<ContextT> backlogTimeoutMillis(long timeout) {
            this.maxBacklogTimeoutMillis = context -> timeout;
            return this;
        }
        /**
         * Function to derive the backlog timeout from the request context. This allows timeouts
         * to be set dynamically based on things like request deadlines.
         * @param mapper
         * @param units
         * @return Chainable builder
         */
        public Builder<ContextT> backlogTimeout(Function<ContextT, Long> mapper, TimeUnit units) {
            this.maxBacklogTimeoutMillis = context -> units.toMillis(mapper.apply(context));
            return this;
        }
        public LifoBlockingLimiter<ContextT> build() {
            return new LifoBlockingLimiter<ContextT>(this);
        }
    }
    public static <ContextT> Builder<ContextT> newBuilder(Limiter<ContextT> delegate) {
        return new Builder<ContextT>(delegate);
    }
    private final Limiter<ContextT> delegate;
    /**
     * Holder used to park a blocked caller until another operation releases a
     * listener (or the caller's backlog timeout elapses).
     */
    private static class ListenerHolder<ContextT> {
        private volatile Optional<Listener> listener = Optional.empty();
        private final CountDownLatch latch = new CountDownLatch(1);
        private final ContextT context;
        public ListenerHolder(ContextT context) {
            this.context = context;
        }
        public boolean await(long timeout, TimeUnit unit) throws InterruptedException {
            return latch.await(timeout, unit);
        }
        public void set(Optional<Listener> listener) {
            this.listener = listener;
            latch.countDown();
        }
    }
    /**
     * Lock used to block and unblock callers as the limit is reached
     */
    private final Deque<ListenerHolder<ContextT>> backlog = new LinkedList<>();
    private final AtomicInteger backlogCounter = new AtomicInteger();
    private final int backlogSize;
    private final Function<ContextT, Long> backlogTimeoutMillis;
    private final Object lock = new Object();
    private LifoBlockingLimiter(Builder<ContextT> builder) {
        this.delegate = builder.delegate;
        this.backlogSize = builder.maxBacklogSize;
        this.backlogTimeoutMillis = builder.maxBacklogTimeoutMillis;
    }
    private Optional<Listener> tryAcquire(ContextT context) {
        // Try to acquire a token and return immediately if successful
        final Optional<Listener> listener = delegate.acquire(context);
        if (listener.isPresent()) {
            return listener;
        }
        // Restrict backlog size so the queue doesn't grow unbounded during an outage
        if (backlogCounter.get() >= this.backlogSize) {
            return Optional.empty();
        }
        // Create a holder for a listener and block until a listener is released by another
        // operation. Holders will be unblocked in LIFO order
        backlogCounter.incrementAndGet();
        final ListenerHolder<ContextT> event = new ListenerHolder<>(context);
        try {
            synchronized (lock) {
                backlog.addFirst(event);
            }
            if (!event.await(backlogTimeoutMillis.apply(context), TimeUnit.MILLISECONDS)) {
                // Remove the holder from the backlog. This item is likely to be at the end of the
                // list so do a removeLastOccurrence to minimize the number of items to traverse
                synchronized (lock) {
                    backlog.removeLastOccurrence(event);
                }
                // if we acquired a token just as we were timing out then return it, otherwise the
                // token would get lost
                return event.listener;
            }
            return event.listener;
        } catch (InterruptedException e) {
            synchronized (lock) {
                backlog.removeFirstOccurrence(event);
            }
            Thread.currentThread().interrupt();
            // if we acquired a token just as we were interrupted, then return it
            return event.listener;
        } finally {
            backlogCounter.decrementAndGet();
        }
    }
    /**
     * Hand a freshly-released capacity slot to the most recently blocked caller
     * (LIFO), if any. Called after every listener outcome.
     */
    private void unblock() {
        synchronized (lock) {
            if (!backlog.isEmpty()) {
                final ListenerHolder<ContextT> event = backlog.peekFirst();
                final Optional<Listener> listener = delegate.acquire(event.context);
                if (listener.isPresent()) {
                    backlog.removeFirst();
                    event.set(listener);
                } else {
                    // Still can't acquire the limit. unblock will be called again next time
                    // the limit is released.
                    return;
                }
            }
        }
    }
    @Override
    public Optional<Listener> acquire(ContextT context) {
        return tryAcquire(context).map(delegate -> new Listener() {
            @Override
            public void onSuccess() {
                delegate.onSuccess();
                unblock();
            }
            @Override
            public void onIgnore() {
                delegate.onIgnore();
                unblock();
            }
            @Override
            public void onDropped() {
                delegate.onDropped();
                unblock();
            }
        });
    }
    @Override
    public String toString() {
        // Bug fix: previously reported "BlockingLimiter", a copy-paste from
        // BlockingLimiter.toString(), which made the two indistinguishable in logs.
        return "LifoBlockingLimiter [" + delegate + "]";
    }
}
| 3,090 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limiter/BlockingLimiter.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.limiter;
import com.netflix.concurrency.limits.Limiter;
import com.netflix.concurrency.limits.internal.Preconditions;
import java.time.Duration;
import java.time.Instant;
import java.util.Optional;
/**
* {@link Limiter} that blocks the caller when the limit has been reached. The caller is
* blocked until the limiter has been released, or a timeout is reached. This limiter is
* commonly used in batch clients that use the limiter as a back-pressure mechanism.
*
* @param <ContextT>
*/
public final class BlockingLimiter<ContextT> implements Limiter<ContextT> {
    public static final Duration MAX_TIMEOUT = Duration.ofHours(1);
    /**
     * Wrap a limiter such that acquire will block up to {@link BlockingLimiter#MAX_TIMEOUT} if the limit was reached
     * instead of return an empty listener immediately
     * @param delegate Non-blocking limiter to wrap
     * @return Wrapped limiter
     */
    public static <ContextT> BlockingLimiter<ContextT> wrap(Limiter<ContextT> delegate) {
        return new BlockingLimiter<>(delegate, MAX_TIMEOUT);
    }
    /**
     * Wrap a limiter such that acquire will block up to a provided timeout if the limit was reached
     * instead of return an empty listener immediately
     *
     * @param delegate Non-blocking limiter to wrap
     * @param timeout Max amount of time to wait for the wait for the limit to be released. Cannot exceed {@link BlockingLimiter#MAX_TIMEOUT}
     * @return Wrapped limiter
     */
    public static <ContextT> BlockingLimiter<ContextT> wrap(Limiter<ContextT> delegate, Duration timeout) {
        // Bug fix: the documented contract is "cannot exceed MAX_TIMEOUT", and the
        // single-argument wrap() uses MAX_TIMEOUT itself, so a timeout exactly
        // equal to MAX_TIMEOUT must be accepted. The previous strict '<' check
        // rejected it while reporting "cannot be greater than".
        Preconditions.checkArgument(timeout.compareTo(MAX_TIMEOUT) <= 0, "Timeout cannot be greater than " + MAX_TIMEOUT);
        return new BlockingLimiter<>(delegate, timeout);
    }
    private final Limiter<ContextT> delegate;
    private final Duration timeout;
    /**
     * Lock used to block and unblock callers as the limit is reached
     */
    private final Object lock = new Object();
    private BlockingLimiter(Limiter<ContextT> limiter, Duration timeout) {
        this.delegate = limiter;
        this.timeout = timeout;
    }
    /**
     * Repeatedly try to acquire from the delegate, waiting on {@link #lock}
     * between attempts, until either a listener is obtained or the deadline
     * derived from {@link #timeout} passes. Returns empty on timeout/interrupt.
     */
    private Optional<Listener> tryAcquire(ContextT context) {
        final Instant deadline = Instant.now().plus(timeout);
        synchronized (lock) {
            while (true) {
                // Time left until the deadline (renamed from 'timeout', which
                // shadowed the field of the same name).
                long remainingMillis = Duration.between(Instant.now(), deadline).toMillis();
                if (remainingMillis <= 0) {
                    return Optional.empty();
                }
                // Try to acquire a token and return immediately if successful
                final Optional<Listener> listener = delegate.acquire(context);
                if (listener.isPresent()) {
                    return listener;
                }
                // We have reached the limit so block until a token is released
                try {
                    lock.wait(remainingMillis);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return Optional.empty();
                }
            }
        }
    }
    // Wake all threads blocked in tryAcquire so they can retry the delegate.
    private void unblock() {
        synchronized (lock) {
            lock.notifyAll();
        }
    }
    @Override
    public Optional<Listener> acquire(ContextT context) {
        // Wrap the delegate listener so every outcome wakes blocked callers.
        return tryAcquire(context).map(inner -> new Listener() {
            @Override
            public void onSuccess() {
                inner.onSuccess();
                unblock();
            }
            @Override
            public void onIgnore() {
                inner.onIgnore();
                unblock();
            }
            @Override
            public void onDropped() {
                inner.onDropped();
                unblock();
            }
        });
    }
    @Override
    public String toString() {
        return "BlockingLimiter [" + delegate + "]";
    }
}
| 3,091 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/internal/Preconditions.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.internal;
/**
 * Minimal argument/state validation helpers, mirroring the subset of Guava's
 * Preconditions used by this library.
 */
public final class Preconditions {
    private Preconditions() {
        // Static utility holder; never instantiated.
    }

    /**
     * Throw {@link IllegalArgumentException} carrying {@code String.valueOf(errorMessage)}
     * when {@code expression} is false.
     */
    public static void checkArgument(boolean expression, Object errorMessage) {
        if (expression) {
            return;
        }
        throw new IllegalArgumentException(String.valueOf(errorMessage));
    }

    /**
     * Throw {@link IllegalStateException} carrying {@code String.valueOf(errorMessage)}
     * when {@code expression} is false.
     */
    public static void checkState(boolean expression, Object errorMessage) {
        if (expression) {
            return;
        }
        throw new IllegalStateException(String.valueOf(errorMessage));
    }
}
| 3,092 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/internal/EmptyMetricRegistry.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.internal;
import com.netflix.concurrency.limits.MetricRegistry;
import java.util.function.Supplier;
/**
 * {@link MetricRegistry} that silently discards every metric. Used as the
 * default registry so limiters can report metrics unconditionally without
 * null checks.
 */
public final class EmptyMetricRegistry implements MetricRegistry {
    public static final EmptyMetricRegistry INSTANCE = new EmptyMetricRegistry();

    private EmptyMetricRegistry() {
        // Singleton: use INSTANCE.
    }

    @Override
    public SampleListener distribution(String id, String... tagNameValuePairs) {
        // Swallow all samples.
        return sample -> {
        };
    }

    @Override
    public void gauge(String id, Supplier<Number> supplier, String... tagNameValuePairs) {
        // Intentionally a no-op: the supplier is never polled.
    }
}
| 3,093 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/executors/BlockingAdaptiveExecutor.java | /**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.concurrency.limits.executors;
import com.netflix.concurrency.limits.Limiter;
import com.netflix.concurrency.limits.Limiter.Listener;
import com.netflix.concurrency.limits.MetricRegistry;
import com.netflix.concurrency.limits.internal.EmptyMetricRegistry;
import com.netflix.concurrency.limits.limit.AIMDLimit;
import com.netflix.concurrency.limits.limiter.BlockingLimiter;
import com.netflix.concurrency.limits.limiter.SimpleLimiter;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
/**
* {@link Executor} which uses a {@link Limiter} to determine the size of the thread pool.
* Any {@link Runnable} executed once the limit has been reached will block the calling
* thread until the limit is released.
*
* Operations submitted to this executor should be homogeneous and have similar
* long term latency characteristics. RTT samples will only be taken from successful
* operations. The {@link Runnable} should throw a {@link UncheckedTimeoutException} if
* a request timed out or some external limit was reached. All other exceptions will be
* ignored.
*/
public final class BlockingAdaptiveExecutor implements Executor {
    public static class Builder {
        private static AtomicInteger idCounter = new AtomicInteger();
        private MetricRegistry metricRegistry = EmptyMetricRegistry.INSTANCE;
        private Executor executor;
        private Limiter<Void> limiter;
        private String name;
        /** Set the registry used for limiter metrics (default: no-op). */
        public Builder metricRegistry(MetricRegistry metricRegistry) {
            this.metricRegistry = metricRegistry;
            return this;
        }
        /** Set the delegate executor (default: cached pool of daemon threads). */
        public Builder executor(Executor executor) {
            this.executor = executor;
            return this;
        }
        /** Set the limiter (default: {@link SimpleLimiter} with an AIMD limit). */
        public Builder limiter(Limiter<Void> limiter) {
            this.limiter = limiter;
            return this;
        }
        /** Set a name used to tag metrics (default: unique "unnamed-N"). */
        public Builder name(String name) {
            this.name = name;
            return this;
        }
        public BlockingAdaptiveExecutor build() {
            if (name == null) {
                name = "unnamed-" + idCounter.incrementAndGet();
            }
            if (executor == null) {
                // Daemon threads so the default pool never prevents JVM shutdown.
                executor = Executors.newCachedThreadPool(new ThreadFactory() {
                    @Override
                    public Thread newThread(Runnable r) {
                        Thread thread = new Thread(r);
                        thread.setDaemon(true);
                        return thread;
                    }
                });
            }
            if (limiter == null) {
                limiter = SimpleLimiter.newBuilder()
                        .metricRegistry(metricRegistry)
                        .limit(AIMDLimit.newBuilder().build())
                        .build();
            }
            return new BlockingAdaptiveExecutor(this);
        }
    }
    public static Builder newBuilder() {
        return new Builder();
    }
    private final Limiter<Void> limiter;
    private final Executor executor;
    // NOTE(review): unlike the deprecated constructors below, the builder path
    // does NOT wrap the limiter in BlockingLimiter, so execute() fails fast with
    // RejectedExecutionException instead of blocking — confirm this is intended.
    private BlockingAdaptiveExecutor(Builder builder) {
        this.limiter = builder.limiter;
        this.executor = builder.executor;
    }
    @Deprecated
    public BlockingAdaptiveExecutor(Limiter<Void> limiter) {
        this(limiter, Executors.newCachedThreadPool());
    }
    @Deprecated
    public BlockingAdaptiveExecutor(Limiter<Void> limiter, Executor executor) {
        // Wrap so callers block (up to BlockingLimiter.MAX_TIMEOUT) rather than fail fast.
        this.limiter = BlockingLimiter.wrap(limiter);
        this.executor = executor;
    }
    /**
     * Acquire a limit token (throwing {@link RejectedExecutionException} when
     * unavailable), run the command on the delegate executor, and report the
     * outcome to the limiter: success, dropped (timeout/rejection) or ignored.
     */
    @Override
    public void execute(Runnable command) {
        Listener listener = limiter.acquire(null).orElseThrow(() -> new RejectedExecutionException());
        try {
            executor.execute(() -> {
                try {
                    command.run();
                    listener.onSuccess();
                } catch (UncheckedTimeoutException e) {
                    listener.onDropped();
                } catch (RejectedExecutionException e) {
                    // TODO: Remove support for RejectedExecutionException here.
                    listener.onDropped();
                } catch (Exception e) {
                    // We have no idea what caused the exception. It could be an NPE thrown immediately on the client
                    // or some remote call failure. The only sane thing to do here is just ignore this request
                    listener.onIgnore();
                }
            });
        } catch (Exception e) {
            // If the delegate executor itself rejects the task, release the token
            // (as "ignored") before rethrowing so capacity is not leaked.
            listener.onIgnore();
            throw e;
        }
    }
}
| 3,094 |
0 | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits | Create_ds/concurrency-limits/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/executors/UncheckedTimeoutException.java | package com.netflix.concurrency.limits.executors;
/**
 * Unchecked exception indicating that an operation exceeded its time budget or
 * hit an external limit. Tasks submitted to BlockingAdaptiveExecutor throw this
 * to signal a "dropped" outcome to the limiter.
 */
public class UncheckedTimeoutException extends RuntimeException {
    private static final long serialVersionUID = 0;

    public UncheckedTimeoutException() {}

    public UncheckedTimeoutException(String message) {
        super(message);
    }

    public UncheckedTimeoutException(String message, Throwable cause) {
        super(message, cause);
    }

    public UncheckedTimeoutException(Throwable cause) {
        super(cause);
    }
}
| 3,095 |
0 | Create_ds/concurrency-limits/concurrency-limits-servlet/src/test/java/com/netflix/concurrency | Create_ds/concurrency-limits/concurrency-limits-servlet/src/test/java/com/netflix/concurrency/limits/ConcurrencyLimitServletFilterSimulationTest.java | package com.netflix.concurrency.limits;
import com.netflix.concurrency.limits.executors.BlockingAdaptiveExecutor;
import com.netflix.concurrency.limits.limit.FixedLimit;
import com.netflix.concurrency.limits.limit.VegasLimit;
import com.netflix.concurrency.limits.limiter.SimpleLimiter;
import com.netflix.concurrency.limits.servlet.ConcurrencyLimitServletFilter;
import com.netflix.concurrency.limits.servlet.ServletLimiterBuilder;
import org.eclipse.jetty.servlet.FilterHolder;
import org.junit.ClassRule;
import org.junit.Ignore;
import org.junit.Test;
import javax.servlet.DispatcherType;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.security.Principal;
import java.util.EnumSet;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
public class ConcurrencyLimitServletFilterSimulationTest {
    @ClassRule
    public static HttpServerRule server = new HttpServerRule(context -> {
        context.addServlet(HelloServlet.class, "/");
        // Partitioned limiter: 80% of the fixed limit of 10 for "live", 20% for "batch".
        Limiter<HttpServletRequest> limiter = new ServletLimiterBuilder()
                .limit(FixedLimit.of(10))
                .partitionByUserPrincipal(Principal::getName)
                .partition("live", 0.8)
                .partition("batch", 0.2)
                .build();
        FilterHolder holder = new FilterHolder();
        holder.setFilter(new ConcurrencyLimitServletFilter(limiter));
        context.addFilter(holder, "/*", EnumSet.of(DispatcherType.REQUEST));
    });
    /**
     * Manually-run load simulation (hence @Ignore): hammers the server through an
     * adaptive executor and prints per-second success/error counts plus the limit.
     */
    @Test
    @Ignore
    public void simulation() throws Exception {
        Limit limit = VegasLimit.newDefault();
        BlockingAdaptiveExecutor executor = new BlockingAdaptiveExecutor(
                SimpleLimiter.newBuilder().limit(limit).build());
        AtomicInteger errors = new AtomicInteger();
        AtomicInteger success = new AtomicInteger();
        Executors.newSingleThreadScheduledExecutor().scheduleAtFixedRate(() -> {
            System.out.println(String.format("errors=%d success=%d limit=%s", errors.getAndSet(0), success.getAndSet(0), limit));
        }, 1, 1, TimeUnit.SECONDS);
        while (true) {
            executor.execute(() -> {
                try {
                    server.get("/batch");
                    success.incrementAndGet();
                } catch (Exception e) {
                    errors.incrementAndGet();
                    // Signal a drop to the adaptive executor.
                    throw new RejectedExecutionException();
                }
            });
        }
    }
    public static class HelloServlet extends HttpServlet {
        private static final long serialVersionUID = 1L;
        @Override
        protected void doGet( HttpServletRequest request, HttpServletResponse response ) throws ServletException, IOException {
            // Simulated 100ms of work per request.
            try {
                TimeUnit.MILLISECONDS.sleep(100);
            } catch (InterruptedException e) {
                // Bug fix: do not swallow the interrupt — restore the flag so the
                // servlet container can observe and act on it.
                Thread.currentThread().interrupt();
            }
            response.setContentType("text/html");
            response.setStatus(HttpServletResponse.SC_OK);
            response.getWriter().println("Hello from HelloServlet");
        }
    }
}
| 3,096 |
0 | Create_ds/concurrency-limits/concurrency-limits-servlet/src/test/java/com/netflix/concurrency | Create_ds/concurrency-limits/concurrency-limits-servlet/src/test/java/com/netflix/concurrency/limits/GroupServletLimiterTest.java | package com.netflix.concurrency.limits;
import com.netflix.concurrency.limits.Limiter.Listener;
import com.netflix.concurrency.limits.limit.VegasLimit;
import com.netflix.concurrency.limits.servlet.ServletLimiterBuilder;
import java.security.Principal;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import javax.servlet.http.HttpServletRequest;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.ArgumentMatchers;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
@RunWith(MockitoJUnitRunner.class)
public class GroupServletLimiterTest {
    @Test
    public void userPrincipalMatchesGroup() {
        // Given a principal->group mapping containing "bob", a request from "bob"
        // should be admitted and the resolver consulted exactly once.
        Map<String, String> principalToGroup = Mockito.spy(new HashMap<>());
        principalToGroup.put("bob", "live");
        Limiter<HttpServletRequest> limiter = new ServletLimiterBuilder()
                .limit(VegasLimit.newDefault())
                .partitionByUserPrincipal(p -> principalToGroup.get(p.getName()))
                .partition("live", 0.8)
                .partition("batch", 0.2)
                .build();
        HttpServletRequest request = createMockRequestWithPrincipal("bob");
        Optional<Listener> listener = limiter.acquire(request);
        Assert.assertTrue(listener.isPresent());
        Mockito.verify(principalToGroup, Mockito.times(1)).get("bob");
    }
    @Test
    public void userPrincipalDoesNotMatchGroup() {
        // A principal with no mapping should still be admitted (it falls through
        // to the limiter's default/unknown partition) after one resolver lookup.
        Map<String, String> principalToGroup = Mockito.spy(new HashMap<>());
        principalToGroup.put("bob", "live");
        Limiter<HttpServletRequest> limiter = new ServletLimiterBuilder()
                .limit(VegasLimit.newDefault())
                .partitionByUserPrincipal(p -> principalToGroup.get(p.getName()))
                .partition("live", 0.8)
                .partition("batch", 0.2)
                .build();
        HttpServletRequest request = createMockRequestWithPrincipal("doesntexist");
        Optional<Listener> listener = limiter.acquire(request);
        Assert.assertTrue(listener.isPresent());
        Mockito.verify(principalToGroup, Mockito.times(1)).get("doesntexist");
    }
    @Test
    public void nullUserPrincipalDoesNotMatchGroup() {
        // A request with no user principal at all should be admitted without the
        // resolver function ever being invoked.
        Map<String, String> principalToGroup = Mockito.spy(new HashMap<>());
        principalToGroup.put("bob", "live");
        Limiter<HttpServletRequest> limiter = new ServletLimiterBuilder()
                .limit(VegasLimit.newDefault())
                .partitionByUserPrincipal(p -> principalToGroup.get(p.getName()))
                .partition("live", 0.8)
                .partition("batch", 0.2)
                .build();
        HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
        Mockito.when(request.getUserPrincipal()).thenReturn(null);
        Optional<Listener> listener = limiter.acquire(request);
        Assert.assertTrue(listener.isPresent());
        Mockito.verify(principalToGroup, Mockito.times(0)).get(Mockito.<String> any());
    }
@Test
public void nullUserPrincipalNameDoesNotMatchGroup() {
Map<String, String> principalToGroup = Mockito.spy(new HashMap<>());
principalToGroup.put("bob", "live");
Limiter<HttpServletRequest> limiter = new ServletLimiterBuilder()
.limit(VegasLimit.newDefault())
.partitionByUserPrincipal(p -> principalToGroup.get(p.getName()))
.partition("live", 0.8)
.partition("batch", 0.2)
.build();
HttpServletRequest request = createMockRequestWithPrincipal(null);
Optional<Listener> listener = limiter.acquire(request);
Assert.assertTrue(listener.isPresent());
Mockito.verify(principalToGroup, Mockito.times(1)).get(ArgumentMatchers.isNull());
}
@Test
public void pathMatchesGroup() {
Map<String, String> pathToGroup = Mockito.spy(new HashMap<>());
pathToGroup.put("/live/path", "live");
Limiter<HttpServletRequest> limiter = new ServletLimiterBuilder()
.limit(VegasLimit.newDefault())
.partitionByPathInfo(pathToGroup::get)
.partition("live", 0.8)
.partition("batch", 0.2)
.build();
HttpServletRequest request = createMockRequestWithPathInfo("/live/path");
Optional<Listener> listener = limiter.acquire(request);
Assert.assertTrue(listener.isPresent());
Mockito.verify(pathToGroup, Mockito.times(1)).get("/live/path");
}
@Test
public void pathDoesNotMatchesGroup() {
Map<String, String> pathToGroup = Mockito.spy(new HashMap<>());
pathToGroup.put("/live/path", "live");
Limiter<HttpServletRequest> limiter = new ServletLimiterBuilder()
.limit(VegasLimit.newDefault())
.partitionByPathInfo(pathToGroup::get)
.partition("live", 0.8)
.partition("batch", 0.2)
.build();
HttpServletRequest request = createMockRequestWithPathInfo("/other/path");
Optional<Listener> listener = limiter.acquire(request);
Assert.assertTrue(listener.isPresent());
Mockito.verify(pathToGroup, Mockito.times(1)).get("/other/path");
}
@Test
public void nullPathDoesNotMatchesGroup() {
Map<String, String> pathToGroup = Mockito.spy(new HashMap<>());
pathToGroup.put("/live/path", "live");
Limiter<HttpServletRequest> limiter = new ServletLimiterBuilder()
.limit(VegasLimit.newDefault())
.partitionByPathInfo(pathToGroup::get)
.partition("live", 0.8)
.partition("batch", 0.2)
.build();
HttpServletRequest request = createMockRequestWithPathInfo(null);
Optional<Listener> listener = limiter.acquire(request);
Assert.assertTrue(listener.isPresent());
Mockito.verify(pathToGroup, Mockito.times(0)).get(Mockito.<String> any());
}
private HttpServletRequest createMockRequestWithPrincipal(String name) {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Principal principal = Mockito.mock(Principal.class);
Mockito.when(request.getUserPrincipal()).thenReturn(principal);
Mockito.when(principal.getName()).thenReturn(name);
return request;
}
private HttpServletRequest createMockRequestWithPathInfo(String name) {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getPathInfo()).thenReturn(name);
return request;
}
}
| 3,097 |
0 | Create_ds/concurrency-limits/concurrency-limits-servlet/src/test/java/com/netflix/concurrency | Create_ds/concurrency-limits/concurrency-limits-servlet/src/test/java/com/netflix/concurrency/limits/ConcurrencyLimitServletFilterTest.java | package com.netflix.concurrency.limits;
import com.netflix.concurrency.limits.servlet.ConcurrencyLimitServletFilter;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.jupiter.MockitoExtension;
import org.springframework.mock.web.MockFilterChain;
import org.springframework.mock.web.MockHttpServletRequest;
import org.springframework.mock.web.MockHttpServletResponse;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
import java.util.Optional;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link ConcurrencyLimitServletFilter} covering the admitted,
 * throttled, and custom-throttle-status code paths.
 */
@ExtendWith(MockitoExtension.class)
public class ConcurrencyLimitServletFilterTest {
    @Mock
    Limiter<HttpServletRequest> limiter;
    @Mock
    Limiter.Listener listener;

    @Test
    public void testDoFilterAllowed() throws ServletException, IOException {
        ConcurrencyLimitServletFilter filter = new ConcurrencyLimitServletFilter(limiter);

        // A present listener means the request is admitted.
        when(limiter.acquire(any())).thenReturn(Optional.of(listener));

        MockHttpServletRequest request = new MockHttpServletRequest();
        MockHttpServletResponse response = new MockHttpServletResponse();
        MockFilterChain filterChain = new MockFilterChain();

        filter.doFilter(request, response, filterChain);

        assertEquals(request, filterChain.getRequest(), "Request should be passed to the downstream chain");
        assertEquals(response, filterChain.getResponse(), "Response should be passed to the downstream chain");
        verify(listener).onSuccess();
    }

    @Test
    public void testDoFilterThrottled() throws ServletException, IOException {
        ConcurrencyLimitServletFilter filter = new ConcurrencyLimitServletFilter(limiter);

        // Empty means this request must be throttled.
        when(limiter.acquire(any())).thenReturn(Optional.empty());

        MockHttpServletResponse response = new MockHttpServletResponse();
        MockFilterChain filterChain = new MockFilterChain();

        filter.doFilter(new MockHttpServletRequest(), response, filterChain);

        assertNull(filterChain.getRequest(), "doFilter should not be called on the filterchain");
        assertEquals(429, response.getStatus(), "Status should be 429 - too many requests");
        // No listener was handed out, so no completion callback may fire.
        verify(listener, Mockito.never()).onSuccess();
    }

    @Test
    public void testDoFilterThrottledCustomStatus() throws ServletException, IOException {
        final int customThrottleStatus = 503;
        ConcurrencyLimitServletFilter filter = new ConcurrencyLimitServletFilter(limiter, customThrottleStatus);

        // Empty means this request must be throttled.
        when(limiter.acquire(any())).thenReturn(Optional.empty());

        MockHttpServletResponse response = new MockHttpServletResponse();

        filter.doFilter(new MockHttpServletRequest(), response, new MockFilterChain());

        assertEquals(customThrottleStatus, response.getStatus(), "custom status should be respected");
    }
}
| 3,098 |
0 | Create_ds/concurrency-limits/concurrency-limits-servlet/src/test/java/com/netflix/concurrency | Create_ds/concurrency-limits/concurrency-limits-servlet/src/test/java/com/netflix/concurrency/limits/HttpServerRule.java | package com.netflix.concurrency.limits;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHandler;
import org.junit.rules.ExternalResource;
public class HttpServerRule extends ExternalResource {
private Server server;
private final Consumer<ServletContextHandler> customizer;
public HttpServerRule(Consumer<ServletContextHandler> customizer) {
this.customizer = customizer;
}
protected void before() throws Throwable {
this.server = new Server(0);
ServletHandler handler = new ServletHandler();
server.setHandler(handler);
ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS);
context.setContextPath("/");
customizer.accept(context);
server.setHandler(context);
server.start();
}
/**
* Override to tear down your specific external resource.
*/
protected void after() {
if (server != null) {
try {
server.stop();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
public int getPort() {
return ((ServerConnector)server.getConnectors()[0]).getLocalPort();
}
public String get(String path) throws Exception {
URL url = new URL("http://localhost:" + getPort() + path);
HttpURLConnection con = (HttpURLConnection) url.openConnection();
con.setRequestMethod("GET");
int responseCode = con.getResponseCode();
if (responseCode != 200) {
throw new Exception(readString(con.getInputStream()));
} else {
return readString(con.getInputStream());
}
}
public String readString(InputStream is) throws IOException {
try (BufferedReader buffer = new BufferedReader(new InputStreamReader(is))) {
return buffer.lines().collect(Collectors.joining("\n"));
}
}
}
| 3,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.