index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/spectator/spectator-ext-log4j2/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-log4j2/src/main/java/com/netflix/spectator/log4j/LevelTag.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.log4j;
import com.netflix.spectator.api.Tag;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.spi.StandardLevel;
/**
 * Tags based on the standard log4j levels. The key will be {@code loglevel} and the value will
 * be the level name prefixed with the order so it will sort correctly.
 */
enum LevelTag implements Tag {
  /** 0_OFF. */
  OFF(StandardLevel.OFF),

  /** 1_FATAL. */
  FATAL(StandardLevel.FATAL),

  /** 2_ERROR. */
  ERROR(StandardLevel.ERROR),

  /** 3_WARN. */
  WARN(StandardLevel.WARN),

  /** 4_INFO. */
  INFO(StandardLevel.INFO),

  /** 5_DEBUG. */
  DEBUG(StandardLevel.DEBUG),

  /** 6_TRACE. */
  TRACE(StandardLevel.TRACE),

  /** 7_ALL. */
  ALL(StandardLevel.ALL);

  private final StandardLevel std;
  private final String value;

  /** Create a new instance based on a standard log4j level. */
  LevelTag(StandardLevel std) {
    this.std = std;
    // Prefix with the ordinal so the values sort in severity order when listed alphabetically.
    this.value = String.format("%d_%s", ordinal(), std.name());
  }

  @Override public String key() {
    return "loglevel";
  }

  @Override public String value() {
    return value;
  }

  /** Return the corresponding standard log4j level. */
  StandardLevel standardLevel() {
    return std;
  }

  /** Lookup table indexed by the ordinal of the corresponding {@link StandardLevel}. */
  private static final LevelTag[] LEVELS = new LevelTag[StandardLevel.values().length];

  static {
    for (LevelTag level : LevelTag.values()) {
      // Index by the StandardLevel ordinal, not the LevelTag ordinal. The previous code used
      // level.ordinal() here while get(Level) looks up by StandardLevel.ordinal(), which only
      // worked because both enums happen to declare their constants in the same order.
      LEVELS[level.std.ordinal()] = level;
    }
  }

  /** Get the tag corresponding to the log4j level. */
  static LevelTag get(Level level) {
    final StandardLevel stdLevel = level.getStandardLevel();
    return LEVELS[stdLevel.ordinal()];
  }
}
| 6,000 |
0 | Create_ds/spectator/spectator-ext-log4j2/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-log4j2/src/main/java/com/netflix/spectator/log4j/SpectatorAppender.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.log4j;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Spectator;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.core.Appender;
import org.apache.logging.log4j.core.Filter;
import org.apache.logging.log4j.core.Layout;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.config.Configuration;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.appender.AbstractAppender;
import org.apache.logging.log4j.core.config.LoggerConfig;
import org.apache.logging.log4j.core.config.Property;
import org.apache.logging.log4j.core.config.plugins.Plugin;
import org.apache.logging.log4j.core.config.plugins.PluginAttribute;
import org.apache.logging.log4j.core.config.plugins.PluginElement;
import org.apache.logging.log4j.core.config.plugins.PluginFactory;
import java.io.Serializable;
/**
 * Appender that tracks the number of messages that pass through. If the {@code ignoreExceptions}
 * option is set to false, a more detailed counter for the number of stack traces with the
 * exception types and file will also get tracked.
 */
@Plugin(name = "Spectator", category = "Core", elementType = "appender", printObject = true)
public final class SpectatorAppender extends AbstractAppender {

  /** Attach the appender to the root logger config of the current log4j context. */
  private static void addToRootLogger(final Appender appender) {
    LoggerContext context = (LoggerContext) LogManager.getContext(false);
    Configuration config = context.getConfiguration();
    LoggerConfig loggerConfig = config.getLoggerConfig(LogManager.ROOT_LOGGER_NAME);
    loggerConfig.addAppender(appender, Level.ALL, null);
    // Push the updated configuration out to all loggers in the context.
    context.updateLoggers(config);
  }

  /**
   * Add the spectator appender to the root logger. This method is intended to be called once
   * as part of the applications initialization process.
   *
   * @param registry
   *     Spectator registry to use for the appender.
   * @param name
   *     Name for the appender.
   * @param ignoreExceptions
   *     If set to true then the stack trace metrics are disabled.
   */
  public static void addToRootLogger(
      Registry registry,
      String name,
      boolean ignoreExceptions) {
    final Appender appender = new SpectatorAppender(
        registry, name, null, null, ignoreExceptions, Property.EMPTY_ARRAY);
    appender.start();

    LoggerContext context = (LoggerContext) LogManager.getContext(false);
    Configuration config = context.getConfiguration();
    addToRootLogger(appender);
    // Re-attach the appender whenever log4j is reconfigured, otherwise it would be
    // dropped the next time the configuration is reloaded.
    config.addListener(reconfigurable -> addToRootLogger(appender));
  }

  private static final long serialVersionUID = 42L;

  // Registry and cached ids are transient so a serialized appender does not drag them along.
  private final transient Registry registry;
  private transient Id[] numMessages;
  private transient Id[] numStackTraces;

  /** Create a new instance of the appender. */
  SpectatorAppender(
      Registry registry,
      String name,
      Filter filter,
      Layout<? extends Serializable> layout,
      boolean ignoreExceptions,
      Property[] properties) {
    super(name, filter, layout, ignoreExceptions, properties);
    this.registry = registry;
  }

  /** Create a new instance of the appender using the global spectator registry. */
  @PluginFactory
  public static SpectatorAppender createAppender(
      @PluginAttribute("name") String name,
      @PluginAttribute("ignoreExceptions") boolean ignoreExceptions,
      @PluginElement("Layout") Layout<? extends Serializable> layout,
      @PluginElement("Filters") Filter filter) {
    if (name == null) {
      LOGGER.error("no name provided for SpectatorAppender");
      return null;
    }
    return new SpectatorAppender(
        Spectator.globalRegistry(),
        name, filter, layout, ignoreExceptions,
        Property.EMPTY_ARRAY);
  }

  @Override public void start() {
    // Pre-compute one id per log level so append() only does an array lookup.
    // Arrays are indexed by LevelTag.ordinal(); must be fully populated before
    // super.start() marks the appender as ready to receive events.
    final LevelTag[] levels = LevelTag.values();
    numMessages = new Id[levels.length];
    numStackTraces = new Id[levels.length];
    for (int i = 0; i < levels.length; ++i) {
      numMessages[i] = registry.createId("log4j.numMessages")
          .withTag("appender", getName())
          .withTag(levels[i]);
      numStackTraces[i] = registry.createId("log4j.numStackTraces")
          .withTag("appender", getName())
          .withTag(levels[i]);
    }
    super.start();
  }

  @Override public void append(LogEvent event) {
    final LevelTag level = LevelTag.get(event.getLevel());
    registry.counter(numMessages[level.ordinal()]).increment();
    if (!ignoreExceptions() && event.getThrown() != null) {
      // Source may be unavailable (e.g. location info disabled), and even when present
      // the file name may be null; fall back to "unknown" in both cases.
      final String file = (event.getSource() == null) ? "unknown" : event.getSource().getFileName();
      Id stackTraceId = numStackTraces[level.ordinal()]
          .withTag("exception", event.getThrown().getClass().getSimpleName())
          .withTag("file", file == null ? "unknown" : file);
      registry.counter(stackTraceId).increment();
    }
  }
}
| 6,001 |
0 | Create_ds/spectator/spectator-ext-aws2/src/test/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-aws2/src/test/java/com/netflix/spectator/aws2/SpectatorExecutionInterceptorTest.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.aws2;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.ManualClock;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Timer;
import com.netflix.spectator.api.Utils;
import com.netflix.spectator.ipc.IpcMetric;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.reactivestreams.Publisher;
import software.amazon.awssdk.awscore.exception.AwsErrorDetails;
import software.amazon.awssdk.awscore.exception.AwsServiceException;
import software.amazon.awssdk.core.SdkRequest;
import software.amazon.awssdk.core.SdkResponse;
import software.amazon.awssdk.core.async.AsyncRequestBody;
import software.amazon.awssdk.core.interceptor.Context;
import software.amazon.awssdk.core.interceptor.ExecutionAttributes;
import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute;
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.http.SdkHttpMethod;
import software.amazon.awssdk.http.SdkHttpRequest;
import software.amazon.awssdk.http.SdkHttpResponse;
import java.io.InputStream;
import java.net.ConnectException;
import java.net.URI;
import java.nio.ByteBuffer;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
public class SpectatorExecutionInterceptorTest {

  // Number of additional beforeTransmission calls used to simulate retries after a
  // network failure.
  private static final int RETRIES = 3;

  private ManualClock clock;
  private Registry registry;
  private SpectatorExecutionInterceptor interceptor;

  @BeforeEach
  public void before() {
    clock = new ManualClock();
    registry = new DefaultRegistry(clock);
    interceptor = new SpectatorExecutionInterceptor(registry);
  }

  @AfterEach
  public void after() {
    // Verify that every metric recorded during the test conforms to the IPC spec.
    IpcMetric.validate(registry, true);
  }

  /**
   * Drive the interceptor through the SDK lifecycle callbacks in the order the AWS SDK
   * would invoke them, advancing the manual clock to simulate request latency. A null
   * httpResponse on the context simulates a network failure with retries; a non-null
   * error triggers the failure path after unmarshalling.
   */
  private void execute(TestContext context, ExecutionAttributes attrs, long latency) {
    interceptor.beforeExecution(context, attrs);
    interceptor.modifyRequest(context, attrs);
    interceptor.beforeMarshalling(context, attrs);
    interceptor.afterMarshalling(context, attrs);
    interceptor.modifyHttpRequest(context, attrs);
    interceptor.beforeTransmission(context, attrs);
    clock.setMonotonicTime(latency);
    if (context.httpResponse() == null) {
      // Simulate network failure with no response received
      for (int i = 0; i < RETRIES; ++i) {
        interceptor.beforeTransmission(context, attrs);
        clock.setMonotonicTime(clock.monotonicTime() + latency);
      }
      interceptor.onExecutionFailure(context.failureContext(), attrs);
    } else {
      interceptor.afterTransmission(context, attrs);
      interceptor.modifyHttpResponse(context, attrs);
      interceptor.beforeUnmarshalling(context, attrs);
      if (context.isFailure()) {
        interceptor.onExecutionFailure(context.failureContext(), attrs);
      } else {
        interceptor.afterUnmarshalling(context, attrs);
        interceptor.modifyResponse(context, attrs);
        interceptor.afterExecution(context, attrs);
      }
    }
  }

  /** Attributes with the service and operation names the interceptor reads. */
  private ExecutionAttributes createAttributes(String service, String op) {
    ExecutionAttributes attrs = new ExecutionAttributes();
    attrs.putAttribute(SdkExecutionAttribute.SERVICE_NAME, service);
    attrs.putAttribute(SdkExecutionAttribute.OPERATION_NAME, op);
    return attrs;
  }

  private String get(Id id, String k) {
    return Utils.getTagValue(id, k);
  }

  private long millis(long v) {
    return TimeUnit.MILLISECONDS.toNanos(v);
  }

  @Test
  public void successfulRequest() {
    SdkHttpRequest request = SdkHttpRequest.builder()
        .method(SdkHttpMethod.POST)
        .uri(URI.create("https://ec2.us-east-1.amazonaws.com"))
        .build();
    SdkHttpResponse response = SdkHttpResponse.builder()
        .statusCode(200)
        .build();
    TestContext context = new TestContext(request, response);
    execute(context, createAttributes("EC2", "DescribeInstances"), millis(42));

    Assertions.assertEquals(1, registry.timers().count());
    Timer t = registry.timers().findFirst().orElse(null);
    Assertions.assertNotNull(t);
    Assertions.assertEquals(1, t.count());
    Assertions.assertEquals(millis(42), t.totalTime());
    Assertions.assertEquals("EC2.DescribeInstances", get(t.id(), "ipc.endpoint"));
    Assertions.assertEquals("200", get(t.id(), "http.status"));
    Assertions.assertEquals("POST", get(t.id(), "http.method"));
  }

  @Test
  public void networkFailure() {
    SdkHttpRequest request = SdkHttpRequest.builder()
        .method(SdkHttpMethod.POST)
        .uri(URI.create("https://ec2.us-east-1.amazonaws.com"))
        .build();
    Throwable error = new ConnectException("failed to connect");
    TestContext context = new TestContext(request, null, error);
    execute(context, createAttributes("EC2", "DescribeInstances"), millis(30));

    // Two timers are expected: one for the final attempt (with the exception) and one
    // covering the retry attempts that ended without a response.
    Assertions.assertEquals(2, registry.timers().count());
    registry.timers().forEach(t -> {
      Assertions.assertEquals("EC2.DescribeInstances", get(t.id(), "ipc.endpoint"));
      switch ((int) t.count()) {
        case 1:
          Assertions.assertEquals("connection_error", get(t.id(), "ipc.status"));
          Assertions.assertEquals("ConnectException", get(t.id(), "ipc.status.detail"));
          break;
        case 3:
          // Captured for the retries attempts, we do not know the exception so it should have
          // an unexpected status
          Assertions.assertEquals("unexpected_error", get(t.id(), "ipc.status"));
          break;
        default:
          Assertions.fail("unexpected count: " + t.id() + " = " + t.count());
      }
    });
  }

  @Test
  public void awsFailure() {
    SdkHttpRequest request = SdkHttpRequest.builder()
        .method(SdkHttpMethod.POST)
        .uri(URI.create("https://ec2.us-east-1.amazonaws.com"))
        .build();
    SdkHttpResponse response = SdkHttpResponse.builder()
        .statusCode(403)
        .build();
    Throwable error = AwsServiceException.builder()
        .awsErrorDetails(AwsErrorDetails.builder()
            .errorCode("AccessDenied")
            .errorMessage("credentials have expired")
            .build())
        .build();
    TestContext context = new TestContext(request, response, error);
    execute(context, createAttributes("EC2", "DescribeInstances"), millis(30));

    Assertions.assertEquals(1, registry.timers().count());
    Timer t = registry.timers().findFirst().orElse(null);
    Assertions.assertNotNull(t);
    Assertions.assertEquals(1, t.count());
    Assertions.assertEquals(millis(30), t.totalTime());
    Assertions.assertEquals("403", get(t.id(), "http.status"));
    Assertions.assertEquals("AccessDenied", get(t.id(), "ipc.status.detail"));
  }

  @Test
  public void awsThrottling() {
    SdkHttpRequest request = SdkHttpRequest.builder()
        .method(SdkHttpMethod.POST)
        .uri(URI.create("https://ec2.us-east-1.amazonaws.com"))
        .build();
    SdkHttpResponse response = SdkHttpResponse.builder()
        .statusCode(400)
        .build();
    Throwable error = AwsServiceException.builder()
        .awsErrorDetails(AwsErrorDetails.builder()
            .errorCode("Throttling")
            .errorMessage("too many requests")
            .build())
        .build();
    TestContext context = new TestContext(request, response, error);
    execute(context, createAttributes("EC2", "DescribeInstances"), millis(30));

    Assertions.assertEquals(1, registry.timers().count());
    Timer t = registry.timers().findFirst().orElse(null);
    Assertions.assertNotNull(t);
    Assertions.assertEquals(1, t.count());
    Assertions.assertEquals(millis(30), t.totalTime());
    Assertions.assertEquals("400", get(t.id(), "http.status"));
    Assertions.assertEquals("throttled", get(t.id(), "ipc.status"));
  }

  /** Run a successful request with the given amz-sdk-retry header and verify ipc.attempt. */
  private void parseRetryHeaderTest(String expected, String header) {
    SdkHttpRequest request = SdkHttpRequest.builder()
        .method(SdkHttpMethod.POST)
        .uri(URI.create("https://ec2.us-east-1.amazonaws.com"))
        .appendHeader("amz-sdk-retry", header)
        .build();
    SdkHttpResponse response = SdkHttpResponse.builder()
        .statusCode(200)
        .build();
    TestContext context = new TestContext(request, response);
    execute(context, createAttributes("EC2", "DescribeInstances"), millis(30));

    Assertions.assertEquals(1, registry.timers().count());
    Timer t = registry.timers().findFirst().orElse(null);
    Assertions.assertNotNull(t);
    Assertions.assertEquals(1, t.count());
    Assertions.assertEquals(millis(30), t.totalTime());
    Assertions.assertEquals(expected, get(t.id(), "ipc.attempt"));
  }

  @Test
  public void parseRetryHeaderInitial() {
    parseRetryHeaderTest("initial", "0/NotUsed");
  }

  @Test
  public void parseRetryHeaderSecond() {
    parseRetryHeaderTest("second", "1/NotUsed");
  }

  @Test
  public void parseRetryHeaderThird() {
    parseRetryHeaderTest("third_up", "2/NotUsed");
  }

  @Test
  public void parseRetryHeader50() {
    parseRetryHeaderTest("third_up", "50/NotUsed");
  }

  @Test
  public void parseRetryHeaderInvalidNumber() {
    parseRetryHeaderTest("unknown", "foo/bar");
  }

  @Test
  public void parseRetryHeaderBadFormat() {
    parseRetryHeaderTest("unknown", "foo");
  }

  /**
   * Minimal Context implementation covering all the lifecycle sub-interfaces the
   * interceptor touches. A null response simulates a network failure; a non-null
   * error makes isFailure() true.
   */
  private static class TestContext implements Context.AfterExecution {

    private SdkHttpRequest request;
    private SdkHttpResponse response;
    private Throwable error;

    public TestContext(SdkHttpRequest request, SdkHttpResponse response) {
      this(request, response, null);
    }

    public TestContext(SdkHttpRequest request, SdkHttpResponse response, Throwable error) {
      this.request = request;
      this.response = response;
      this.error = error;
    }

    @Override public SdkResponse response() {
      return null;
    }

    @Override public SdkHttpResponse httpResponse() {
      return response;
    }

    @Override public Optional<Publisher<ByteBuffer>> responsePublisher() {
      return Optional.empty();
    }

    @Override public Optional<InputStream> responseBody() {
      return Optional.empty();
    }

    @Override public SdkHttpRequest httpRequest() {
      return request;
    }

    @Override public Optional<RequestBody> requestBody() {
      return Optional.empty();
    }

    @Override public Optional<AsyncRequestBody> asyncRequestBody() {
      return Optional.empty();
    }

    @Override public SdkRequest request() {
      return null;
    }

    boolean isFailure() {
      return error != null;
    }

    /** View of this context as the FailedExecution passed to onExecutionFailure. */
    Context.FailedExecution failureContext() {
      return new Context.FailedExecution() {
        @Override
        public Throwable exception() {
          return error;
        }

        @Override
        public SdkRequest request() {
          return null;
        }

        @Override
        public Optional<SdkHttpRequest> httpRequest() {
          return Optional.ofNullable(request);
        }

        @Override
        public Optional<SdkHttpResponse> httpResponse() {
          return Optional.ofNullable(response);
        }

        @Override
        public Optional<SdkResponse> response() {
          return Optional.empty();
        }
      };
    }
  }
}
| 6,002 |
0 | Create_ds/spectator/spectator-ext-aws2/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-aws2/src/main/java/com/netflix/spectator/aws2/SpectatorExecutionInterceptor.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.aws2;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Spectator;
import com.netflix.spectator.ipc.IpcAttempt;
import com.netflix.spectator.ipc.IpcLogEntry;
import com.netflix.spectator.ipc.IpcLogger;
import com.netflix.spectator.ipc.IpcProtocol;
import com.netflix.spectator.ipc.IpcStatus;
import software.amazon.awssdk.awscore.exception.AwsServiceException;
import software.amazon.awssdk.core.interceptor.Context;
import software.amazon.awssdk.core.interceptor.ExecutionAttribute;
import software.amazon.awssdk.core.interceptor.ExecutionAttributes;
import software.amazon.awssdk.core.interceptor.ExecutionInterceptor;
import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute;
import software.amazon.awssdk.http.SdkHttpRequest;
import software.amazon.awssdk.http.SdkHttpResponse;
import java.util.List;
/**
 * Collect common <a href="http://netflix.github.io/spectator/en/latest/ext/ipc/">IPC metrics</a>
 * for AWS SDK requests. This class should get loaded automatically by the SDK if it is in the
 * classpath.
 */
public class SpectatorExecutionInterceptor implements ExecutionInterceptor {

  /** Attribute used to pass the in-flight log entry between lifecycle callbacks. */
  private static final ExecutionAttribute<IpcLogEntry> LOG_ENTRY =
      new ExecutionAttribute<>("SpectatorIpcLogEntry");

  /** Attribute indicating whether an HTTP status was recorded for the current attempt. */
  private static final ExecutionAttribute<Boolean> STATUS_IS_SET =
      new ExecutionAttribute<>("SpectatorIpcStatusIsSet");

  private final IpcLogger logger;

  /**
   * Create a new instance using {@link Spectator#globalRegistry()}.
   */
  public SpectatorExecutionInterceptor() {
    this(Spectator.globalRegistry());
  }

  /**
   * Create a new instance using the specified registry.
   *
   * @param registry
   *     Registry to use for managing the collected metrics.
   */
  public SpectatorExecutionInterceptor(Registry registry) {
    this.logger = new IpcLogger(registry);
  }

  /**
   * For network errors there will not be a response so the status will not have been set. This
   * method looks for a flag in the attributes to see if we need to close off the log entry for
   * the attempt.
   */
  private boolean isStatusSet(ExecutionAttributes attrs) {
    Boolean s = attrs.getAttribute(STATUS_IS_SET);
    return s != null && s;
  }

  /**
   * If there is a retry, then {@code beforeTransmission} will be called with the previous
   * attributes. This method will look for an existing entry and write out the log message.
   * The log entry may not have been filled in with a status if no response was received,
   * e.g., a connection exception. Since we do not have access to the failure, the status
   * will get set to {@code unexpected_error}.
   */
  private void logRetryAttempt(ExecutionAttributes attrs) {
    IpcLogEntry logEntry = attrs.getAttribute(LOG_ENTRY);
    if (logEntry != null) {
      if (!isStatusSet(attrs)) {
        logEntry.markEnd().withStatus(IpcStatus.unexpected_error);
      }
      logEntry.log();
    }
  }

  /**
   * Extract the attempt number from the {@code amz-sdk-retry} header. An attempt of 0 is
   * reported when the header is missing or cannot be parsed, which maps to an unknown attempt.
   */
  private IpcAttempt extractAttempt(SdkHttpRequest request) {
    int attempt = 0;
    List<String> vs = request.headers().get("amz-sdk-retry");
    if (vs != null) {
      for (String v : vs) {
        // Format is: {requestCount - 1}/{lastBackoffDelay}/{availableRetryCapacity}
        // See internal RetryHandler for more details.
        int pos = v.indexOf('/');
        if (pos > 0) {
          try {
            attempt = Integer.parseInt(v.substring(0, pos)) + 1;
          } catch (NumberFormatException e) {
            // If we cannot parse it, then attempt is unknown
            attempt = 0;
          }
        }
      }
    }
    return IpcAttempt.forAttemptNumber(attempt);
  }

  @Override
  public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes attrs) {
    // Close off the entry for the previous attempt, if any, before starting a new one.
    logRetryAttempt(attrs);

    String serviceName = attrs.getAttribute(SdkExecutionAttribute.SERVICE_NAME);
    String opName = attrs.getAttribute(SdkExecutionAttribute.OPERATION_NAME);
    String endpoint = serviceName + "." + opName;

    SdkHttpRequest request = context.httpRequest();
    IpcLogEntry logEntry = logger.createClientEntry()
        .withOwner("aws-sdk-java-v2")
        .withProtocol(IpcProtocol.http_1)
        .withHttpMethod(request.method().name())
        .withUri(request.getUri())
        .withEndpoint(endpoint)
        .withAttempt(extractAttempt(request))
        .withAttemptFinal(false); // Don't know if it is the final attempt
    request.headers().forEach((k, vs) -> vs.forEach(v -> logEntry.addRequestHeader(k, v)));
    attrs.putAttribute(LOG_ENTRY, logEntry.markStart());
  }

  @Override
  public void afterTransmission(Context.AfterTransmission context, ExecutionAttributes attrs) {
    SdkHttpResponse response = context.httpResponse();
    IpcLogEntry logEntry = attrs.getAttribute(LOG_ENTRY)
        .markEnd()
        .withHttpStatus(response.statusCode());
    attrs.putAttribute(STATUS_IS_SET, true);
    response.headers().forEach((k, vs) -> vs.forEach(v -> logEntry.addResponseHeader(k, v)));
  }

  @Override
  public void afterExecution(Context.AfterExecution context, ExecutionAttributes attrs) {
    IpcLogEntry logEntry = attrs.getAttribute(LOG_ENTRY);
    if (logEntry != null) {
      logEntry.log();
    }
  }

  @Override
  public void onExecutionFailure(Context.FailedExecution context, ExecutionAttributes attrs) {
    IpcLogEntry logEntry = attrs.getAttribute(LOG_ENTRY);
    if (logEntry == null) {
      // The failure occurred before beforeTransmission was invoked, e.g. during request
      // marshalling, so there is no pending attempt to close off.
      return;
    }
    Throwable t = context.exception();
    if (t instanceof AwsServiceException) {
      AwsServiceException exception = ((AwsServiceException) t);
      if (exception.isThrottlingException()) {
        logEntry.withStatus(IpcStatus.throttled);
      }
      // Error details may be absent, e.g. if the error response could not be parsed.
      if (exception.awsErrorDetails() != null) {
        logEntry.withStatusDetail(exception.awsErrorDetails().errorCode());
      }
    }
    logEntry.withException(context.exception()).log();
  }
}
| 6,003 |
0 | Create_ds/spectator/spectator-ext-ipcservlet/src/test/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-ipcservlet/src/test/java/com/netflix/spectator/ipcservlet/TestUtils.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.ipcservlet;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Meter;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Utils;
import com.netflix.spectator.ipc.IpcTagKey;
import org.junit.jupiter.api.Assertions;
import java.util.stream.Stream;
/**
 * Shared assertion helpers for the IPC servlet tests. Each {@code checkX(Registry, ...)}
 * method asserts that at least one matching call metric was recorded and that the given
 * tag has the expected value on every matching id. The repetitive per-tag methods all
 * delegate to a small set of private helpers.
 */
final class TestUtils {

  private TestUtils() {
  }

  /** Ids of {@code ipc.client.call} metrics recorded in the registry. */
  static Stream<Id> clientCallMetrics(Registry registry) {
    return callMetrics(registry, "ipc.client.call");
  }

  /** Ids of {@code ipc.server.call} metrics recorded in the registry. */
  static Stream<Id> serverCallMetrics(Registry registry) {
    return callMetrics(registry, "ipc.server.call");
  }

  /** Ids of meters whose name matches {@code name}. */
  private static Stream<Id> callMetrics(Registry registry, String name) {
    return registry
        .stream()
        .map(Meter::id)
        .filter(id -> name.equals(id.name()));
  }

  /** Assert the value of {@code key} on a single id. */
  private static void checkTag(Id id, IpcTagKey key, String expected) {
    String value = Utils.getTagValue(id, key.key());
    Assertions.assertEquals(expected, value);
  }

  /** Assert the value of {@code key} on all client call ids; fails if there are none. */
  private static void checkClientTag(Registry registry, IpcTagKey key, String expected) {
    Assertions.assertTrue(clientCallMetrics(registry).count() > 0);
    clientCallMetrics(registry).forEach(id -> checkTag(id, key, expected));
  }

  /** Assert the value of {@code key} on all server call ids; fails if there are none. */
  private static void checkServerTag(Registry registry, IpcTagKey key, String expected) {
    Assertions.assertTrue(serverCallMetrics(registry).count() > 0);
    serverCallMetrics(registry).forEach(id -> checkTag(id, key, expected));
  }

  static void checkEndpoint(Id id, String expected) {
    checkTag(id, IpcTagKey.endpoint, expected);
  }

  static void checkClientEndpoint(Registry registry, String expected) {
    checkClientTag(registry, IpcTagKey.endpoint, expected);
  }

  static void checkServerEndpoint(Registry registry, String expected) {
    checkServerTag(registry, IpcTagKey.endpoint, expected);
  }

  static void checkEndpoint(Registry registry, String expected) {
    checkClientEndpoint(registry, expected);
    checkServerEndpoint(registry, expected);
  }

  static void checkStatus(Id id, String expected) {
    checkTag(id, IpcTagKey.httpStatus, expected);
  }

  static void checkClientStatus(Registry registry, String expected) {
    checkClientTag(registry, IpcTagKey.httpStatus, expected);
  }

  static void checkServerStatus(Registry registry, String expected) {
    checkServerTag(registry, IpcTagKey.httpStatus, expected);
  }

  static void checkStatus(Registry registry, String expected) {
    checkClientStatus(registry, expected);
    checkServerStatus(registry, expected);
  }

  static void checkMethod(Id id, String expected) {
    checkTag(id, IpcTagKey.httpMethod, expected);
  }

  static void checkClientMethod(Registry registry, String expected) {
    checkClientTag(registry, IpcTagKey.httpMethod, expected);
  }

  static void checkServerMethod(Registry registry, String expected) {
    checkServerTag(registry, IpcTagKey.httpMethod, expected);
  }

  static void checkMethod(Registry registry, String expected) {
    checkClientMethod(registry, expected);
    checkServerMethod(registry, expected);
  }

  static void checkErrorReason(Id id, String expected) {
    checkTag(id, IpcTagKey.statusDetail, expected);
  }

  static void checkClientErrorReason(Registry registry, String expected) {
    checkClientTag(registry, IpcTagKey.statusDetail, expected);
  }

  static void checkServerErrorReason(Registry registry, String expected) {
    checkServerTag(registry, IpcTagKey.statusDetail, expected);
  }

  static void checkErrorReason(Registry registry, String expected) {
    checkClientErrorReason(registry, expected);
    checkServerErrorReason(registry, expected);
  }

  static void checkResult(Id id, String expected) {
    checkTag(id, IpcTagKey.result, expected);
  }

  static void checkClientResult(Registry registry, String expected) {
    checkClientTag(registry, IpcTagKey.result, expected);
  }

  static void checkServerResult(Registry registry, String expected) {
    checkServerTag(registry, IpcTagKey.result, expected);
  }

  static void checkResult(Registry registry, String expected) {
    checkClientResult(registry, expected);
    checkServerResult(registry, expected);
  }
}
| 6,004 |
0 | Create_ds/spectator/spectator-ext-ipcservlet/src/test/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-ipcservlet/src/test/java/com/netflix/spectator/ipcservlet/IpcServletFilterTest.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.ipcservlet;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Spectator;
import com.netflix.spectator.ipc.IpcLogger;
import com.netflix.spectator.ipc.IpcMetric;
import com.netflix.spectator.ipc.http.HttpClient;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.servlet.ServletHandler;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import javax.servlet.DispatcherType;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.EnumSet;
import static com.netflix.spectator.ipcservlet.TestUtils.*;
public class IpcServletFilterTest {
private static Server server;
private static URI baseUri;
@BeforeAll
public static void init() throws Exception {
  // Bind to an ephemeral port on localhost so parallel test runs do not conflict.
  server = new Server(new InetSocketAddress("localhost", 0));
  ServletHandler handler = new ServletHandler();
  // Specific servlet mappings are registered before the catch-all "/*" mapping.
  handler.addServletWithMapping(OkServlet.class, "/test/foo/*");
  handler.addServletWithMapping(OkServlet.class, "/api/*");
  handler.addServletWithMapping(BadRequestServlet.class, "/bad/*");
  handler.addServletWithMapping(FailServlet.class, "/throw/*");
  handler.addServletWithMapping(CustomEndpointServlet.class, "/endpoint/*");
  handler.addServletWithMapping(OkServlet.class, "/*");
  // Filter under test is applied to all REQUEST dispatches.
  handler.addFilterWithMapping(IpcServletFilter.class, "/*", EnumSet.of(DispatcherType.REQUEST));
  server.setHandler(handler);
  server.start();
  baseUri = server.getURI();
}
@AfterAll
public static void shutdown() throws Exception {
server.stop();
}
private Registry registry;
private HttpClient client;
@BeforeEach
public void before() {
registry = new DefaultRegistry();
client = HttpClient.create(new IpcLogger(registry));
Spectator.globalRegistry().removeAll();
Spectator.globalRegistry().add(registry);
}
@Test
public void validateIdTest() throws Exception {
client.get(baseUri.resolve("/test/foo/12345?q=54321")).send();
checkResult(registry, "success");
checkStatus(registry, "200");
checkMethod(registry, "GET");
checkEndpoint(registry, "/test/foo");
IpcMetric.validate(registry, true);
}
@Test
public void validateIdApi() throws Exception {
client.get(baseUri.resolve("/api/v1/asg/12345")).send();
checkEndpoint(registry, "/api");
IpcMetric.validate(registry, true);
}
@Test
public void validateIdRoot() throws Exception {
client.get(baseUri.resolve("/12345")).send();
checkEndpoint(registry, "/");
IpcMetric.validate(registry, true);
}
@Test
public void validateIdBadRequest() throws Exception {
client.post(baseUri.resolve("/bad/12345")).send();
checkResult(registry, "failure");
checkErrorReason(registry, null);
checkStatus(registry, "400");
checkMethod(registry, "POST");
checkEndpoint(registry, "/bad");
IpcMetric.validate(registry, true);
}
@Test
public void validateIdThrow() throws Exception {
client.get(baseUri.resolve("/throw/12345")).send();
checkResult(registry, "failure");
checkClientErrorReason(registry, null);
checkServerErrorReason(registry, "RuntimeException");
checkMethod(registry, "GET");
checkClientEndpoint(registry, "_throw_-");
checkServerEndpoint(registry, "/throw");
IpcMetric.validate(registry, true);
}
@Test
public void validateIdCustom() throws Exception {
client.delete(baseUri.resolve("/endpoint/foo/12345?q=54321")).send();
checkResult(registry, "success");
checkStatus(registry, "200");
checkMethod(registry, "DELETE");
checkEndpoint(registry, "/servlet"); // header set in the servlet
IpcMetric.validate(registry, true);
}
public static class OkServlet extends HttpServlet {
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response) {
response.setStatus(200);
}
}
public static class BadRequestServlet extends HttpServlet {
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response) {
response.setStatus(400);
}
@Override
public void doPost(HttpServletRequest request, HttpServletResponse response) {
response.setStatus(400);
}
}
public static class FailServlet extends HttpServlet {
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response) {
throw new RuntimeException("something bad happened");
}
}
public static class CustomEndpointServlet extends HttpServlet {
@Override
public void doDelete(HttpServletRequest request, HttpServletResponse response) {
response.setStatus(200);
response.addHeader("Netflix-Endpoint", "/servlet");
}
}
}
| 6,005 |
0 | Create_ds/spectator/spectator-ext-ipcservlet/src/test/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-ipcservlet/src/test/java/com/netflix/spectator/ipcservlet/GuiceServletFilterTest.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.ipcservlet;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceFilter;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Spectator;
import com.netflix.spectator.ipc.IpcLogger;
import com.netflix.spectator.ipc.http.HttpClient;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.junit.jupiter.api.*;
import javax.inject.Singleton;
import javax.servlet.DispatcherType;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.EnumSet;
import static com.netflix.spectator.ipcservlet.TestUtils.*;
public class GuiceServletFilterTest {
  // Verifies endpoint extraction works when servlets are bound via Guice,
  // which wraps requests; see https://github.com/google/guice/issues/807
  private static Server server;
  private static URI baseUri;
  @BeforeAll
  public static void init() throws Exception {
    server = new Server(new InetSocketAddress("localhost", 0));
    ServletContextHandler handler = new ServletContextHandler(server, "/");
    // The listener creates the injector; GuiceFilter routes requests to the
    // servlets bound in TestListener's ServletModule.
    handler.addEventListener(new TestListener());
    handler.addFilter(GuiceFilter.class, "/*", EnumSet.allOf(DispatcherType.class));
    server.setHandler(handler);
    server.start();
    baseUri = server.getURI();
  }
  @AfterAll
  public static void shutdown() throws Exception {
    server.stop();
  }
  // Fresh registry and client per test; the global registry is reset in
  // before() so metrics from one test cannot leak into another.
  private Registry registry;
  private HttpClient client;
  @BeforeEach
  public void before() {
    registry = new DefaultRegistry();
    client = HttpClient.create(new IpcLogger(registry));
    Spectator.globalRegistry().removeAll();
    Spectator.globalRegistry().add(registry);
  }
  @Test
  public void validateIdTest() throws Exception {
    client.get(baseUri.resolve("/test/foo/12345")).send();
    checkEndpoint(registry, "/test/foo");
  }
  @Test
  public void validateIdApi() throws Exception {
    client.get(baseUri.resolve("/api/v1/asgs/12345")).send();
    checkEndpoint(registry, "/api");
  }
  @Test
  public void validateIdRoot() throws Exception {
    client.get(baseUri.resolve("/12345")).send();
    checkEndpoint(registry, "/");
  }
  /** Servlet that always responds 200. */
  @Singleton
  public static class TestServlet extends HttpServlet {
    @Override
    public void doGet(HttpServletRequest request, HttpServletResponse response) {
      response.setStatus(200);
    }
  }
  /** Bootstraps the Guice injector with servlet bindings and the filter under test. */
  @Singleton
  public static class TestListener extends GuiceServletContextListener {
    @Override
    protected Injector getInjector() {
      return Guice.createInjector(
          new AbstractModule() {
            @Override
            protected void configure() {
              bind(Registry.class).toInstance(Spectator.globalRegistry());
            }
          },
          new ServletModule() {
            @Override
            protected void configureServlets() {
              serve("/test/foo/*").with(TestServlet.class);
              serve("/api/*").with(TestServlet.class);
              serve("/*").with(TestServlet.class);
              filter("/*").through(IpcServletFilter.class);
            }
          }
      );
    }
  }
}
| 6,006 |
0 | Create_ds/spectator/spectator-ext-ipcservlet/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-ipcservlet/src/main/java/com/netflix/spectator/ipcservlet/ServletPathHack.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.ipcservlet;
import javax.servlet.http.HttpServletRequest;
import java.lang.reflect.Array;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
/**
* This is a hack to work around <a href="https://github.com/google/guice/issues/807">#807</a>.
* It uses reflection to extract the servlet path based on the pattern that was used for the
* guice bindings.
*/
@SuppressWarnings("PMD.AvoidAccessibilityAlteration")
final class ServletPathHack {
  private ServletPathHack() {
  }
  // Package holding Guice's servlet request wrappers; used to detect that the
  // request came through GuiceFilter and that the hack is applicable.
  private static final String PACKAGE = "com.google.inject.servlet";
  // Once any reflective step fails (e.g. on a Guice version with different
  // internals), permanently fall back to the plain servlet path.
  private static volatile boolean hackWorks = true;
  /** Read a (possibly private) declared field from the given object via reflection. */
  private static Object get(Object obj, String name) throws Exception {
    Field f = obj.getClass().getDeclaredField(name);
    f.setAccessible(true);
    return f.get(obj);
  }
  /** Invoke the private {@code matches(String)} method on a Guice pattern matcher. */
  private static boolean matches(Object obj, String path) throws Exception {
    Method m = obj.getClass().getDeclaredMethod("matches", String.class);
    m.setAccessible(true);
    return (Boolean) m.invoke(obj, path);
  }
  /** Invoke the private {@code extractPath(String)} method on a Guice pattern matcher. */
  private static String extractPath(Object obj, String path) throws Exception {
    Method m = obj.getClass().getDeclaredMethod("extractPath", String.class);
    m.setAccessible(true);
    return (String) m.invoke(obj, path);
  }
  /** Helper to get the servlet path for the request. */
  static String getServletPath(HttpServletRequest request) {
    String servletPath = request.getServletPath();
    if (hackWorks && PACKAGE.equals(request.getClass().getPackage().getName())) {
      try {
        // In guice 4.1.0, we need to go through a wrapper object to get to the servlet
        // pipeline
        Object outer;
        String pipelineField = "servletPipeline";
        try {
          outer = get(request, "this$0");
        } catch (NoSuchFieldException e) {
          // For later versions like guice 5.0.1, just use the request and
          outer = request;
          pipelineField = "val$" + pipelineField;
        }
        Object servletPipeline = get(outer, pipelineField);
        Object servletDefs = get(servletPipeline, "servletDefinitions");
        int length = Array.getLength(servletDefs);
        // Walk the servlet definitions in order and use the first pattern that
        // matches to extract the effective servlet path.
        for (int i = 0; i < length; ++i) {
          Object pattern = get(Array.get(servletDefs, i), "patternMatcher");
          if (matches(pattern, servletPath)) {
            servletPath = extractPath(pattern, servletPath);
            break;
          }
        }
      } catch (Exception e) {
        // Unknown Guice internals; disable the hack for all future requests.
        hackWorks = false;
      }
    }
    return servletPath;
  }
}
| 6,007 |
0 | Create_ds/spectator/spectator-ext-ipcservlet/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-ipcservlet/src/main/java/com/netflix/spectator/ipcservlet/IpcServletFilter.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.ipcservlet;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Spectator;
import com.netflix.spectator.ipc.IpcLogEntry;
import com.netflix.spectator.ipc.IpcLogger;
import com.netflix.spectator.ipc.NetflixHeader;
import com.netflix.spectator.ipc.NetflixHeaders;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.inject.Inject;
import javax.inject.Singleton;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.util.Collection;
import java.util.Enumeration;
import java.util.Map;
/**
* Servlet filter implementation that provides common IPC metrics for filtered requests.
*/
@Singleton
public class IpcServletFilter implements Filter {
  private static final Logger LOGGER = LoggerFactory.getLogger(IpcServletFilter.class);
  private final IpcLogger ipcLogger;
  // Static Netflix headers resolved once from the environment and echoed on
  // every response if not already present.
  private final Map<String, String> netflixHeaders;
  /**
   * Create a new instance using the global registry. This is typically used when defining
   * the filter in the {@code web.xml} file and it is created by the container.
   */
  public IpcServletFilter() {
    this(Spectator.globalRegistry());
  }
  /** Create a new instance using the specified registry. */
  @Inject
  public IpcServletFilter(Registry registry) {
    this.ipcLogger = new IpcLogger(registry, LOGGER);
    this.netflixHeaders = NetflixHeaders.extractFromEnvironment();
  }
  @SuppressWarnings("PMD.AvoidCatchingThrowable")
  @Override
  public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
      throws IOException, ServletException {
    if (request instanceof HttpServletRequest && response instanceof HttpServletResponse) {
      HttpServletRequest httpReq = (HttpServletRequest) request;
      HttpServletResponse httpRes = (HttpServletResponse) response;
      String endpoint = getEndpoint(httpReq);
      // NOTE(review): the request URI is passed for both withUri arguments;
      // confirm whether the second argument should be the endpoint/path instead.
      IpcLogEntry entry = ipcLogger.createServerEntry()
          .withOwner("spectator")
          .withHttpMethod(httpReq.getMethod())
          .withUri(httpReq.getRequestURI(), httpReq.getRequestURI());
      addRequestHeaders(httpReq, entry);
      entry.markStart();
      try {
        chain.doFilter(request, response);
        entry.markEnd().withHttpStatus(httpRes.getStatus());
      } catch (Throwable t) {
        // Record the failure on the entry, then rethrow so the container
        // still sees the original error.
        entry.markEnd()
            .withException(t)
            .withHttpStatus(500);
        throw t;
      } finally {
        // Netflix headers are added to the response first so they are included
        // when the response headers are copied into the log entry below.
        addNetflixHeaders(httpRes, endpoint);
        addResponseHeaders(httpRes, entry);
        entry.log();
      }
    } else {
      // Not an HTTP request/response pair, nothing to measure.
      chain.doFilter(request, response);
    }
  }
  /** Determine the endpoint from the servlet path, defaulting to "/" when empty. */
  private String getEndpoint(HttpServletRequest httpReq) {
    String servletPath = ServletPathHack.getServletPath(httpReq);
    return (servletPath == null || servletPath.isEmpty())
        ? "/"
        : servletPath;
  }
  /** Add the endpoint and standard Netflix headers without clobbering existing values. */
  private void addNetflixHeaders(HttpServletResponse httpRes, String endpoint) {
    addIfNotPresent(httpRes, NetflixHeader.Endpoint.headerName(), endpoint);
    for (Map.Entry<String, String> entry : netflixHeaders.entrySet()) {
      addIfNotPresent(httpRes, entry.getKey(), entry.getValue());
    }
  }
  /** Add the header to the response only if it has not already been set. */
  private void addIfNotPresent(HttpServletResponse httpRes, String name, String value) {
    if (httpRes.getHeader(name) == null) {
      httpRes.addHeader(name, value);
    }
  }
  /** Copy all request headers, including repeated values, into the log entry. */
  private void addRequestHeaders(HttpServletRequest httpReq, IpcLogEntry entry) {
    Enumeration<String> headers = httpReq.getHeaderNames();
    while (headers.hasMoreElements()) {
      String header = headers.nextElement();
      Enumeration<String> values = httpReq.getHeaders(header);
      while (values.hasMoreElements()) {
        entry.addRequestHeader(header, values.nextElement());
      }
    }
  }
  /** Copy all response headers, including repeated values, into the log entry. */
  private void addResponseHeaders(HttpServletResponse httpRes, IpcLogEntry entry) {
    Collection<String> headers = httpRes.getHeaderNames();
    for (String header : headers) {
      Collection<String> values = httpRes.getHeaders(header);
      for (String value : values) {
        entry.addResponseHeader(header, value);
      }
    }
  }
  //
  // In the servlet-api 4.x versions there are default implementations of the methods
  // below. To avoid AbstractMethodErrors when running on older versions, we explicitly
  // override them with empty implementations.
  //
  @Override public void init(FilterConfig filterConfig) throws ServletException {
  }
  @Override public void destroy() {
  }
}
| 6,008 |
0 | Create_ds/spectator/spectator-ext-jvm/src/test/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-jvm/src/test/java/com/netflix/spectator/jvm/HotspotRuntimeTest.java | /*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.jvm;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.Test;
public class HotspotRuntimeTest {
  /** Attempt to force a VM safepoint; a full GC seems to be enough for these tests. */
  private void forceSafepoint() {
    System.gc();
  }
  @Test
  public void safepointCount() {
    // Skip entirely on JVMs without the hotspot runtime MXBean.
    Assumptions.assumeTrue(HotspotRuntime.isSupported());
    final long initial = HotspotRuntime.getSafepointCount();
    forceSafepoint();
    final long updated = HotspotRuntime.getSafepointCount();
    Assertions.assertTrue(updated > initial);
  }
  @Test
  public void safepointTime() {
    // Smoke test: just verify the call does not throw when supported.
    Assumptions.assumeTrue(HotspotRuntime.isSupported());
    HotspotRuntime.getSafepointTime();
  }
  @Test
  public void safepointSyncTime() {
    // Smoke test: just verify the call does not throw when supported.
    Assumptions.assumeTrue(HotspotRuntime.isSupported());
    HotspotRuntime.getSafepointSyncTime();
  }
}
| 6,009 |
0 | Create_ds/spectator/spectator-ext-jvm/src/test/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-jvm/src/test/java/com/netflix/spectator/jvm/JmxBean.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.jvm;
import javax.management.Attribute;
import javax.management.AttributeList;
import javax.management.AttributeNotFoundException;
import javax.management.DynamicMBean;
import javax.management.InvalidAttributeValueException;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanException;
import javax.management.MBeanInfo;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import javax.management.ReflectionException;
import java.lang.management.ManagementFactory;
import java.util.Map;
import java.util.TreeMap;
/**
 * Minimal read-only {@link DynamicMBean} that exposes a fixed set of attribute
 * values. Used by tests to register beans with the platform MBeanServer.
 */
class JmxBean implements DynamicMBean {

  // Fix: the server reference is never reassigned, so it should be final to
  // make the intent explicit and the field safely publishable.
  private static final MBeanServer MBEAN_SERVER = ManagementFactory.getPlatformMBeanServer();

  /** Register the bean, replacing any existing registration with the same name. */
  static void register(JmxBean bean) throws Exception {
    if (MBEAN_SERVER.isRegistered(bean.id)) {
      MBEAN_SERVER.unregisterMBean(bean.id);
    }
    MBEAN_SERVER.registerMBean(bean, bean.id);
  }

  /** Unregister the bean if it is currently registered; no-op otherwise. */
  static void unregister(JmxBean bean) throws Exception {
    if (MBEAN_SERVER.isRegistered(bean.id)) {
      MBEAN_SERVER.unregisterMBean(bean.id);
    }
  }

  private final ObjectName id;
  // Sorted copy of the supplied attributes; defensive so later changes to the
  // caller's map cannot alter the bean.
  private final Map<String, Object> attributes;

  /** Create a bean with the given name exposing the given attribute values. */
  JmxBean(ObjectName id, Map<String, Object> attributes) {
    this.id = id;
    this.attributes = new TreeMap<>(attributes);
  }

  @Override
  public Object getAttribute(String name)
      throws AttributeNotFoundException, MBeanException, ReflectionException {
    Object value = attributes.get(name);
    if (value == null) {
      throw new AttributeNotFoundException("no attribute '" + name + "' for jmx bean '" + id + "'");
    }
    return value;
  }

  @Override
  public AttributeList getAttributes(String[] names) {
    AttributeList list = new AttributeList();
    for (String name : names) {
      try {
        list.add(new Attribute(name, getAttribute(name)));
      } catch (Exception e) {
        // This method cannot declare checked exceptions, so propagate lookup
        // failures as unchecked with the cause preserved.
        throw new RuntimeException(e.getMessage(), e);
      }
    }
    return list;
  }

  /** Not supported: this bean is read-only. */
  @Override
  public void setAttribute(Attribute attribute)
      throws AttributeNotFoundException, InvalidAttributeValueException, MBeanException, ReflectionException {
    throw new UnsupportedOperationException("mbean '" + id + "' is read-only");
  }

  /** Not supported: this bean is read-only. */
  @Override
  public AttributeList setAttributes(AttributeList attributes) {
    throw new UnsupportedOperationException("mbean '" + id + "' is read-only");
  }

  /** Not supported: this bean exposes no operations. */
  @Override
  public Object invoke(String actionName, Object[] params, String[] signature)
      throws MBeanException, ReflectionException {
    throw new UnsupportedOperationException("mbean '" + id + "' is read-only");
  }

  @Override
  public MBeanInfo getMBeanInfo() {
    MBeanAttributeInfo[] mbeanAttributes = new MBeanAttributeInfo[attributes.size()];
    int i = 0;
    for (Map.Entry<String, Object> entry : attributes.entrySet()) {
      String attrName = entry.getKey();
      Object attrValue = entry.getValue();
      // Numeric attributes are advertised as Number, everything else as String.
      String typeName = (attrValue instanceof Number)
          ? Number.class.getName()
          : String.class.getName();
      boolean isReadable = true;
      boolean isWritable = false;
      boolean isIs = false;
      mbeanAttributes[i++] = new MBeanAttributeInfo(
          attrName, typeName, "???", isReadable, isWritable, isIs);
    }
    return new MBeanInfo(getClass().getName(), "???", mbeanAttributes, null, null, null);
  }
}
| 6,010 |
0 | Create_ds/spectator/spectator-ext-jvm/src/test/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-jvm/src/test/java/com/netflix/spectator/jvm/MappingExprTest.java | /*
* Copyright 2014-2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.jvm;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.util.HashMap;
import java.util.Map;
/**
 * Tests for the MappingExpr helpers: variable substitution in name patterns
 * and evaluation of simple RPN expressions over numeric variables.
 */
public class MappingExprTest {
  // ---- substitute: "{var}" placeholders replaced from a String map ----
  @Test
  public void substituteEmpty() {
    Map<String, String> vars = new HashMap<>();
    String actual = MappingExpr.substitute("", vars);
    Assertions.assertEquals("", actual);
  }
  @Test
  public void substituteMissing() {
    // Unknown variables are left as-is rather than removed or failing.
    Map<String, String> vars = new HashMap<>();
    String actual = MappingExpr.substitute("abc{def}", vars);
    Assertions.assertEquals("abc{def}", actual);
  }
  @Test
  public void substituteSingle() {
    Map<String, String> vars = new HashMap<>();
    vars.put("def", "123");
    String actual = MappingExpr.substitute("abc{def}", vars);
    Assertions.assertEquals("abc123", actual);
  }
  @Test
  public void substituteSingleEmptyVarName() {
    Map<String, String> vars = new HashMap<>();
    vars.put("", "123");
    String actual = MappingExpr.substitute("abc{}", vars);
    Assertions.assertEquals("abc123", actual);
  }
  @Test
  public void substituteSingleMissingClose() {
    // An unterminated placeholder is passed through untouched.
    Map<String, String> vars = new HashMap<>();
    vars.put("def", "123");
    String actual = MappingExpr.substitute("abc{def", vars);
    Assertions.assertEquals("abc{def", actual);
  }
  @Test
  public void substituteMultiple() {
    Map<String, String> vars = new HashMap<>();
    vars.put("def", "123");
    String actual = MappingExpr.substitute("abc{def}, {def}", vars);
    Assertions.assertEquals("abc123, 123", actual);
  }
  @Test
  public void substituteMultipleMissingClose() {
    Map<String, String> vars = new HashMap<>();
    vars.put("def", "123");
    String actual = MappingExpr.substitute("abc{def}, {def", vars);
    Assertions.assertEquals("abc123, {def", actual);
  }
  @Test
  public void substituteMultipleContainsOpenBrace() {
    // The variable name itself may contain an open brace; matching is on the
    // closing brace, not on balanced pairs.
    Map<String, String> vars = new HashMap<>();
    vars.put("def, {def", "123");
    String actual = MappingExpr.substitute("abc{def, {def}", vars);
    Assertions.assertEquals("abc123", actual);
  }
  @Test
  public void substituteMultiVar() {
    Map<String, String> vars = new HashMap<>();
    vars.put("def", "123");
    vars.put("ghi", "456");
    String actual = MappingExpr.substitute("abc{def}, {ghi}, {def}", vars);
    Assertions.assertEquals("abc123, 456, 123", actual);
  }
  @Test
  public void substituteDecapitalize() {
    // Substituted values have their first character lower-cased by default.
    Map<String, String> vars = new HashMap<>();
    vars.put("name", "FooBarBaz");
    String actual = MappingExpr.substitute("abc.def.{name}", vars);
    Assertions.assertEquals("abc.def.fooBarBaz", actual);
  }
  @Test
  public void substituteRaw() {
    // The "raw:" prefix preserves the value exactly, without decapitalization.
    Map<String, String> vars = new HashMap<>();
    vars.put("name", "FooBarBaz");
    String actual = MappingExpr.substitute("abc.def.{raw:name}", vars);
    Assertions.assertEquals("abc.def.FooBarBaz", actual);
  }
  @Test
  public void substituteRawEmtpyVarName() {
    Map<String, String> vars = new HashMap<>();
    vars.put("", "FooBarBaz");
    String actual = MappingExpr.substitute("abc.def.{raw:}", vars);
    Assertions.assertEquals("abc.def.FooBarBaz", actual);
  }
  // ---- eval: RPN expressions over a Number map ----
  @Test
  public void evalMissing() {
    // Unknown variables evaluate to NaN rather than throwing.
    Map<String, Number> vars = new HashMap<>();
    Double v = MappingExpr.eval("{foo}", vars);
    Assertions.assertTrue(v.isNaN());
  }
  @Test
  public void evalSimple() {
    Map<String, Number> vars = new HashMap<>();
    vars.put("foo", 0.0);
    Double v = MappingExpr.eval("{foo}", vars);
    Assertions.assertEquals(0.0, v, 1e-12);
  }
  @Test
  public void evalConstant() {
    Map<String, Number> vars = new HashMap<>();
    Double v = MappingExpr.eval("42.0", vars);
    Assertions.assertEquals(42.0, v, 1e-12);
  }
  @Test
  public void evalAdd() {
    Map<String, Number> vars = new HashMap<>();
    vars.put("foo", 1.0);
    Double v = MappingExpr.eval("42.0,{foo},:add", vars);
    Assertions.assertEquals(43.0, v, 1e-12);
  }
  @Test
  public void evalSub() {
    Map<String, Number> vars = new HashMap<>();
    vars.put("foo", 1.0);
    Double v = MappingExpr.eval("42.0,{foo},:sub", vars);
    Assertions.assertEquals(41.0, v, 1e-12);
  }
  @Test
  public void evalMul() {
    Map<String, Number> vars = new HashMap<>();
    vars.put("foo", 2.0);
    Double v = MappingExpr.eval("42.0,{foo},:mul", vars);
    Assertions.assertEquals(84.0, v, 1e-12);
  }
  @Test
  public void evalDiv() {
    Map<String, Number> vars = new HashMap<>();
    vars.put("foo", 2.0);
    Double v = MappingExpr.eval("42.0,{foo},:div", vars);
    Assertions.assertEquals(21.0, v, 1e-12);
  }
  // ---- :if-changed keeps the value only when current != previous ----
  @Test
  public void evalIfChangedYes() {
    Map<String, Number> vars = new HashMap<>();
    vars.put("foo", 2.0);
    vars.put("previous:foo", 3.0);
    Double v = MappingExpr.eval("42.0,{foo},{previous:foo},:if-changed", vars);
    Assertions.assertEquals(42.0, v, 1e-12);
  }
  @Test
  public void evalIfChangedNo() {
    Map<String, Number> vars = new HashMap<>();
    vars.put("foo", 2.0);
    vars.put("previous:foo", 2.0);
    Double v = MappingExpr.eval("42.0,{foo},{previous:foo},:if-changed", vars);
    Assertions.assertEquals(0.0, v, 1e-12);
  }
}
| 6,011 |
0 | Create_ds/spectator/spectator-ext-jvm/src/test/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-jvm/src/test/java/com/netflix/spectator/jvm/CassandraTest.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.jvm;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.ManualClock;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Utils;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import javax.management.ObjectName;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
public class CassandraTest {
  // JMX mapping config for cassandra metrics loaded from the test resources.
  private final Config config = ConfigFactory.load("cassandra");
  /** Parse all JMX mapping entries from the config into JmxConfig objects. */
  private List<JmxConfig> configs() {
    List<JmxConfig> cfgs = new ArrayList<>();
    for (Config cfg : config.getConfigList("netflix.spectator.agent.jmx.mappings")) {
      cfgs.add(JmxConfig.from(cfg));
    }
    return cfgs;
  }
  /**
   * Apply every config whose query matches the bean name to the data, then
   * return the flattened measurements currently in the registry.
   */
  private List<Measurement> measure(Registry registry, List<JmxConfig> configs, JmxData data) {
    for (JmxConfig cfg : configs) {
      if (cfg.getQuery().apply(data.getName())) {
        for (JmxMeasurementConfig c : cfg.getMeasurements()) {
          c.measure(registry, data);
        }
      }
    }
    return registry.stream()
        .flatMap(m -> StreamSupport.stream(m.measure().spliterator(), false))
        .collect(Collectors.toList());
  }
  /**
   * Build fake JMX data resembling a dropwizard timer bean. The offset {@code i}
   * is added to every numeric attribute so successive samples can be made to
   * differ (or not) between polls.
   */
  private JmxData timer(String props, int i) throws Exception {
    ObjectName name = new ObjectName("org.apache.cassandra.metrics:" + props);
    Map<String, String> stringAttrs = new HashMap<>(name.getKeyPropertyList());
    stringAttrs.put("EventType", "calls");
    stringAttrs.put("LatencyUnit", "MICROSECONDS");
    stringAttrs.put("RateUnit", "SECONDS");
    Map<String, Number> numAttrs = new HashMap<>();
    numAttrs.put("OneMinuteRate", 100.0 + i);
    numAttrs.put("FiveMinuteRate", 500.0 + i);
    numAttrs.put("FifteenMinuteRate", 1500.0 + i);
    numAttrs.put("MeanRate", 987.0 + i);
    numAttrs.put("Count", 1000 + i);
    numAttrs.put("Min", 10 + i);
    numAttrs.put("Max", 9000 + i);
    numAttrs.put("Mean", 1000 + i);
    numAttrs.put("StdDev", 10 + i);
    numAttrs.put("50thPercentile", 5000.0 + i);
    numAttrs.put("75thPercentile", 7500.0 + i);
    numAttrs.put("95thPercentile", 9500.0 + i);
    numAttrs.put("99thPercentile", 9900.0 + i);
    numAttrs.put("999thPercentile", 9990.0 + i);
    return new JmxData(name, stringAttrs, numAttrs);
  }
  @Test
  public void readLatency() throws Exception {
    // Percentile attributes (microseconds) should be converted to seconds.
    Registry r = new DefaultRegistry(new ManualClock());
    List<JmxConfig> configs = configs();
    JmxData data = timer("keyspace=test,name=ReadLatency,scope=foo,type=ColumnFamily", 0);
    List<Measurement> ms = measure(r, configs, data);
    Assertions.assertEquals(7, ms.size());
    Assertions.assertEquals(
        50.0e-4,
        Utils.first(ms, "statistic", "percentile_50").value(),
        1e-12);
    data = timer("keyspace=test,name=ReadLatency,scope=foo,type=ColumnFamily", 1);
    ms = measure(r, configs, data);
    Assertions.assertEquals(7, ms.size());
    Assertions.assertEquals(
        50.01e-4,
        Utils.first(ms, "statistic", "percentile_50").value(),
        1e-12);
  }
  // Compensate for: https://github.com/dropwizard/metrics/issues/1030
  @Test
  public void readLatencyNoActivity() throws Exception {
    Registry r = new DefaultRegistry(new ManualClock());
    List<JmxConfig> configs = configs();
    JmxData data = timer("keyspace=test,name=ReadLatency,scope=foo,type=ColumnFamily", 0);
    List<Measurement> ms = measure(r, configs, data);
    Assertions.assertEquals(7, ms.size());
    Assertions.assertEquals(
        50.0e-4,
        Utils.first(ms, "statistic", "percentile_50").value(),
        1e-12);
    // Identical sample (no activity): the percentile should report 0 rather
    // than repeating the stale value from the previous poll.
    data = timer("keyspace=test,name=ReadLatency,scope=foo,type=ColumnFamily", 0);
    ms = measure(r, configs, data);
    Assertions.assertEquals(7, ms.size());
    Assertions.assertEquals(
        0.0,
        Utils.first(ms, "statistic", "percentile_50").value(),
        1e-12);
    // Activity resumes: the new percentile value is reported again.
    data = timer("keyspace=test,name=ReadLatency,scope=foo,type=ColumnFamily", 1);
    ms = measure(r, configs, data);
    Assertions.assertEquals(7, ms.size());
    Assertions.assertEquals(
        50.01e-4,
        Utils.first(ms, "statistic", "percentile_50").value(),
        1e-12);
  }
}
| 6,012 |
0 | Create_ds/spectator/spectator-ext-jvm/src/test/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-jvm/src/test/java/com/netflix/spectator/jvm/JmxDataTest.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.jvm;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import javax.management.ObjectName;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class JmxDataTest {
  @Test
  public void objectNamePropOverridesAttributes() throws Exception {
    // Register a bean whose "name" attribute collides with the ObjectName key
    // property; the quoted ObjectName property must win over the attribute.
    ObjectName beanName = new ObjectName("CatalinaTest:type=ThreadPool,name=\"http-nio\"");
    Map<String, Object> attrs = new HashMap<>();
    attrs.put("modelType", "nio");
    attrs.put("name", "http-nio");
    JmxBean bean = new JmxBean(beanName, attrs);
    JmxBean.register(bean);
    List<JmxData> matches = JmxData.query("CatalinaTest:type=ThreadPool,*");
    Assertions.assertEquals(1, matches.size());
    for (JmxData item : matches) {
      Assertions.assertEquals("nio", item.getStringAttrs().get("modelType"));
      Assertions.assertEquals("\"http-nio\"", item.getStringAttrs().get("name"));
    }
    JmxBean.unregister(bean);
  }
}
| 6,013 |
0 | Create_ds/spectator/spectator-ext-jvm/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-jvm/src/main/java/com/netflix/spectator/jvm/JmxMeasurementConfig.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.jvm;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.typesafe.config.Config;
import javax.management.ObjectName;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;
/**
 * Config for extracting a measurement from a JMX bean.
*
* <ul>
* <li><b>name:</b> name pattern to use, see {@link MappingExpr#substitute(String, Map)}</li>
* <li><b>tags:</b> tags to add on, values can use patterns,
* see {@link MappingExpr#substitute(String, Map)}</li>
* <li><b>value:</b> value expression, see {@link MappingExpr#eval(String, Map)}</li>
* <li><b>counter:</b> is the value a monotonically increasing counter value?</li>
* </ul>
*/
final class JmxMeasurementConfig {

  /** Create from a Typesafe Config object; see the class docs for the expected keys. */
  static JmxMeasurementConfig from(Config config) {
    String name = config.getString("name");
    Map<String, String> tags = config.getConfigList("tags")
        .stream()
        .collect(Collectors.toMap(c -> c.getString("key"), c -> c.getString("value")));
    String value = config.getString("value");
    // "counter" is optional and defaults to false, i.e. report as a gauge
    boolean counter = config.hasPath("counter") && config.getBoolean("counter");
    return new JmxMeasurementConfig(name, tags, value, counter);
  }

  private final String nameMapping;
  private final Map<String, String> tagMappings;
  private final String valueMapping;
  private final boolean counter;

  // Previous sample for each bean, used to expose "previous:" variables to the
  // value expression (e.g. for the :if-changed operator).
  private final Map<ObjectName, JmxData> previousData;

  // Previously reported value for each counter id, used to compute deltas.
  // Long.MIN_VALUE is the "no previous sample" sentinel.
  private final Map<Id, AtomicLong> previousCount;

  /** Create a new instance. */
  JmxMeasurementConfig(
      String nameMapping,
      Map<String, String> tagMappings,
      String valueMapping,
      boolean counter) {
    this.nameMapping = nameMapping;
    this.tagMappings = tagMappings;
    this.valueMapping = valueMapping;
    this.counter = counter;
    this.previousData = new ConcurrentHashMap<>();
    this.previousCount = new ConcurrentHashMap<>();
  }

  /**
   * Extract the configured measurement from {@code data} and record it in the
   * registry, either as a counter delta or by setting a gauge. Values that
   * evaluate to NaN are ignored.
   */
  void measure(Registry registry, JmxData data) {
    Map<String, String> tags = tagMappings.entrySet().stream().collect(Collectors.toMap(
        Map.Entry::getKey,
        e -> MappingExpr.substitute(e.getValue(), data.getStringAttrs())
    ));
    Id id = registry
        .createId(MappingExpr.substitute(nameMapping, data.getStringAttrs()))
        .withTags(tags);

    // Make the previous sample's numeric attributes available to the value
    // expression with a "previous:" prefix.
    Map<String, Number> numberAttrs = new HashMap<>(data.getNumberAttrs());
    JmxData previous = previousData.put(data.getName(), data);
    if (previous != null) {
      previous.getNumberAttrs().forEach((key, value) -> numberAttrs.put("previous:" + key, value));
    }

    Double v = MappingExpr.eval(valueMapping, numberAttrs);
    if (v != null && !v.isNaN()) {
      if (counter) {
        updateCounter(registry, id, v.longValue());
      } else {
        registry.gauge(id).set(v);
      }
    }
  }

  /**
   * Record the delta since the previous sample as a counter increment. The
   * first sample only establishes a baseline and records nothing.
   */
  private void updateCounter(Registry registry, Id id, long v) {
    AtomicLong prev = previousCount.computeIfAbsent(id, i -> new AtomicLong(Long.MIN_VALUE));
    long p = prev.get();
    // Only report if we won the CAS; a failed CAS means a concurrent caller
    // accounted for the change. A negative delta indicates the source counter
    // was reset (e.g. the bean was re-registered), so it is dropped rather
    // than corrupting the counter; the new value becomes the next baseline.
    if (prev.compareAndSet(p, v) && p != Long.MIN_VALUE && v >= p) {
      registry.counter(id).increment(v - p);
    }
  }
}
| 6,014 |
0 | Create_ds/spectator/spectator-ext-jvm/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-jvm/src/main/java/com/netflix/spectator/jvm/JmxMeter.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.jvm;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.api.Meter;
import com.netflix.spectator.api.Registry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
/** Meter based on a {@link JmxConfig}. */
/** Meter based on a {@link JmxConfig}. */
final class JmxMeter implements Meter {

  private static final Logger LOGGER = LoggerFactory.getLogger(JmxMeter.class);

  private final Registry registry;
  private final JmxConfig config;
  private final Id id;

  /** Create a new instance for the given config. */
  JmxMeter(Registry registry, JmxConfig config) {
    this.registry = registry;
    this.config = config;
    this.id = registry.createId(config.getQuery().getCanonicalName());
  }

  @Override public Id id() {
    return id;
  }

  @Override public Iterable<Measurement> measure() {
    try {
      JmxData.query(config.getQuery())
          .forEach(data -> config.getMeasurements().forEach(m -> m.measure(registry, data)));
    } catch (Exception e) {
      LOGGER.warn("failed to query jmx data: {}", config.getQuery().getCanonicalName(), e);
    }
    // Values are written directly into the registry as counters/gauges, so
    // there is nothing to report from this meter itself.
    return Collections.emptyList();
  }

  @Override public boolean hasExpired() {
    return false;
  }
}
| 6,015 |
0 | Create_ds/spectator/spectator-ext-jvm/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-jvm/src/main/java/com/netflix/spectator/jvm/JmxConfig.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.jvm;
import com.typesafe.config.Config;
import javax.management.ObjectName;
import java.util.ArrayList;
import java.util.List;
/**
* Config for fetching data from JMX. A configuration consists of:
*
* <ul>
* <li><b>query:</b> object name query expression, see {@link ObjectName}.</li>
* <li><b>measurements:</b> list of {@link JmxMeasurementConfig} objects.</li>
* </ul>
*/
final class JmxConfig {

  private final ObjectName query;
  private final List<JmxMeasurementConfig> measurements;

  /** Create a new instance. */
  JmxConfig(ObjectName query, List<JmxMeasurementConfig> measurements) {
    this.query = query;
    this.measurements = measurements;
  }

  /**
   * Create a new instance from the Typesafe Config object. Any parse failure
   * is rethrown as an IllegalArgumentException.
   */
  static JmxConfig from(Config config) {
    try {
      ObjectName name = new ObjectName(config.getString("query"));
      List<JmxMeasurementConfig> measurements = new ArrayList<>();
      for (Config measurement : config.getConfigList("measurements")) {
        measurements.add(JmxMeasurementConfig.from(measurement));
      }
      return new JmxConfig(name, measurements);
    } catch (Exception e) {
      throw new IllegalArgumentException("invalid mapping config", e);
    }
  }

  /** Object name query expression. */
  ObjectName getQuery() {
    return query;
  }

  /** Measurements to extract for the query. */
  List<JmxMeasurementConfig> getMeasurements() {
    return measurements;
  }
}
| 6,016 |
0 | Create_ds/spectator/spectator-ext-jvm/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-jvm/src/main/java/com/netflix/spectator/jvm/JmxPoller.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.jvm;
import com.netflix.spectator.api.Registry;
import com.typesafe.config.Config;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
/**
* Helper to poll JMX data based on a config and update a registry.
*
* <p><b>This class is an internal implementation detail only intended for use by
* {@code spectator-agent}. It is subject to change without notice.</b></p>
*/
public class JmxPoller {

  private static final Logger LOGGER = LoggerFactory.getLogger(JmxPoller.class);

  private final Registry registry;

  // Parsed configs; replaced wholesale by updateConfigs()
  private List<JmxConfig> configs = Collections.emptyList();

  /**
   * Create a new instance.
   *
   * @param registry
   *     Registry to update when polling the data.
   */
  public JmxPoller(Registry registry) {
    this.registry = registry;
  }

  /**
   * Update the set of configs for what to poll.
   */
  public void updateConfigs(List<? extends Config> configs) {
    this.configs = configs.stream()
        .map(JmxConfig::from)
        .collect(Collectors.toList());
  }

  /**
   * Poll the JMX data once and update the registry.
   */
  public void poll() {
    configs.forEach(this::pollConfig);
  }

  /** Query and record the measurements for a single config, logging any failure. */
  private void pollConfig(JmxConfig config) {
    try {
      for (JmxData data : JmxData.query(config.getQuery())) {
        for (JmxMeasurementConfig measurement : config.getMeasurements()) {
          measurement.measure(registry, data);
        }
      }
    } catch (Exception e) {
      LOGGER.warn("failed to query jmx data: {}", config.getQuery().getCanonicalName(), e);
    }
  }
}
| 6,017 |
0 | Create_ds/spectator/spectator-ext-jvm/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-jvm/src/main/java/com/netflix/spectator/jvm/HotspotRuntime.java | /*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.jvm;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.reflect.Method;
/**
* Helper for accessing the HotspotRuntimeMBean in order to get information about the amount
* of time spent in safepoints.
*/
final class HotspotRuntime {

  private static final Logger LOGGER = LoggerFactory.getLogger(HotspotRuntime.class);

  private HotspotRuntime() {
  }

  // Reflective handles into sun.management. If the mbean cannot be accessed,
  // runtimeMBean is left null and isSupported() reports false.
  private static Class<?> runtimeMBeanType;
  private static Object runtimeMBean;
  private static Method safepointCount;
  private static Method safepointTime;
  private static Method safepointSyncTime;

  static {
    try {
      // The implementation class, sun.management.HotspotRuntime, is package private and
      // thus the methods cannot be accessed without setAccessible(true). Use the interface
      // type for getting the method handles so all reflective access is via public classes and
      // methods. That allows this approach to work with either:
      //
      // --add-exports java.management/sun.management=ALL-UNNAMED OR
      // --add-opens java.management/sun.management=ALL-UNNAMED
      runtimeMBeanType = Class.forName("sun.management.HotspotRuntimeMBean");
      runtimeMBean = Class.forName("sun.management.ManagementFactoryHelper")
          .getMethod("getHotspotRuntimeMBean")
          .invoke(null);
      safepointCount = getMethod("getSafepointCount");
      safepointTime = getMethod("getTotalSafepointTime");
      safepointSyncTime = getMethod("getSafepointSyncTime");
    } catch (Exception e) {
      // Not Hotspot or IllegalAccessError from JDK 16+ due sun.management package being
      // inaccessible. Treated as "not supported" rather than an error.
      LOGGER.debug("unable to access HotspotRuntimeMBean", e);
      runtimeMBean = null;
    }
  }

  /** Get method and double check that we have permissions to invoke it. */
  private static Method getMethod(String name) throws Exception {
    Method method = runtimeMBeanType.getMethod(name);
    method.invoke(runtimeMBean); // ignore result, just checking access
    return method;
  }

  /**
   * Invoke one of the long-valued accessors on the mbean.
   *
   * @throws UnsupportedOperationException if the mbean is not available.
   * @throws IllegalStateException if the reflective call fails.
   */
  private static long getValue(Method method) {
    if (runtimeMBean == null) {
      throw new UnsupportedOperationException("HotspotRuntime is not supported");
    }
    try {
      return (Long) method.invoke(runtimeMBean);
    } catch (Exception e) {
      throw new IllegalStateException("failed to invoke " + method, e);
    }
  }

  /** Returns the HotspotRuntimeMBean instance, or null if not supported. */
  static Object getRuntimeMBean() {
    return runtimeMBean;
  }

  /** Returns true if the safepoint checks are supported. */
  static boolean isSupported() {
    return runtimeMBean != null;
  }

  /** Total number of safepoints since the JVM was started. */
  static long getSafepointCount() {
    return getValue(safepointCount);
  }

  /** Total time in milliseconds spent in safepoints since the JVM was started. */
  static long getSafepointTime() {
    return getValue(safepointTime);
  }

  /**
   * Total time in milliseconds spent synchronizing in order to get to safepoints since the
   * JVM was started.
   */
  static long getSafepointSyncTime() {
    return getValue(safepointSyncTime);
  }
}
| 6,018 |
0 | Create_ds/spectator/spectator-ext-jvm/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-jvm/src/main/java/com/netflix/spectator/jvm/JmxData.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.jvm;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.management.Attribute;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanInfo;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import javax.management.openmbean.CompositeDataSupport;
import javax.management.openmbean.CompositeType;
import java.lang.management.ManagementFactory;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
/**
* Represents the results from querying data out of JMX.
*/
class JmxData {

  private static final Logger LOGGER = LoggerFactory.getLogger(JmxData.class);

  /** Get data from JMX using object name query expression. */
  static List<JmxData> query(String query) throws Exception {
    return query(new ObjectName(query));
  }

  /** Get data from JMX using object name query expression, against the platform mbean server. */
  static List<JmxData> query(ObjectName query) throws Exception {
    return query(ManagementFactory.getPlatformMBeanServer(), query);
  }

  /**
   * Convert object to string and checking if it fails. Attribute values come from arbitrary
   * mbean implementations whose {@code toString()} may throw; since this is only used for
   * trace logging, the failure is folded into the returned string rather than propagated.
   */
  @SuppressWarnings("PMD.AvoidCatchingThrowable")
  static String mkString(Object obj) {
    if (obj == null) {
      return "null";
    }
    try {
      return obj.toString() + " (type is " + obj.getClass() + ")";
    } catch (Throwable t) {
      return t.getClass().toString() + ": " + t.getMessage() + " (type is " + obj.getClass() + ")";
    }
  }

  /**
   * Get data from JMX using object name query expression.
   *
   * <p>For each matching bean, attributes are flattened into two maps: string attributes
   * (including {@code TimeUnit} values converted to their string form) and numeric attributes
   * (converted to double). Composite values are flattened using {@code "attr.key"} names.
   * The bean's domain and its ObjectName key properties are added as string attributes, with
   * the properties taking precedence over same-named attributes.
   */
  static List<JmxData> query(MBeanServer server, ObjectName query) throws Exception {
    List<JmxData> data = new ArrayList<>();
    Set<ObjectName> names = server.queryNames(query, null);
    LOGGER.trace("query [{}], found {} matches", query, names.size());
    for (ObjectName name : names) {
      // Collect the attribute names so all values can be fetched with a single call
      MBeanInfo info = server.getMBeanInfo(name);
      MBeanAttributeInfo[] attrs = info.getAttributes();
      String[] attrNames = new String[attrs.length];
      for (int i = 0; i < attrs.length; ++i) {
        attrNames[i] = attrs[i].getName();
      }

      Map<String, String> stringAttrs = new HashMap<>();
      stringAttrs.put("domain", name.getDomain());
      Map<String, Number> numberAttrs = new HashMap<>();

      for (Attribute attr : server.getAttributes(name, attrNames).asList()) {
        Object obj = attr.getValue();
        if (LOGGER.isTraceEnabled()) {
          LOGGER.trace("attribute [{}][{}] = {}", name, attr.getName(), mkString(obj));
        }
        if (obj instanceof String) {
          stringAttrs.put(attr.getName(), (String) obj);
        } else if (obj instanceof Number) {
          numberAttrs.put(attr.getName(), ((Number) obj).doubleValue());
        } else if (obj instanceof CompositeDataSupport) {
          // Flatten composite values using "<attribute>.<key>" names
          CompositeDataSupport composite = (CompositeDataSupport) obj;
          CompositeType compositeType = composite.getCompositeType();
          for (String key : compositeType.keySet()) {
            if (composite.containsKey(key)) {
              Object o = composite.get(key);
              String attrKey = attr.getName() + "." + key;
              if (o instanceof Number) {
                numberAttrs.put(attrKey, ((Number) o).doubleValue());
              } else if (o instanceof String) {
                stringAttrs.put(attrKey, (String) o);
              } else if (o instanceof TimeUnit) {
                stringAttrs.put(attrKey, o.toString());
              }
            }
          }
        } else if (obj instanceof TimeUnit) {
          stringAttrs.put(attr.getName(), obj.toString());
        }
      }

      // Add properties from ObjectName after attributes to ensure they have a higher
      // priority if the same key is used both in the Object and as an attribute
      stringAttrs.putAll(name.getKeyPropertyList());

      data.add(new JmxData(name, stringAttrs, numberAttrs));
    }
    return data;
  }

  private final ObjectName name;
  private final Map<String, String> stringAttrs;
  private final Map<String, Number> numberAttrs;

  /**
   * Create a new instance. The maps are wrapped, not copied; callers are expected not to
   * modify them afterwards.
   */
  JmxData(ObjectName name, Map<String, String> stringAttrs, Map<String, Number> numberAttrs) {
    this.name = name;
    this.stringAttrs = Collections.unmodifiableMap(stringAttrs);
    this.numberAttrs = Collections.unmodifiableMap(numberAttrs);
  }

  /** Return the name of the bean. */
  ObjectName getName() {
    return name;
  }

  /** Return attributes with string values. */
  Map<String, String> getStringAttrs() {
    return stringAttrs;
  }

  /** Return attributes with numeric values. */
  Map<String, Number> getNumberAttrs() {
    return numberAttrs;
  }

  @Override public String toString() {
    StringBuilder buf = new StringBuilder(256);
    buf.append(name.toString())
        .append("\n- string attributes\n");
    for (Map.Entry<String, String> entry : new TreeMap<>(stringAttrs).entrySet()) {
      buf.append(" - ").append(entry.getKey()).append(" = ").append(entry.getValue()).append('\n');
    }
    buf.append("- number attributes\n");
    for (Map.Entry<String, Number> entry : new TreeMap<>(numberAttrs).entrySet()) {
      buf.append(" - ").append(entry.getKey()).append(" = ").append(entry.getValue()).append('\n');
    }
    return buf.toString();
  }
}
| 6,019 |
0 | Create_ds/spectator/spectator-ext-jvm/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-jvm/src/main/java/com/netflix/spectator/jvm/Jmx.java | /*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.jvm;
import com.netflix.spectator.api.Gauge;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Statistic;
import com.netflix.spectator.api.patterns.PolledMeter;
import com.typesafe.config.Config;
import java.lang.management.BufferPoolMXBean;
import java.lang.management.ClassLoadingMXBean;
import java.lang.management.CompilationMXBean;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryPoolMXBean;
import java.lang.management.ThreadMXBean;
/**
* Helpers for working with JMX mbeans.
*/
public final class Jmx {

  private Jmx() {
  }

  /**
   * Add meters for the standard MXBeans provided by the jvm. This method will use
   * {@link java.lang.management.ManagementFactory#getPlatformMXBeans(Class)} to get the set of
   * mbeans from the local jvm.
   */
  public static void registerStandardMXBeans(Registry registry) {
    monitorClassLoadingMXBean(registry);
    monitorThreadMXBean(registry);
    monitorCompilationMXBean(registry);
    maybeRegisterHotspotInternal(registry);
    for (MemoryPoolMXBean mbean : ManagementFactory.getMemoryPoolMXBeans()) {
      registry.register(new MemoryPoolMeter(registry, mbean));
    }
    for (BufferPoolMXBean mbean : ManagementFactory.getPlatformMXBeans(BufferPoolMXBean.class)) {
      registry.register(new BufferPoolMeter(registry, mbean));
    }
  }

  /** Monotonic counters for the number of classes loaded and unloaded. */
  private static void monitorClassLoadingMXBean(Registry registry) {
    ClassLoadingMXBean classLoadingMXBean = ManagementFactory.getClassLoadingMXBean();
    PolledMeter.using(registry)
        .withName("jvm.classloading.classesLoaded")
        .monitorMonotonicCounter(classLoadingMXBean, ClassLoadingMXBean::getTotalLoadedClassCount);
    PolledMeter.using(registry)
        .withName("jvm.classloading.classesUnloaded")
        .monitorMonotonicCounter(classLoadingMXBean, ClassLoadingMXBean::getUnloadedClassCount);
  }

  /**
   * Gauges for current thread counts broken down by daemon status, plus a monotonic
   * counter for the total number of threads started.
   */
  private static void monitorThreadMXBean(Registry registry) {
    ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean();
    PolledMeter.using(registry)
        .withName("jvm.thread.threadsStarted")
        .monitorMonotonicCounter(threadMXBean, ThreadMXBean::getTotalStartedThreadCount);
    Gauge nonDaemonThreadCount = registry.gauge("jvm.thread.threadCount", "id", "non-daemon");
    Gauge daemonThreadCount = registry.gauge("jvm.thread.threadCount", "id", "daemon");
    PolledMeter.poll(registry, () -> {
      int threads = threadMXBean.getThreadCount();
      int daemonThreads = threadMXBean.getDaemonThreadCount();
      // The two counts are sampled separately, so clamp to guard against a
      // transient negative difference between the samples.
      nonDaemonThreadCount.set(Math.max(0, threads - daemonThreads));
      daemonThreadCount.set(daemonThreads);
    });
  }

  /** Total time spent in JIT compilation, in seconds, if supported by this jvm. */
  private static void monitorCompilationMXBean(Registry registry) {
    CompilationMXBean compilationMXBean = ManagementFactory.getCompilationMXBean();
    if (compilationMXBean.isCompilationTimeMonitoringSupported()) {
      PolledMeter.using(registry)
          .withName("jvm.compilation.compilationTime")
          .withTag("compiler", compilationMXBean.getName())
          // getTotalCompilationTime is in milliseconds, report seconds
          .monitorMonotonicCounterDouble(compilationMXBean, c -> c.getTotalCompilationTime() / 1000.0);
    }
  }

  /** Register safepoint metrics if the Hotspot internal mbean is accessible. */
  private static void maybeRegisterHotspotInternal(Registry registry) {
    if (HotspotRuntime.isSupported()) {
      // The safepointCount is reported as the count for both the safepointTime and
      // safepointSyncTime. This should allow the metrics to work as normal timers and
      // for the user to compute the average time spent per operation.
      Object mbean = HotspotRuntime.getRuntimeMBean();
      PolledMeter.using(registry)
          .withName("jvm.hotspot.safepointTime")
          .withTag(Statistic.count)
          .monitorMonotonicCounter(mbean, b -> HotspotRuntime.getSafepointCount());
      PolledMeter.using(registry)
          .withName("jvm.hotspot.safepointTime")
          .withTag(Statistic.totalTime)
          // value from the mbean is milliseconds, report seconds
          .monitorMonotonicCounterDouble(mbean, b -> HotspotRuntime.getSafepointTime() / 1000.0);
      PolledMeter.using(registry)
          .withName("jvm.hotspot.safepointSyncTime")
          .withTag(Statistic.count)
          .monitorMonotonicCounter(mbean, b -> HotspotRuntime.getSafepointCount());
      PolledMeter.using(registry)
          .withName("jvm.hotspot.safepointSyncTime")
          .withTag(Statistic.totalTime)
          .monitorMonotonicCounterDouble(mbean, b -> HotspotRuntime.getSafepointSyncTime() / 1000.0);
    }
  }

  /**
   * Add meters based on configured JMX queries. See the {@link JmxConfig} class for more
   * details.
   *
   * @param registry
   *     Registry to use for reporting the data.
   * @param cfg
   *     Config object with the mappings.
   */
  public static void registerMappingsFromConfig(Registry registry, Config cfg) {
    registry.register(new JmxMeter(registry, JmxConfig.from(cfg)));
  }
}
| 6,020 |
0 | Create_ds/spectator/spectator-ext-jvm/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-jvm/src/main/java/com/netflix/spectator/jvm/MappingExpr.java | /*
* Copyright 2014-2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.jvm;
import java.beans.Introspector;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Map;
import java.util.function.DoubleBinaryOperator;
/**
 * Helper for handling basic expressions used as part of the mapping config.
*/
final class MappingExpr {

  private MappingExpr() {
  }

  /**
   * Substitute named variables in the pattern string with the corresponding
   * values in the variables map.
   *
   * @param pattern
   *     Pattern string with placeholders, a variable name surrounded by curly
   *     braces, e.g.: {@code {variable name}}. A {@code raw:} prefix on the
   *     name suppresses the default decapitalization of the value.
   * @param vars
   *     Map of variable substitutions that are available.
   * @return
   *     String with values substituted in. If no matching key is found for a
   *     placeholder, then it will not be modified and left in place.
   */
  @SuppressWarnings("PMD.NPathComplexity")
  static String substitute(String pattern, Map<String, String> vars) {
    int open = pattern.indexOf('{');
    int close = (open < 0) ? -1 : pattern.indexOf('}', open);
    if (open < 0 || close < 0) {
      // No complete placeholder anywhere, avoid allocating a builder
      return pattern;
    }

    StringBuilder result = new StringBuilder(pattern.length());
    int pos = 0;
    while (true) {
      result.append(pattern, pos, open);
      String name = pattern.substring(open + 1, close);
      boolean raw = name.startsWith("raw:");
      String replacement = vars.get(raw ? name.substring("raw:".length()) : name);
      if (replacement == null) {
        // Unknown variable, keep the placeholder as-is
        result.append('{').append(name).append('}');
      } else {
        result.append(raw ? replacement : Introspector.decapitalize(replacement));
      }
      pos = close + 1;
      open = pattern.indexOf('{', pos);
      close = (open < 0) ? -1 : pattern.indexOf('}', open);
      if (open < 0 || close < 0) {
        break;
      }
    }
    result.append(pattern, pos, pattern.length());
    return result.toString();
  }

  /**
   * Evaluate a simple stack expression for the value.
   *
   * @param expr
   *     Basic stack expression that supports placeholders, numeric constants,
   *     and basic binary operations (:add, :sub, :mul, :div) along with the
   *     special :if-changed operator. Tokens are separated by commas and/or
   *     whitespace.
   * @param vars
   *     Map of variable substitutions that are available. Placeholders with
   *     no matching key evaluate to NaN.
   * @return
   *     Double value for the expression. If the expression cannot be evaluated
   *     properly, then null will be returned.
   */
  @SuppressWarnings("PMD")
  static Double eval(String expr, Map<String, ? extends Number> vars) {
    Deque<Double> stack = new ArrayDeque<>();
    for (String token : expr.split("[,\\s]+")) {
      if (":add".equals(token)) {
        binaryOp(stack, (a, b) -> a + b);
      } else if (":sub".equals(token)) {
        binaryOp(stack, (a, b) -> a - b);
      } else if (":mul".equals(token)) {
        binaryOp(stack, (a, b) -> a * b);
      } else if (":div".equals(token)) {
        binaryOp(stack, (a, b) -> a / b);
      } else if (":if-changed".equals(token)) {
        ifChanged(stack);
      } else if (token.startsWith("{") && token.endsWith("}")) {
        Number v = vars.get(token.substring(1, token.length() - 1));
        stack.addFirst(v == null ? Double.NaN : v.doubleValue());
      } else {
        stack.addFirst(Double.parseDouble(token));
      }
    }
    return stack.removeFirst();
  }

  /** Pop the top two values, apply {@code op}, and push the result. */
  private static void binaryOp(Deque<Double> stack, DoubleBinaryOperator op) {
    double rhs = stack.removeFirst();
    double lhs = stack.removeFirst();
    stack.addFirst(op.applyAsDouble(lhs, rhs));
  }

  /**
   * Helper to zero out a value if there is not a change. For a stack with {@code num v1 v2},
   * if {@code v1 == v2}, then push 0.0 otherwise push {@code num}.
   *
   * For some values placed in JMX they are not regularly updated in all circumstances and
   * reporting the same value for each polling iteration gives the false impression of activity
   * when there is none. A common example is timers with the metrics library where the reservoir
   * is not rescaled during a fetch.
   *
   * https://github.com/dropwizard/metrics/issues/1030
   *
   * This operator can be used in conjunction with the previous variables to zero out the
   * misleading snapshots based on the count. For example:
   *
   * <pre>
   * {50thPercentile},{Count},{previous:Count},:if-changed
   * </pre>
   */
  private static void ifChanged(Deque<Double> stack) {
    double top = stack.removeFirst();
    double next = stack.removeFirst();
    double num = stack.removeFirst();
    stack.addFirst((Double.compare(next, top) == 0) ? 0.0 : num);
  }
}
| 6,021 |
0 | Create_ds/spectator/spectator-ext-jvm/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-jvm/src/main/java/com/netflix/spectator/jvm/MemoryPoolMeter.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.jvm;
import com.netflix.spectator.api.*;
import java.lang.management.MemoryPoolMXBean;
import java.lang.management.MemoryUsage;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
* Wraps a {@link java.lang.management.MemoryPoolMXBean} so it can be registered with spectator.
*/
class MemoryPoolMeter extends AbstractMeter<MemoryPoolMXBean> {

  private final Id usedId;
  private final Id committedId;
  private final Id maxId;

  /**
   * Creates a new instance.
   *
   * @param registry
   *     Spectator registry to use for naming and clock source.
   * @param mbean
   *     Mbean to collect the data from.
   */
  MemoryPoolMeter(Registry registry, MemoryPoolMXBean mbean) {
    super(registry.clock(), registry.createId("jvm.memory").withTag("id", mbean.getName()), mbean);
    String pool = mbean.getName();
    usedId = registry.createId("jvm.memory.used").withTag("id", pool);
    committedId = registry.createId("jvm.memory.committed").withTag("id", pool);
    maxId = registry.createId("jvm.memory.max").withTag("id", pool);
  }

  @Override
  public Iterable<Measurement> measure() {
    MemoryPoolMXBean mbean = ref.get();
    if (mbean == null) {
      // Bean reference has been collected, nothing to report
      return Collections.emptyList();
    }
    long timestamp = clock.wallTime();
    String type = mbean.getType().name();
    MemoryUsage usage = mbean.getUsage();
    List<Measurement> measurements = new ArrayList<>(3);
    measurements.add(new Measurement(usedId.withTag("memtype", type), timestamp, usage.getUsed()));
    measurements.add(
        new Measurement(committedId.withTag("memtype", type), timestamp, usage.getCommitted()));
    measurements.add(new Measurement(maxId.withTag("memtype", type), timestamp, usage.getMax()));
    return measurements;
  }
}
| 6,022 |
0 | Create_ds/spectator/spectator-ext-jvm/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-jvm/src/main/java/com/netflix/spectator/jvm/BufferPoolMeter.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.jvm;
import com.netflix.spectator.api.*;
import java.lang.management.BufferPoolMXBean;
import java.util.ArrayList;
import java.util.List;
/**
* Wraps a {@link java.lang.management.BufferPoolMXBean} so it can be registered with spectator.
*/
/**
 * Wraps a {@link java.lang.management.BufferPoolMXBean} so it can be registered with
 * spectator. Reports the buffer count and memory used for the pool, tagged with the
 * pool name.
 */
class BufferPoolMeter extends AbstractMeter<BufferPoolMXBean> {
  /** Id used for the meter itself, tagged with the pool name. */
  private static Id meterId(Registry registry, String id) {
    return registry.createId("jvm.buffer").withTag("id", id);
  }
  /** Id for the number of buffers in the pool. */
  private static Id bufferCountId(Registry registry, String id) {
    return registry.createId("jvm.buffer.count").withTag("id", id);
  }
  /** Id for the amount of memory used by the pool. */
  private static Id bufferMemoryUsedId(Registry registry, String id) {
    return registry.createId("jvm.buffer.memoryUsed").withTag("id", id);
  }
  private final Id countId;
  private final Id memoryUsedId;
  /**
   * Creates a new instance.
   *
   * @param registry
   *     Spectator registry to use for naming and clock source.
   * @param mbean
   *     Mbean to collect the data from.
   */
  BufferPoolMeter(Registry registry, BufferPoolMXBean mbean) {
    super(registry.clock(), meterId(registry, mbean.getName()), mbean);
    countId = bufferCountId(registry, mbean.getName());
    memoryUsedId = bufferMemoryUsedId(registry, mbean.getName());
  }
  @Override public Iterable<Measurement> measure() {
    final long timestamp = clock.wallTime();
    final BufferPoolMXBean mbean = ref.get();
    // Presize for the two measurements reported; consistent with MemoryPoolMeter.
    final List<Measurement> ms = new ArrayList<>(2);
    if (mbean != null) {
      ms.add(new Measurement(countId, timestamp, mbean.getCount()));
      ms.add(new Measurement(memoryUsedId, timestamp, mbean.getMemoryUsed()));
    }
    return ms;
  }
}
| 6,023 |
0 | Create_ds/spectator/spectator-ext-jvm/src/jmh/java/com/netflix/spectator | Create_ds/spectator/spectator-ext-jvm/src/jmh/java/com/netflix/spectator/jvm/MappingExprSubstitute.java | /*
* Copyright 2014-2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.jvm;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.infra.Blackhole;
import java.beans.Introspector;
import java.util.*;
/**
* <pre>
* ## Java 8
*
* Benchmark Mode Cnt Score Error Units
* customReplaceMatch thrpt 5 32004441.157 ± 1124280.222 ops/s
* customReplaceNoMatch thrpt 5 265951976.539 ± 3763154.472 ops/s
* stringReplaceMatch thrpt 5 793429.235 ± 11693.098 ops/s
* stringReplaceNoMatch thrpt 5 813017.866 ± 17047.396 ops/s
*
*
* Benchmark Mode Cnt Score Error Units
* customReplaceMatch gc.alloc.rate.norm 5 160.012 ± 0.001 B/op
* customReplaceNoMatch gc.alloc.rate.norm 5 0.001 ± 0.001 B/op
* stringReplaceMatch gc.alloc.rate.norm 5 17616.448 ± 0.029 B/op
* stringReplaceNoMatch gc.alloc.rate.norm 5 17616.442 ± 0.045 B/op
*
* ## Java 17
*
* Benchmark Mode Cnt Score Error Units
* customReplaceMatch thrpt 5 22809472.437 ± 1075058.765 ops/s
* customReplaceNoMatch thrpt 5 529952552.582 ± 7374375.509 ops/s
* stringReplaceMatch thrpt 5 2247841.555 ± 58645.893 ops/s
* stringReplaceNoMatch thrpt 5 2156524.729 ± 498269.673 ops/s
*
* Benchmark Mode Cnt Score Error Units
* customReplaceMatch gc.alloc.rate.norm 5 160.019 ± 0.001 B/op
* customReplaceNoMatch gc.alloc.rate.norm 5 0.001 ± 0.001 B/op
* stringReplaceMatch gc.alloc.rate.norm 5 888.197 ± 0.001 B/op
* stringReplaceNoMatch gc.alloc.rate.norm 5 888.201 ± 0.036 B/op
* </pre>
*/
@State(Scope.Thread)
public class MappingExprSubstitute {
  // Sample variable bindings shared by all benchmark methods. The values resemble
  // Cassandra JMX metric attributes (keyspace, ColumnFamily, ReadLatency, etc.).
  private static final Map<String, String> VARS = new HashMap<>();
  static {
    VARS.put("keyspace", "test");
    VARS.put("scope", "foo");
    VARS.put("name", "ReadLatency");
    VARS.put("type", "ColumnFamily");
    VARS.put("LatencyUnit", "MICROSECONDS");
    VARS.put("RateUnit", "SECONDS");
    VARS.put("EventType", "calls");
  }
  /**
   * Baseline substitution using String.replace. For each variable two patterns are
   * replaced: {raw:key} with the raw value and {key} with the decapitalized value.
   * It unconditionally runs a replace pass per entry, which is why the no-match case
   * allocates roughly as much as the match case in the results above.
   *
   * @param pattern
   *     Pattern possibly containing {key} style variable references.
   * @param vars
   *     Variable bindings to substitute into the pattern.
   * @return
   *     Pattern with all known variable references replaced.
   */
  static String substituteString(String pattern, Map<String, String> vars) {
    String value = pattern;
    for (Map.Entry<String, String> entry : vars.entrySet()) {
      String raw = entry.getValue();
      String v = Introspector.decapitalize(raw);
      value = value.replace("{raw:" + entry.getKey() + "}", raw);
      value = value.replace("{" + entry.getKey() + "}", v);
    }
    return value;
  }
  // Measures MappingExpr.substitute when the pattern contains a variable reference.
  @Benchmark
  public void customReplaceMatch(Blackhole bh) {
    bh.consume(MappingExpr.substitute("{keyspace}", VARS));
  }
  // Measures MappingExpr.substitute when the pattern has no variable references.
  @Benchmark
  public void customReplaceNoMatch(Blackhole bh) {
    bh.consume(MappingExpr.substitute("abcdefghi", VARS));
  }
  // Baseline: String.replace based substitution with a matching reference.
  @Benchmark
  public void stringReplaceMatch(Blackhole bh) {
    bh.consume(substituteString("{keyspace}", VARS));
  }
  // Baseline: String.replace based substitution with no matching reference.
  @Benchmark
  public void stringReplaceNoMatch(Blackhole bh) {
    bh.consume(substituteString("abcdefghi", VARS));
  }
}
| 6,024 |
0 | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas/CountingManualClock.java | /*
* Copyright 2022-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import java.util.concurrent.atomic.AtomicLong;
import com.netflix.spectator.api.ManualClock;
/**
* Clock implementation that allows the user to explicitly control the time, and also
* keeps a count of the number of times it was polled. Used in tests to assert the count of
* times the clock has been called.
*/
/**
 * Clock implementation that allows the user to explicitly control the time, and also
 * keeps a count of the number of times it was polled. Used in tests to assert the count
 * of times the clock has been called.
 */
public class CountingManualClock extends ManualClock {
  // Total number of wallTime/monotonicTime invocations.
  private final AtomicLong countPolled;
  /** Create a new instance with both times starting at zero. */
  public CountingManualClock() {
    this(0L, 0L);
  }
  /**
   * Create a new instance.
   *
   * @param wallInit
   *     Initial value for the wall time.
   * @param monotonicInit
   *     Initial value for the monotonic time.
   */
  public CountingManualClock(long wallInit, long monotonicInit) {
    super(wallInit, monotonicInit);
    this.countPolled = new AtomicLong();
  }
  @Override public long wallTime() {
    countPolled.getAndIncrement();
    return super.wallTime();
  }
  @Override public long monotonicTime() {
    countPolled.getAndIncrement();
    return super.monotonicTime();
  }
  /** Returns how many times the clock has been polled. */
  public long countPolled() {
    return countPolled.get();
  }
}
| 6,025 |
0 | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas/AtlasConfigTest.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.impl.AsciiSet;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
 * Tests for the default methods on AtlasConfig that derive boolean flags and the
 * valid tag character set from the underlying property source.
 */
public class AtlasConfigTest {
  /** Creates a config backed by a single property entry. */
  private AtlasConfig configWith(String key, String value) {
    Map<String, String> props = new HashMap<>();
    props.put(key, value);
    return props::get;
  }
  /** Creates a config with no properties defined. */
  private AtlasConfig emptyConfig() {
    Map<String, String> props = Collections.emptyMap();
    return props::get;
  }
  @Test
  public void enabledByDefault() {
    Assertions.assertTrue(emptyConfig().enabled());
  }
  @Test
  public void explicitlyEnabled() {
    Assertions.assertTrue(configWith("atlas.enabled", "true").enabled());
  }
  @Test
  public void explicitlyDisabled() {
    Assertions.assertFalse(configWith("atlas.enabled", "false").enabled());
  }
  @Test
  public void enabledBadValue() {
    // anything other than "true" is expected to disable publishing
    Assertions.assertFalse(configWith("atlas.enabled", "abc").enabled());
  }
  @Test
  public void lwcDisabledByDefault() {
    Assertions.assertFalse(emptyConfig().lwcEnabled());
  }
  @Test
  public void lwcExplicitlyEnabled() {
    Assertions.assertTrue(configWith("atlas.lwc.enabled", "true").lwcEnabled());
  }
  @Test
  public void lwcExplicitlyDisabled() {
    Assertions.assertFalse(configWith("atlas.lwc.enabled", "false").lwcEnabled());
  }
  @Test
  public void lwcEnabledBadValue() {
    Assertions.assertFalse(configWith("atlas.lwc.enabled", "abc").lwcEnabled());
  }
  @Test
  public void defaultValidChars() {
    AsciiSet set = AsciiSet.fromPattern(emptyConfig().validTagCharacters());
    // quick sanity check of the allowed values
    Assertions.assertTrue(set.contains('7'));
    Assertions.assertTrue(set.contains('c'));
    Assertions.assertTrue(set.contains('C'));
    Assertions.assertTrue(set.contains('~'));
    Assertions.assertTrue(set.contains('^'));
    Assertions.assertTrue(set.contains('_'));
    Assertions.assertFalse(set.contains('!'));
    Assertions.assertFalse(set.contains('%'));
    Assertions.assertFalse(set.contains('/'));
    Assertions.assertFalse(set.contains(':'));
  }
}
| 6,026 |
0 | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas/AtlasRegistryTest.java | /*
* Copyright 2014-2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;
import com.fasterxml.jackson.dataformat.smile.SmileFactory;
import com.netflix.spectator.api.Clock;
import com.netflix.spectator.api.Gauge;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.ManualClock;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.api.NoopRegistry;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.atlas.impl.DefaultPublisher;
import com.netflix.spectator.atlas.impl.PublishPayload;
import com.netflix.spectator.ipc.IpcLogger;
import com.netflix.spectator.ipc.http.HttpClient;
import com.netflix.spectator.ipc.http.HttpRequestBuilder;
import com.netflix.spectator.ipc.http.HttpResponse;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.net.URI;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.zip.GZIPInputStream;
/**
 * Tests for AtlasRegistry covering measurement collection, batching of the publish
 * payload, initial polling delay calculation, meter expiration, and flushing of the
 * final interval's data on shutdown.
 */
public class AtlasRegistryTest {
  // Controllable clock so tests can move time across step boundaries explicitly.
  private ManualClock clock;
  private AtlasRegistry registry;
  /** Config with publishing disabled, a 10s step size, and a batch size of 3. */
  private AtlasConfig newConfig() {
    Map<String, String> props = new LinkedHashMap<>();
    props.put("atlas.enabled", "false");
    props.put("atlas.step", "PT10S");
    props.put("atlas.lwc.step", "PT10S");
    props.put("atlas.batchSize", "3");
    return new AtlasConfig() {
      @Override public String get(String k) {
        return props.get(k);
      }
      @Override public Registry debugRegistry() {
        return new NoopRegistry();
      }
    };
  }
  /** Advances the clock by one 10s step to complete the interval, then collects. */
  private List<Measurement> getMeasurements() {
    clock.setWallTime(clock.wallTime() + 10000);
    return registry.measurements().collect(Collectors.toList());
  }
  /** Polls meters at the current step boundary and returns the publish batches. */
  private List<List<Measurement>> getBatches() {
    long step = 10000;
    if (clock.wallTime() == 0L) {
      clock.setWallTime(step);
    }
    // align the time down to the start of the current step interval
    long t = clock.wallTime() / step * step;
    registry.pollMeters(t);
    return registry
        .getBatches(t)
        .stream()
        .map(RollupPolicy.Result::measurements)
        .collect(Collectors.toList());
  }
  @BeforeEach
  public void before() {
    clock = new ManualClock();
    registry = new AtlasRegistry(clock, newConfig());
  }
  @Test
  public void measurementsEmpty() {
    Assertions.assertEquals(0, getMeasurements().size());
  }
  @Test
  public void measurementsWithCounter() {
    registry.counter("test").increment();
    Assertions.assertEquals(1, getMeasurements().size());
  }
  // Timers report four statistics: count, totalTime, totalOfSquares, and max.
  @Test
  public void measurementsWithTimer() {
    registry.timer("test").record(42, TimeUnit.NANOSECONDS);
    Assertions.assertEquals(4, getMeasurements().size());
  }
  @Test
  public void measurementsWithDistributionSummary() {
    registry.distributionSummary("test").record(42);
    Assertions.assertEquals(4, getMeasurements().size());
  }
  @Test
  public void measurementsWithGauge() {
    registry.gauge("test").set(4.0);
    Assertions.assertEquals(1, getMeasurements().size());
  }
  @Test
  public void measurementsIgnoresNaN() {
    registry.gauge("test").set(Double.NaN);
    Assertions.assertEquals(0, getMeasurements().size());
  }
  // Looking up the same gauge by id and then by name should resolve to a single
  // meter, so only one measurement is expected. The local is intentionally unused.
  @Test
  public void measurementsWithMaxGauge() {
    registry.maxGauge(registry.createId("test")).set(4.0);
    Gauge g = registry.maxGauge("test");
    Assertions.assertEquals(1, getMeasurements().size());
  }
  @Test
  public void batchesEmpty() {
    Assertions.assertEquals(0, getBatches().size());
  }
  // 9 measurements with a batch size of 3 should produce 3 full batches.
  @Test
  public void batchesExact() {
    for (int i = 0; i < 9; ++i) {
      registry.counter("" + i).increment();
    }
    Assertions.assertEquals(3, getBatches().size());
    for (List<Measurement> batch : getBatches()) {
      Assertions.assertEquals(3, batch.size());
    }
  }
  // 7 measurements should produce two full batches and one partial batch of 1.
  @Test
  public void batchesLastPartial() {
    for (int i = 0; i < 7; ++i) {
      registry.counter("" + i).increment();
    }
    List<List<Measurement>> batches = getBatches();
    Assertions.assertEquals(3, batches.size());
    for (int i = 0; i < batches.size(); ++i) {
      Assertions.assertEquals((i < 2) ? 3 : 1, batches.get(i).size());
    }
  }
  // The following cases check how the delay to the first poll is chosen relative
  // to the position of the current time within the step interval.
  @Test
  public void initialDelayTooCloseToStart() {
    long d = newConfig().initialPollingDelay(clock, 10000);
    Assertions.assertEquals(1000, d);
  }
  @Test
  public void initialDelayTooCloseToEnd() {
    clock.setWallTime(19123);
    long d = newConfig().initialPollingDelay(clock, 10000);
    Assertions.assertEquals(9000, d);
  }
  @Test
  public void initialDelayOk() {
    clock.setWallTime(12123);
    long d = newConfig().initialPollingDelay(clock, 10000);
    Assertions.assertEquals(2123, d);
  }
  @Test
  public void initialDelayTooCloseToStartSmallStep() {
    long d = newConfig().initialPollingDelay(clock, 5000);
    Assertions.assertEquals(500, d);
  }
  @Test
  public void initialDelayTooCloseToEndSmallStep() {
    clock.setWallTime(19623);
    long d = newConfig().initialPollingDelay(clock, 5000);
    Assertions.assertEquals(877, d);
  }
  // Meters that have not been updated within the TTL should stop reporting after
  // expired meters are removed.
  @Test
  public void batchesExpiration() {
    for (int i = 0; i < 9; ++i) {
      registry.counter("" + i).increment();
    }
    Assertions.assertEquals(3, getBatches().size());
    for (List<Measurement> batch : getBatches()) {
      Assertions.assertEquals(3, batch.size());
    }
    clock.setWallTime(Duration.ofMinutes(15).toMillis() + 1);
    registry.removeExpiredMeters();
    Assertions.assertEquals(0, getBatches().size());
  }
  // sendToAtlas should not remove meters that are still within the TTL
  @Test
  public void keepsNonExpired() {
    for (int i = 0; i < 9; ++i) {
      registry.counter("" + i).increment();
    }
    registry.sendToAtlas();
    Assertions.assertEquals(3, getBatches().size());
  }
  // sendToAtlas should drop meters that have passed the TTL
  @Test
  public void removesExpired() {
    for (int i = 0; i < 9; ++i) {
      registry.counter("" + i).increment();
    }
    clock.setWallTime(Duration.ofMinutes(15).toMillis() + 1);
    registry.sendToAtlas();
    Assertions.assertEquals(0, getBatches().size());
  }
  // close() should work even if start() was never invoked
  @Test
  public void shutdownWithoutStarting() {
    AtlasRegistry r = new AtlasRegistry(
        Clock.SYSTEM,
        k -> k.equals("atlas.enabled") ? "true" : null);
    r.close();
  }
  // Values recorded in the final, incomplete interval should still be published
  // when the registry is closed.
  @Test
  public void flushOnShutdown() {
    List<PublishPayload> payloads = new ArrayList<>();
    HttpClient client = uri -> new TestRequestBuilder(uri, payloads);
    ManualClock c = new ManualClock();
    AtlasRegistry r = new AtlasRegistry(c, new TestConfig(client), client);
    r.start();
    c.setWallTime(58_000);
    r.maxGauge("test").set(1.0);
    c.setWallTime(62_000);
    r.maxGauge("test").set(2.0);
    r.close();
    Assertions.assertEquals(2, payloads.size());
    Assertions.assertEquals(1.0, getValue(payloads.get(0)));
    Assertions.assertEquals(2.0, getValue(payloads.get(1)));
  }
  @Test
  public void flushOnShutdownCounter() {
    List<PublishPayload> payloads = new ArrayList<>();
    HttpClient client = uri -> new TestRequestBuilder(uri, payloads);
    ManualClock c = new ManualClock();
    AtlasRegistry r = new AtlasRegistry(c, new TestConfig(client), client);
    r.start();
    c.setWallTime(58_000);
    r.counter("test").increment();
    c.setWallTime(62_000);
    r.counter("test").add(60.0);
    r.close();
    Assertions.assertEquals(2, payloads.size());
    // counters report a per-second rate; one increment over the interval is 1/60
    Assertions.assertEquals(1.0 / 60.0, getValue(payloads.get(0)));
    Assertions.assertEquals(1.0, getValue(payloads.get(1)));
  }
  /** Sums the values reported for the "test" metric within a payload. */
  private double getValue(PublishPayload payload) {
    return payload.getMetrics()
        .stream()
        .filter(m -> m.id().name().equals("test"))
        .mapToDouble(Measurement::value)
        .sum();
  }
  /** Config that enables publishing and routes requests to the supplied test client. */
  private static class TestConfig implements AtlasConfig {
    private final HttpClient client;
    TestConfig(HttpClient client) {
      this.client = client;
    }
    @Override public String get(String k) {
      return null;
    }
    @Override public boolean enabled() {
      return true;
    }
    @Override public long initialPollingDelay(Clock clock, long stepSize) {
      // use a long delay to avoid actually sending unless triggered by tests
      return 6_000_000;
    }
    @Override public Publisher publisher() {
      return new DefaultPublisher(this, client);
    }
  }
  /**
   * Request builder that decodes the gzip-compressed smile payload it receives and
   * records the resulting PublishPayload instead of performing network I/O.
   */
  private static class TestRequestBuilder extends HttpRequestBuilder {
    private static final JsonFactory FACTORY = new SmileFactory();
    private final List<PublishPayload> payloads;
    TestRequestBuilder(URI uri, List<PublishPayload> payloads) {
      super(new IpcLogger(new NoopRegistry()), uri);
      this.payloads = payloads;
    }
    /** Reads an object of string-to-string pairs into a map. */
    private Map<String, String> decodeTags(JsonParser parser) throws IOException {
      Map<String, String> tags = new HashMap<>();
      parser.nextToken();
      while (parser.nextToken() == JsonToken.FIELD_NAME) {
        String k = parser.getCurrentName();
        String v = parser.nextTextValue();
        tags.put(k, v);
      }
      return tags;
    }
    /** Reads a single measurement object; the "name" tag becomes the id name. */
    private Measurement decodeMeasurement(JsonParser parser) throws IOException {
      Map<String, String> tags = Collections.emptyMap();
      long timestamp = -1L;
      double value = Double.NaN;
      while (parser.nextToken() == JsonToken.FIELD_NAME) {
        String field = parser.getCurrentName();
        switch (field) {
          case "tags":
            tags = decodeTags(parser);
            break;
          case "timestamp":
            timestamp = parser.nextLongValue(-1L);
            break;
          case "value":
            parser.nextToken();
            value = parser.getDoubleValue();
            break;
          default:
            throw new IllegalArgumentException("unexpected field: " + field);
        }
      }
      String name = tags.remove("name");
      Id id = Id.create(name).withTags(tags);
      return new Measurement(id, timestamp, value);
    }
    /** Reads the top-level payload object: common tags plus the list of metrics. */
    private PublishPayload decodePayload(JsonParser parser) throws IOException {
      Map<String, String> tags = Collections.emptyMap();
      List<Measurement> metrics = new ArrayList<>();
      parser.nextToken();
      while (parser.nextToken() == JsonToken.FIELD_NAME) {
        String field = parser.getCurrentName();
        switch (field) {
          case "tags":
            tags = decodeTags(parser);
            break;
          case "metrics":
            parser.nextToken();
            while (parser.nextToken() == JsonToken.START_OBJECT) {
              metrics.add(decodeMeasurement(parser));
            }
            break;
          default:
            throw new IllegalArgumentException("unexpected field: " + field);
        }
      }
      return new PublishPayload(tags, metrics);
    }
    // Decode and capture the payload rather than buffering it for sending.
    @Override public HttpRequestBuilder withContent(String type, byte[] content) {
      try {
        try (GZIPInputStream in = new GZIPInputStream(new ByteArrayInputStream(content));
            JsonParser parser = FACTORY.createParser(in)) {
          payloads.add(decodePayload(parser));
        }
      } catch (IOException e) {
        throw new UncheckedIOException(e);
      }
      return this;
    }
    // Pretend the request succeeded without any network interaction.
    @Override protected HttpResponse sendImpl() throws IOException {
      return new HttpResponse(200, Collections.emptyMap());
    }
  }
}
| 6,027 |
0 | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas/AtlasTimerTest.java | /*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.api.Timer;
import com.netflix.spectator.api.Utils;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.util.Arrays;
import java.util.concurrent.TimeUnit;
/**
 * Tests for AtlasTimer verifying the four reported statistics (count, totalTime,
 * totalOfSquares, max), batch recording, overflow handling, interval roll-over,
 * and expiration.
 */
public class AtlasTimerTest {
  // Clock that counts how many times it is polled; used by recordBatchPollsClockOnce.
  private final CountingManualClock clock = new CountingManualClock();
  private final long step = 10000L;
  private final AtlasTimer dist = new AtlasTimer(Id.create("test"), clock, step, step);
  /**
   * Asserts the measurements reported for the completed interval. The count,
   * totalTime, and totalOfSquares statistics are reported as per-second rates, hence
   * the division by the 10s step. Amounts are recorded in nanoseconds and reported
   * in seconds, which accounts for the extra 1e9/1e18 factors.
   */
  private void checkValue(long count, double amount, double square, long max) {
    int num = 0;
    for (Measurement m : dist.measure()) {
      String stat = Utils.getTagValue(m.id(), "statistic");
      // max is reported as a gauge, the other statistics as rates
      DsType ds = "max".equals(stat) ? DsType.gauge : DsType.rate;
      Id expectedId = dist.id().withTag(ds).withTag("statistic", stat);
      Assertions.assertEquals(expectedId, m.id());
      switch (stat) {
        case "count":
          Assertions.assertEquals(count / 10.0, m.value(), 1e-12);
          break;
        case "totalTime":
          Assertions.assertEquals(amount / 10.0e9, m.value(), 1e-12);
          break;
        case "totalOfSquares":
          Assertions.assertEquals(square / 10.0e18, m.value(), 1e-12);
          break;
        case "max":
          Assertions.assertEquals(max / 1e9, m.value(), 1e-12);
          break;
        default:
          throw new IllegalArgumentException("unknown stat: " + stat);
      }
      Assertions.assertEquals(count, dist.count());
      // This method cannot handle overflows
      if (amount < Long.MAX_VALUE) {
        Assertions.assertEquals(amount, dist.totalTime());
      }
      ++num;
    }
    Assertions.assertEquals(4, num);
  }
  @Test
  public void measuredIdHasDsType() {
    checkValue(0, 0, 0, 0);
  }
  // Recorded values only appear in measure() after the step boundary has passed.
  @Test
  public void recordOne() {
    dist.record(1, TimeUnit.NANOSECONDS);
    checkValue(0, 0, 0, 0);
    clock.setWallTime(step + 1);
    checkValue(1, 1, 1, 1);
  }
  @Test
  public void recordTwo() {
    dist.record(2, TimeUnit.NANOSECONDS);
    checkValue(0, 0, 0, 0);
    clock.setWallTime(step + 1);
    checkValue(1, 2, 4, 2);
  }
  // Units other than nanoseconds should be converted on record.
  @Test
  public void recordMillis() {
    dist.record(2, TimeUnit.MILLISECONDS);
    clock.setWallTime(step + 1);
    checkValue(1, 2000000L, 4000000000000L, 2000000L);
  }
  @Test
  public void recordOverflow() {
    // Simulate case where we have old items in a queue and when processing again we record
    // the ages of many of those items in a short span
    long amount = TimeUnit.DAYS.toNanos(14);
    double square = 0.0;
    for (int i = 0; i < 10000; ++i) {
      dist.record(amount, TimeUnit.NANOSECONDS);
      square += (double) amount * amount;
    }
    clock.setWallTime(step + 1);
    checkValue(10000, 10e3 * amount, square, amount);
  }
  // A single sample can overflow the total of squares for a long accumulator.
  @Test
  public void recordSquaresOverflow() {
    long v = (long) (Math.sqrt(Long.MAX_VALUE) / 1e9) + 1;
    dist.record(v, TimeUnit.SECONDS);
    clock.setWallTime(step + 1);
    double square = 1e18 * v * v;
    checkValue(1, v * 1000000000L, square, v * 1000000000L);
  }
  @Test
  public void recordZero() {
    dist.record(0, TimeUnit.NANOSECONDS);
    clock.setWallTime(step + 1);
    checkValue(1, 0, 0, 0);
  }
  // Negative amounts count as an event but contribute nothing to the totals.
  @Test
  public void recordNegativeValue() {
    dist.record(-2, TimeUnit.NANOSECONDS);
    clock.setWallTime(step + 1);
    checkValue(1, 0, 0, 0);
  }
  @Test
  public void recordSeveralValues() {
    dist.record(1, TimeUnit.NANOSECONDS);
    dist.record(2, TimeUnit.NANOSECONDS);
    dist.record(3, TimeUnit.NANOSECONDS);
    dist.record(1, TimeUnit.NANOSECONDS);
    clock.setWallTime(step + 1);
    checkValue(4, 1 + 2 + 3 + 1, 1 + 4 + 9 + 1, 3);
  }
  // Helper, not a test by itself: exercises the batch updater with a given batch
  // size and expects the same totals as recordSeveralValues.
  public void recordSeveralValuesBatch(int batchSize) throws Exception {
    try (Timer.BatchUpdater b = dist.batchUpdater(batchSize)) {
      b.record(1, TimeUnit.NANOSECONDS);
      b.record(2, TimeUnit.NANOSECONDS);
      b.record(3, TimeUnit.NANOSECONDS);
      b.record(1, TimeUnit.NANOSECONDS);
    }
    clock.setWallTime(step + 1);
    checkValue(4, 1 + 2 + 3 + 1, 1 + 4 + 9 + 1, 3);
  }
  // Batch sizes both smaller and larger than the number of recorded values.
  @Test
  public void recordSeveralValuesBatch() throws Exception {
    recordSeveralValuesBatch(1);
    recordSeveralValuesBatch(2);
    recordSeveralValuesBatch(3);
    recordSeveralValuesBatch(4);
    recordSeveralValuesBatch(5);
  }
  // Mismatched array lengths and non-positive counts should be ignored safely.
  @Test
  public void recordBatchMismatchedLengths() {
    dist.record(new long[0], 1, TimeUnit.NANOSECONDS);
    clock.setWallTime(1 * step + 1);
    checkValue(0, 0, 0, 0);
    dist.record(new long[1], 0, TimeUnit.NANOSECONDS);
    clock.setWallTime(2 * step + 1);
    checkValue(0, 0, 0, 0);
    dist.record(new long[1], -1, TimeUnit.NANOSECONDS);
    clock.setWallTime(3 * step + 1);
    checkValue(0, 0, 0, 0);
    dist.record(new long[]{ 0, 0 }, 2, TimeUnit.NANOSECONDS);
    clock.setWallTime(4 * step + 1);
    checkValue(2, 0, 0, 0);
  }
  @Test
  public void recordBatchZero() {
    dist.record(new long[]{ 0 }, 1, TimeUnit.NANOSECONDS);
    checkValue(0, 0, 0, 0);
    clock.setWallTime(step + 1);
    checkValue(1, 0, 0, 0);
  }
  @Test
  public void recordBatchOne() {
    dist.record(new long[]{ 1 }, 1, TimeUnit.NANOSECONDS);
    checkValue(0, 0, 0, 0);
    clock.setWallTime(step + 1);
    checkValue(1, 1, 1, 1);
  }
  @Test
  public void recordBatchTwo() {
    dist.record(new long[]{ 2 }, 1, TimeUnit.NANOSECONDS);
    checkValue(0, 0, 0, 0);
    clock.setWallTime(step + 1);
    checkValue(1, 2, 4, 2);
  }
  @Test
  public void recordBatchOverflowSingle() {
    // Simulate case where we have old items in a queue and when processing again we record
    // the ages of many of those items in a short span
    long amount = TimeUnit.DAYS.toNanos(14);
    double square = 0.0;
    for (int i = 0; i < 10000; ++i) {
      dist.record(new long[]{ amount }, 1, TimeUnit.NANOSECONDS);
      square += (double) amount * amount;
    }
    clock.setWallTime(step + 1);
    checkValue(10000, 10e3 * amount, square, amount);
  }
  @Test
  public void recordBatchOverflowBatch() {
    // Simulate case where we have old items in a queue and when processing again we record
    // the ages of many of those items in a short span
    long amount = TimeUnit.DAYS.toNanos(14);
    double square = 0.0;
    int COUNT = 10000;
    long[] amounts = new long[COUNT];
    Arrays.fill(amounts, amount);
    dist.record(amounts, COUNT, TimeUnit.NANOSECONDS);
    for (int i = 0; i < 10000; i++) {
      square += (double) amount * amount;
    }
    clock.setWallTime(step + 1);
    checkValue(10000, 10e3 * amount, square, amount);
  }
  // Negative entries in a batch count as events but add nothing to the totals.
  @Test
  public void recordBatchMixedPositiveNegativeValues() {
    dist.record(new long[]{ 1, 0, 2, -1, 3, -4, 1}, 7, TimeUnit.NANOSECONDS);
    clock.setWallTime(step + 1);
    checkValue(7, 1 + 2 + 3 + 1, 1 + 4 + 9 + 1, 3);
  }
  @Test
  public void recordBatchSeveralValues() {
    dist.record(new long[]{ 1, 2, 3, 1}, 4, TimeUnit.NANOSECONDS);
    clock.setWallTime(step + 1);
    checkValue(4, 1 + 2 + 3 + 1, 1 + 4 + 9 + 1, 3);
  }
  // Batch recording should read the clock a single time for the whole batch.
  @Test
  public void recordBatchPollsClockOnce() {
    long[] amounts = new long[10000];
    Arrays.fill(amounts, 1L);
    long countPollsBefore = clock.countPolled();
    dist.record(amounts, amounts.length, TimeUnit.NANOSECONDS);
    long actualPolls = clock.countPolled() - countPollsBefore;
    Assertions.assertEquals(1, actualPolls);
  }
  // The runnable is timed using the monotonic clock; 2ns elapse inside the task.
  @Test
  public void recordRunnable() {
    dist.record(() -> clock.setMonotonicTime(clock.monotonicTime() + 2));
    clock.setWallTime(step + 1);
    checkValue(1, 2, 4, 2);
  }
  // The callable's return value is passed through unchanged.
  @Test
  public void recordCallable() throws Exception {
    String s = dist.record(() -> {
      clock.setMonotonicTime(clock.monotonicTime() + 2);
      return "foo";
    });
    Assertions.assertEquals("foo", s);
    clock.setWallTime(step + 1);
    checkValue(1, 2, 4, 2);
  }
  // Values are only reported for a single interval; the following one reads zero.
  @Test
  public void rollForward() {
    dist.record(42, TimeUnit.NANOSECONDS);
    clock.setWallTime(step + 1);
    checkValue(1, 42, 42 * 42, 42);
    clock.setWallTime(step + step + 1);
    checkValue(0, 0, 0, 0);
  }
  // A timer expires after inactivity and becomes active again on the next record.
  @Test
  public void expiration() {
    long start = clock.wallTime();
    clock.setWallTime(start + step * 2);
    Assertions.assertTrue(dist.hasExpired());
    dist.record(42, TimeUnit.MICROSECONDS);
    Assertions.assertFalse(dist.hasExpired());
    clock.setWallTime(start + step * 3 + 1);
    Assertions.assertTrue(dist.hasExpired());
  }
}
| 6,028 |
0 | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas/StepClockTest.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.ManualClock;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
/**
 * Tests for StepClock verifying that wall times are floored to the step boundary
 * while monotonic times pass through unmodified.
 */
public class StepClockTest {
  private static final long STEP = 10000L;
  @Test
  public void wallTime() {
    ManualClock underlying = new ManualClock();
    StepClock stepClock = new StepClock(underlying, STEP);
    // wall time is truncated down to the most recent step boundary
    underlying.setWallTime(5000);
    Assertions.assertEquals(0L, stepClock.wallTime());
    underlying.setWallTime(10000);
    Assertions.assertEquals(10000L, stepClock.wallTime());
    underlying.setWallTime(20212);
    Assertions.assertEquals(20000L, stepClock.wallTime());
  }
  @Test
  public void monotonicTime() {
    ManualClock underlying = new ManualClock();
    StepClock stepClock = new StepClock(underlying, STEP);
    // monotonic time is reported exactly as provided by the underlying clock
    underlying.setMonotonicTime(5000);
    Assertions.assertEquals(5000L, stepClock.monotonicTime());
    underlying.setMonotonicTime(10000);
    Assertions.assertEquals(10000L, stepClock.monotonicTime());
    underlying.setMonotonicTime(20212);
    Assertions.assertEquals(20212L, stepClock.monotonicTime());
  }
}
/*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.ManualClock;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.api.Statistic;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
// Unit tests for AtlasGauge: last-set-value semantics, ds type tagging,
// NaN handling, persistence across steps, and expiration.
public class AtlasGaugeTest {
private final ManualClock clock = new ManualClock();
private final long step = 10000L;
private final AtlasGauge gauge = new AtlasGauge(Id.create("test"), clock, step);
// Asserts exactly one measurement is reported with the expected value and
// that its id carries the statistic=gauge and atlas.dstype=gauge tags.
private void checkValue(long expected) {
int count = 0;
for (Measurement m : gauge.measure()) {
Assertions.assertEquals(gauge.id().withTags(Statistic.gauge, DsType.gauge), m.id());
Assertions.assertEquals(expected, m.value(), 1e-12);
++count;
}
Assertions.assertEquals(1, count);
}
@Test
public void measuredIdHasDsType() {
checkValue(0);
}
@Test
// The value set is visible immediately and persists into the next step.
public void set() {
gauge.set(42);
checkValue(42);
clock.setWallTime(step + 1);
checkValue(42);
}
@Test
// A NaN value should produce no measurements at all.
public void measureNaN() {
gauge.set(Double.NaN);
Assertions.assertFalse(gauge.measure().iterator().hasNext());
}
@Test
// Unlike counters, a gauge keeps reporting its last value across steps.
public void rollForward() {
gauge.set(42);
clock.setWallTime(step + 1);
checkValue(42);
clock.setWallTime(step + step + 1);
checkValue(42);
}
@Test
// An idle gauge reports as expired; setting a value revives it.
public void expiration() {
long start = clock.wallTime();
clock.setWallTime(start + step * 2);
Assertions.assertTrue(gauge.hasExpired());
gauge.set(1);
Assertions.assertFalse(gauge.hasExpired());
clock.setWallTime(start + step * 3 + 1);
Assertions.assertTrue(gauge.hasExpired());
gauge.set(1);
Assertions.assertFalse(gauge.hasExpired());
}
}
/*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.impl.AsciiSet;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
// Suite for removed ValidCharacters class. Test case was kept and changed to use AsciiSet
// to help verify the new validation.
public class ValidCharactersTest {

  private final AsciiSet set = AsciiSet.fromPattern("-._A-Za-z0-9");

  /** Replaces any character outside the allowed set with an underscore. */
  private String toValidCharset(String str) {
    return set.replaceNonMembers(str, '_');
  }

  /** Null input should fail fast with an NPE rather than be accepted. */
  @Test
  public void nullValue() {
    Assertions.assertThrows(NullPointerException.class, () -> toValidCharset(null));
  }

  /** The empty string is already valid and passes through unchanged. */
  @Test
  public void empty() {
    Assertions.assertEquals("", toValidCharset(""));
  }

  /** Every member of the allowed set must be preserved as-is. */
  @Test
  public void allValid() {
    final String allowed =
        "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789._-";
    Assertions.assertEquals(allowed, toValidCharset(allowed));
  }

  /** Characters outside the set are each mapped to '_'. */
  @Test
  public void invalidConvertsToUnderscore() {
    Assertions.assertEquals("a_b_c_d_e_f_g_h_i_", toValidCharset("a,b%c^d&e|f{g}h:i;"));
  }
}
/*
* Copyright 2014-2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import nl.jqno.equalsverifier.EqualsVerifier;
import org.junit.jupiter.api.Test;
/** Equals/hashCode contract checks for the RollupPolicy value types. */
public class RollupPolicyTest {

  /** Rule must satisfy the equals/hashCode contract with non-null fields. */
  @Test
  public void ruleEqualsContract() {
    EqualsVerifier.forClass(RollupPolicy.Rule.class)
        .withNonnullFields("query", "rollup")
        .verify();
  }

  /** Result must satisfy the equals/hashCode contract with non-null fields. */
  @Test
  public void resultEqualsContract() {
    EqualsVerifier.forClass(RollupPolicy.Result.class)
        .withNonnullFields("commonTags", "measurements")
        .verify();
  }
}
/*
* Copyright 2014-2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.ManualClock;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.api.NoopRegistry;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Statistic;
import com.netflix.spectator.api.Tag;
import com.netflix.spectator.api.Utils;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
// Tests for Rollups: aggregating measurements after dropping a dimension
// ("i" tag) and applying rollup policies built from query rules.
public class RollupsTest {
private ManualClock clock;
private AtlasRegistry registry;
// Fresh registry per test with a 5 second step so rates divide by 5.
@BeforeEach
public void before() {
clock = new ManualClock();
AtlasConfig config = new AtlasConfig() {
@Override public String get(String k) {
return "atlas.step".equals(k) ? "PT5S" : null;
}
@Override public Registry debugRegistry() {
return new NoopRegistry();
}
};
registry = new AtlasRegistry(clock, config);
}
// Mapping function used for rollups: strips the "i" tag so ids collapse.
private Id removeIdxTag(Id id) {
List<Tag> filtered = new ArrayList<>();
for (Tag t : id.tags()) {
if (!"i".equals(t.key())) {
filtered.add(t);
}
}
return registry.createId(id.name()).withTags(filtered);
}
@Test
// Counters aggregate by summing rates: 10 increments over a 5s step.
public void aggregateCounters() {
for (int i = 0; i < 10; ++i) {
registry.counter("test", "i", "" + i).increment();
}
clock.setWallTime(5000);
List<Measurement> input = registry.measurements().collect(Collectors.toList());
List<Measurement> aggr = Rollups.aggregate(this::removeIdxTag, input);
Assertions.assertEquals(1, aggr.size());
Measurement m = aggr.get(0);
Id id = registry.createId("test")
.withTag("atlas.dstype", "sum")
.withTag(Statistic.count);
Assertions.assertEquals(id, m.id());
Assertions.assertEquals(10.0 / 5.0, m.value(), 1e-12);
}
@Test
// Gauges aggregate by taking the max of the values (all 2.0 here).
public void aggregateGauges() {
for (int i = 0; i < 10; ++i) {
registry.gauge("test", "i", "" + i).set(2.0);
}
clock.setWallTime(5000);
List<Measurement> input = registry.measurements().collect(Collectors.toList());
List<Measurement> aggr = Rollups.aggregate(this::removeIdxTag, input);
Assertions.assertEquals(1, aggr.size());
Measurement m = aggr.get(0);
Id id = registry.createId("test")
.withTag("atlas.dstype", "gauge")
.withTag(Statistic.gauge);
Assertions.assertEquals(id, m.id());
Assertions.assertEquals(2.0, m.value(), 1e-12);
}
@Test
// NaN values are ignored when aggregating gauges; max of {0,2,4,6,8} is 8.
public void aggregateGaugesWithNaN() {
for (int i = 0; i < 10; ++i) {
double v = (i % 2 == 0) ? i : Double.NaN;
registry.gauge("test", "i", "" + i).set(v);
}
clock.setWallTime(5000);
List<Measurement> input = registry.measurements().collect(Collectors.toList());
List<Measurement> aggr = Rollups.aggregate(this::removeIdxTag, input);
Assertions.assertEquals(1, aggr.size());
Measurement m = aggr.get(0);
Id id = registry.createId("test")
.withTag("atlas.dstype", "gauge")
.withTag(Statistic.gauge);
Assertions.assertEquals(id, m.id());
Assertions.assertEquals(8.0, m.value(), 1e-12);
}
@Test
// Timers emit 4 statistics; sums for count/totalTime/totalOfSquares are
// summed rates, max is aggregated as a gauge.
public void aggregateTimers() {
for (int i = 0; i < 10; ++i) {
registry.timer("test", "i", "" + i).record(i, TimeUnit.SECONDS);
}
clock.setWallTime(5000);
List<Measurement> input = registry.measurements().collect(Collectors.toList());
List<Measurement> aggr = Rollups.aggregate(this::removeIdxTag, input);
Assertions.assertEquals(4, aggr.size());
for (Measurement m : aggr) {
Id id = registry.createId("test");
switch (Utils.getTagValue(m.id(), "statistic")) {
case "count":
id = id.withTag("atlas.dstype", "sum").withTag(Statistic.count);
Assertions.assertEquals(id, m.id());
Assertions.assertEquals(10.0 / 5.0, m.value(), 1e-12);
break;
case "totalTime":
id = id.withTag("atlas.dstype", "sum").withTag(Statistic.totalTime);
Assertions.assertEquals(id, m.id());
Assertions.assertEquals(45.0 / 5.0, m.value(), 1e-12);
break;
case "totalOfSquares":
id = id.withTag("atlas.dstype", "sum").withTag(Statistic.totalOfSquares);
Assertions.assertEquals(id, m.id());
Assertions.assertEquals(285.0 / 5.0, m.value(), 1e-12);
break;
case "max":
id = id.withTag("atlas.dstype", "gauge").withTag(Statistic.max);
Assertions.assertEquals(id, m.id());
Assertions.assertEquals(9.0, m.value(), 1e-12);
break;
default:
Assertions.fail("unexpected id: " + m.id());
break;
}
}
}
@Test
// Same shape as aggregateTimers but for distribution summaries
// (totalAmount instead of totalTime).
public void aggregateDistributionSummaries() {
for (int i = 0; i < 10; ++i) {
registry.distributionSummary("test", "i", "" + i).record(i);
}
clock.setWallTime(5000);
List<Measurement> input = registry.measurements().collect(Collectors.toList());
List<Measurement> aggr = Rollups.aggregate(this::removeIdxTag, input);
Assertions.assertEquals(4, aggr.size());
for (Measurement m : aggr) {
Id id = registry.createId("test");
switch (Utils.getTagValue(m.id(), "statistic")) {
case "count":
id = id.withTag("atlas.dstype", "sum").withTag(Statistic.count);
Assertions.assertEquals(id, m.id());
Assertions.assertEquals(10.0 / 5.0, m.value(), 1e-12);
break;
case "totalAmount":
id = id.withTag("atlas.dstype", "sum").withTag(Statistic.totalAmount);
Assertions.assertEquals(id, m.id());
Assertions.assertEquals(45.0 / 5.0, m.value(), 1e-12);
break;
case "totalOfSquares":
id = id.withTag("atlas.dstype", "sum").withTag(Statistic.totalOfSquares);
Assertions.assertEquals(id, m.id());
Assertions.assertEquals(285.0 / 5.0, m.value(), 1e-12);
break;
case "max":
id = id.withTag("atlas.dstype", "gauge").withTag(Statistic.max);
Assertions.assertEquals(id, m.id());
Assertions.assertEquals(9.0, m.value(), 1e-12);
break;
default:
Assertions.fail("unexpected id: " + m.id());
break;
}
}
}
// Builds a map from alternating key/value pairs.
private static Map<String, String> map(String... ts) {
Map<String, String> m = new HashMap<>();
for (int i = 0; i < ts.length; i += 2) {
m.put(ts[i], ts[i + 1]);
}
return m;
}
// Convenience for a fixed list of values.
private static List<String> list(String... vs) {
return Arrays.asList(vs);
}
@Test
// With no rules, the policy passes all measurements through in one result.
public void fromRulesEmpty() {
for (int i = 0; i < 10; ++i) {
registry.counter("test", "i", "" + i).increment();
}
clock.setWallTime(5000);
List<Measurement> input = registry.measurements().collect(Collectors.toList());
RollupPolicy policy = Rollups.fromRules(Collections.emptyMap(), Collections.emptyList());
List<RollupPolicy.Result> results = policy.apply(input);
Assertions.assertEquals(1, results.size());
Assertions.assertEquals(10, results.get(0).measurements().size());
}
@Test
// A single matching rule collapses the "i" dimension: 10 counters plus the
// unmatched "ignored" counter reduce to 2 measurements; common tags kept.
public void fromRulesSingle() {
registry.counter("ignored").increment();
for (int i = 0; i < 10; ++i) {
registry.counter("test", "i", "" + i).increment();
}
clock.setWallTime(5000);
List<Measurement> input = registry.measurements().collect(Collectors.toList());
List<RollupPolicy.Rule> rules = new ArrayList<>();
rules.add(new RollupPolicy.Rule("name,test,:eq", list("i")));
RollupPolicy policy = Rollups.fromRules(map("app", "foo", "node", "i-123"), rules);
List<RollupPolicy.Result> results = policy.apply(input);
Assertions.assertEquals(1, results.size());
Assertions.assertEquals(2, results.get(0).measurements().size());
RollupPolicy.Result result = results.get(0);
Assertions.assertEquals(map("app", "foo", "node", "i-123"), result.commonTags());
}
@Test
// Rules that drop a common tag ("node") produce a separate result group
// with the reduced common tag set.
public void fromRulesMulti() {
registry.counter("removeNode").increment();
for (int i = 0; i < 10; ++i) {
registry.counter("test", "i", "" + i).increment();
}
clock.setWallTime(5000);
List<Measurement> input = registry.measurements().collect(Collectors.toList());
List<RollupPolicy.Rule> rules = new ArrayList<>();
rules.add(new RollupPolicy.Rule("i,:has", list("i")));
rules.add(new RollupPolicy.Rule("name,removeNode,:eq", list("node")));
RollupPolicy policy = Rollups.fromRules(map("app", "foo", "node", "i-123"), rules);
List<RollupPolicy.Result> results = policy.apply(input);
Assertions.assertEquals(2, results.size());
for (RollupPolicy.Result result : results) {
Assertions.assertEquals(1, result.measurements().size());
String name = result.measurements().get(0).id().name();
if ("removeNode".equals(name)) {
Assertions.assertEquals(map("app", "foo"), result.commonTags());
} else {
Assertions.assertEquals(map("app", "foo", "node", "i-123"), result.commonTags());
}
}
}
}
/*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import java.util.Arrays;
import com.netflix.spectator.api.DistributionSummary;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.api.Utils;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
// Unit tests for AtlasDistributionSummary: the four published statistics
// (count, totalAmount, totalOfSquares as rates; max as a gauge), batch
// recording, step roll-forward, and expiration.
public class AtlasDistributionSummaryTest {
private final CountingManualClock clock = new CountingManualClock();
private final long step = 10000L;
private final AtlasDistributionSummary dist = new AtlasDistributionSummary(Id.create("test"), clock, step, step);
// Asserts all four measurements for the previous step. Rates are divided by
// the 10s step; max is reported as-is. Also checks count()/totalAmount().
private void checkValue(long count, long amount, long square, long max) {
int num = 0;
for (Measurement m : dist.measure()) {
String stat = Utils.getTagValue(m.id(), "statistic");
DsType ds = "max".equals(stat) ? DsType.gauge : DsType.rate;
Id expectedId = dist.id().withTag(ds).withTag("statistic", stat);
Assertions.assertEquals(expectedId, m.id());
switch (stat) {
case "count":
Assertions.assertEquals(count / 10.0, m.value(), 1e-12);
break;
case "totalAmount":
Assertions.assertEquals(amount / 10.0, m.value(), 1e-12);
break;
case "totalOfSquares":
Assertions.assertEquals(square / 10.0, m.value(), 1e-12);
break;
case "max":
Assertions.assertEquals(max, m.value(), 1e-12);
break;
default:
throw new IllegalArgumentException("unknown stat: " + stat);
}
Assertions.assertEquals(count, dist.count());
Assertions.assertEquals(amount, dist.totalAmount());
++num;
}
Assertions.assertEquals(4, num);
}
@Test
public void measuredIdHasDsType() {
checkValue(0, 0, 0, 0);
}
@Test
// Recorded values are only published after the step rolls over.
public void recordOne() {
dist.record(1);
checkValue(0, 0, 0, 0);
clock.setWallTime(step + 1);
checkValue(1, 1, 1, 1);
}
@Test
public void recordTwo() {
dist.record(2);
checkValue(0, 0, 0, 0);
clock.setWallTime(step + 1);
checkValue(1, 2, 4, 2);
}
@Test
// Zero still counts as an event but adds nothing to the totals.
public void recordZero() {
dist.record(0);
clock.setWallTime(step + 1);
checkValue(1, 0, 0, 0);
}
@Test
// Negative amounts are counted but their value is ignored.
public void recordNegativeValue() {
dist.record(-2);
clock.setWallTime(step + 1);
checkValue(1, 0, 0, 0);
}
@Test
public void recordSeveralValues() {
dist.record(1);
dist.record(2);
dist.record(3);
dist.record(1);
clock.setWallTime(step + 1);
checkValue(4, 1 + 2 + 3 + 1, 1 + 4 + 9 + 1, 3);
}
// Helper (deliberately not annotated @Test): records the same values through
// a BatchUpdater of the given size and expects identical totals.
public void recordSeveralValuesBatch(int batchSize) throws Exception {
try (DistributionSummary.BatchUpdater b = dist.batchUpdater(batchSize)) {
b.record(1);
b.record(2);
b.record(3);
b.record(1);
}
clock.setWallTime(step + 1);
checkValue(4, 1 + 2 + 3 + 1, 1 + 4 + 9 + 1, 3);
}
@Test
// Exercises batch sizes both smaller and larger than the number of records.
public void recordSeveralValuesBatch() throws Exception {
recordSeveralValuesBatch(1);
recordSeveralValuesBatch(2);
recordSeveralValuesBatch(3);
recordSeveralValuesBatch(4);
recordSeveralValuesBatch(5);
}
@Test
// record(long[], n): n <= 0 or n beyond usable entries must be safe; only
// the first n entries are used.
public void recordBatchMismatchedLengths() {
dist.record(new long[0], 1);
clock.setWallTime(1 * step + 1);
checkValue(0, 0, 0, 0);
dist.record(new long[1], 0);
clock.setWallTime(2 * step + 1);
checkValue(0, 0, 0, 0);
dist.record(new long[1], -1);
clock.setWallTime(3 * step + 1);
checkValue(0, 0, 0, 0);
dist.record(new long[]{ 0, 0 }, 2);
clock.setWallTime(4 * step + 1);
checkValue(2, 0, 0, 0);
}
@Test
public void recordBatchOne() {
dist.record(new long[]{ 1 }, 1);
checkValue(0, 0, 0, 0);
clock.setWallTime(step + 1);
checkValue(1, 1, 1, 1);
}
@Test
public void recordBatchTwo() {
dist.record(new long[]{ 2 }, 1);
checkValue(0, 0, 0, 0);
clock.setWallTime(step + 1);
checkValue(1, 2, 4, 2);
}
@Test
public void recordBatchSeveralValues() {
dist.record(new long[]{ 1, 2, 3, 1 }, 4);
checkValue(0, 0, 0, 0);
clock.setWallTime(step + 1);
checkValue(4, 1 + 2 + 3 + 1, 1 + 4 + 9 + 1, 3);
}
@Test
// Negative entries are counted but ignored in totals, same as record(-2).
public void recordBatchWithIgnoredValuesMixed() {
dist.record(new long[]{ 1, -1, 0, 2, -1, 0, 3, 0, -1, 1 }, 10);
checkValue(0, 0, 0, 0);
clock.setWallTime(step + 1);
checkValue(10, 1 + 2 + 3 + 1, 1 + 4 + 9 + 1, 3);
}
@Test
// A batch record must poll the clock once total, not once per entry.
public void recordBatchPollsClockOnce() {
long[] amounts = new long[10000];
Arrays.fill(amounts, 1L);
long countPollsBefore = clock.countPolled();
dist.record(amounts, amounts.length);
long actualPolls = clock.countPolled() - countPollsBefore;
Assertions.assertEquals(1, actualPolls);
}
@Test
// Values are reported for one step and reset to zero afterwards.
public void rollForward() {
dist.record(42);
clock.setWallTime(step + 1);
checkValue(1, 42, 42 * 42, 42);
clock.setWallTime(step + step + 1);
checkValue(0, 0, 0, 0);
}
@Test
// Idle meters report as expired; recording revives them temporarily.
public void expiration() {
long start = clock.wallTime();
clock.setWallTime(start + step * 2);
Assertions.assertTrue(dist.hasExpired());
dist.record(42);
Assertions.assertFalse(dist.hasExpired());
clock.setWallTime(start + step * 3 + 1);
Assertions.assertTrue(dist.hasExpired());
}
}
/*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;
import com.netflix.spectator.api.ManualClock;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.atlas.impl.MeasurementSerializer;
import com.netflix.spectator.atlas.impl.Subscription;
import com.netflix.spectator.atlas.impl.Subscriptions;
import com.netflix.spectator.impl.AsciiSet;
import com.netflix.spectator.ipc.http.HttpClient;
import com.netflix.spectator.ipc.http.HttpRequestBuilder;
import com.netflix.spectator.ipc.http.HttpResponse;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
// Tests for SubscriptionManager using a stubbed HttpClient that replays a
// fixed sequence of responses; covers payload parsing, step filtering,
// expiration, error handling, and 304 caching behavior.
public class SubscriptionManagerTest {
private final SimpleModule module = new SimpleModule()
.addSerializer(
Measurement.class,
new MeasurementSerializer(s -> AsciiSet.fromPattern("a-z").replaceNonMembers(s, '_')));
private final ObjectMapper mapper = new ObjectMapper(new JsonFactory()).registerModule(module);
// Creates a manager whose HTTP client returns the given responses in order,
// one per refresh. Configured to accept subscriptions at any step.
private SubscriptionManager newInstance(ManualClock clock, HttpResponse... responses) {
final AtomicInteger pos = new AtomicInteger();
HttpClient client = uri -> new HttpRequestBuilder(HttpClient.DEFAULT_LOGGER, uri) {
@Override public HttpResponse send() {
return responses[pos.getAndIncrement()];
}
};
Map<String, String> config = new HashMap<>();
config.put("atlas.lwc.ignore-publish-step", "false");
return new SubscriptionManager(mapper, client, clock, config::get);
}
private Set<Subscription> set(Subscription... subs) {
return new HashSet<>(Arrays.asList(subs));
}
// Serializes the subscriptions into the JSON payload shape of the service.
private byte[] json(Subscription... subs) throws Exception {
Subscriptions payload = new Subscriptions().withExpressions(Arrays.asList(subs));
return mapper.writeValueAsBytes(payload);
}
// Canonical 60s subscription with a predictable id and expression.
private Subscription sub(int i) {
return new Subscription()
.withId("" + i)
.withExpression("name," + i + ",:eq,:sum")
.withFrequency(60000);
}
// 200 response with a gzip-compressed body.
private HttpResponse ok(byte[] data) {
try {
return (new HttpResponse(200, Collections.emptyMap(), data)).compress();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Test
public void emptyExpressionList() {
ManualClock clock = new ManualClock();
byte[] data = "{\"expressions\":[]}".getBytes(StandardCharsets.UTF_8);
SubscriptionManager mgr = newInstance(clock, ok(data));
mgr.refresh();
Assertions.assertTrue(mgr.subscriptions().isEmpty());
}
@Test
// Unknown JSON fields in the payload must be ignored, not fail parsing.
public void unknownField() {
ManualClock clock = new ManualClock();
byte[] data = "{\"expressions\":[{\"expression\":\"name,1,:eq,:sum\",\"id\":\"1\",\"frequency\":60000,\"foo\":\"bar\"}]}".getBytes(StandardCharsets.UTF_8);
SubscriptionManager mgr = newInstance(clock, ok(data));
mgr.refresh();
Assertions.assertEquals(set(sub(1)), new HashSet<>(mgr.subscriptions()));
}
@Test
public void singleExpression() throws Exception {
ManualClock clock = new ManualClock();
byte[] data = json(sub(1));
SubscriptionManager mgr = newInstance(clock, ok(data));
mgr.refresh();
Assertions.assertEquals(set(sub(1)), new HashSet<>(mgr.subscriptions()));
}
@Test
// With no ignore-publish-step setting, subscriptions at other steps are
// filtered out.
public void singleExpressionIgnoreMissing() throws Exception {
ManualClock clock = new ManualClock();
byte[] data = json(sub(1));
HttpClient client = uri -> new HttpRequestBuilder(HttpClient.DEFAULT_LOGGER, uri) {
@Override public HttpResponse send() {
return ok(data);
}
};
SubscriptionManager mgr = new SubscriptionManager(mapper, client, clock, v -> null);
mgr.refresh();
Assertions.assertTrue(mgr.subscriptions().isEmpty());
}
@Test
// Explicitly setting ignore-publish-step=true has the same filtering effect.
public void singleExpressionIgnoreExplicit() throws Exception {
ManualClock clock = new ManualClock();
byte[] data = json(sub(1));
HttpClient client = uri -> new HttpRequestBuilder(HttpClient.DEFAULT_LOGGER, uri) {
@Override public HttpResponse send() {
return ok(data);
}
};
Map<String, String> config = new HashMap<>();
config.put("atlas.lwc.ignore-publish-step", "true");
SubscriptionManager mgr = new SubscriptionManager(mapper, client, clock, config::get);
mgr.refresh();
Assertions.assertTrue(mgr.subscriptions().isEmpty());
}
@Test
// Subscriptions removed from the payload linger until their TTL passes,
// then get dropped on the next refresh.
public void expiration() throws Exception {
ManualClock clock = new ManualClock();
byte[] data1 = json(sub(1), sub(2)); // Initial set of 2 expressions
byte[] data2 = json(sub(2)); // Final set with only expression 2
SubscriptionManager mgr = newInstance(clock, ok(data1), ok(data2), ok(data2));
mgr.refresh();
Assertions.assertEquals(set(sub(1), sub(2)), new HashSet<>(mgr.subscriptions()));
// Should still contain 1 because it hasn't expired
mgr.refresh();
Assertions.assertEquals(set(sub(1), sub(2)), new HashSet<>(mgr.subscriptions()));
// Should have removed 1 because it has expired
clock.setWallTime(Duration.ofMinutes(20).toMillis());
mgr.refresh();
Assertions.assertEquals(set(sub(2)), new HashSet<>(mgr.subscriptions()));
}
@Test
// A failed refresh must keep the previously fetched subscriptions alive.
public void startsFailing() throws Exception {
ManualClock clock = new ManualClock();
byte[] data = json(sub(1));
HttpResponse ok = new HttpResponse(200, Collections.emptyMap(), data);
HttpResponse error = new HttpResponse(500, Collections.emptyMap(), new byte[0]);
SubscriptionManager mgr = newInstance(clock, ok, error);
mgr.refresh();
Assertions.assertEquals(set(sub(1)), new HashSet<>(mgr.subscriptions()));
// Double check it is not expired
clock.setWallTime(Duration.ofMinutes(20).toMillis());
mgr.refresh();
Assertions.assertEquals(set(sub(1)), new HashSet<>(mgr.subscriptions()));
}
@Test
// If every fetch fails there is simply nothing subscribed.
public void alwaysFailing() throws Exception {
ManualClock clock = new ManualClock();
byte[] data = json(sub(1));
HttpResponse error = new HttpResponse(500, Collections.emptyMap(), new byte[0]);
SubscriptionManager mgr = newInstance(clock, error, error);
mgr.refresh();
Assertions.assertTrue(mgr.subscriptions().isEmpty());
// Double check it is not expired
clock.setWallTime(Duration.ofMinutes(20).toMillis());
mgr.refresh();
Assertions.assertTrue(mgr.subscriptions().isEmpty());
}
@Test
// A 304 Not Modified keeps the prior subscription set (ETag caching).
public void notModified() throws Exception {
ManualClock clock = new ManualClock();
byte[] data = json(sub(1));
Map<String, List<String>> headers =
Collections.singletonMap("ETag", Collections.singletonList("12345"));
HttpResponse ok = new HttpResponse(200, headers, data);
HttpResponse notModified = new HttpResponse(304, Collections.emptyMap(), new byte[0]);
SubscriptionManager mgr = newInstance(clock, ok, notModified);
mgr.refresh();
Assertions.assertEquals(set(sub(1)), new HashSet<>(mgr.subscriptions()));
// Double check it is not expired
clock.setWallTime(Duration.ofMinutes(20).toMillis());
mgr.refresh();
Assertions.assertEquals(set(sub(1)), new HashSet<>(mgr.subscriptions()));
}
@Test
// A structurally invalid payload (array instead of object) must not throw
// and leaves the subscription set empty.
public void invalidPayload() {
ManualClock clock = new ManualClock();
byte[] data = "[]".getBytes(StandardCharsets.UTF_8);
SubscriptionManager mgr = newInstance(clock, ok(data));
mgr.refresh();
Assertions.assertTrue(mgr.subscriptions().isEmpty());
}
}
/*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.ManualClock;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.api.Statistic;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
// Unit tests for AtlasMaxGauge: reports the max value seen during the
// previous step as a gauge; NaN/Infinity are ignored; resets each step.
public class AtlasMaxGaugeTest {
private final ManualClock clock = new ManualClock();
private final long step = 10000L;
private final AtlasMaxGauge gauge = new AtlasMaxGauge(Id.create("test"), clock, step, step);
// Asserts one measurement with the expected value, or no measurements at
// all when NaN is expected (non-finite values are not published).
private void checkValue(double expected) {
int count = 0;
for (Measurement m : gauge.measure()) {
Assertions.assertEquals(gauge.id().withTags(Statistic.max, DsType.gauge), m.id());
Assertions.assertEquals(expected, m.value(), 1e-12);
++count;
}
Assertions.assertEquals(Double.isFinite(expected) ? 1 : 0, count);
}
@Test
public void measuredIdHasDsType() {
checkValue(Double.NaN);
}
@Test
// The set value only shows up after the step rolls over.
public void set() {
gauge.set(42);
checkValue(Double.NaN);
clock.setWallTime(step + 1);
checkValue(42);
}
@Test
// NaN updates are dropped; the earlier finite value (0) is reported.
public void setNaN() {
gauge.set(0);
gauge.set(Double.NaN);
checkValue(Double.NaN);
clock.setWallTime(step + 1);
checkValue(0);
}
@Test
// Infinite values are ignored entirely.
public void setInfinity() {
gauge.set(Double.POSITIVE_INFINITY);
checkValue(Double.NaN);
clock.setWallTime(step + 1);
checkValue(Double.NaN);
}
@Test
// Negative values are valid for a max gauge.
public void setNegative() {
gauge.set(-1);
checkValue(Double.NaN);
clock.setWallTime(step + 1);
checkValue(-1);
}
@Test
// Only the maximum of all values set within the step is kept.
public void multipleSets() {
gauge.set(42);
gauge.set(44);
gauge.set(43);
checkValue(Double.NaN);
clock.setWallTime(step + 1);
checkValue(44);
}
@Test
// The max resets once the value's step has been reported.
public void rollForward() {
gauge.set(42);
clock.setWallTime(step + 1);
checkValue(42);
clock.setWallTime(step + step + 1);
checkValue(Double.NaN);
}
@Test
// Idle gauges report as expired; a set revives them.
public void expiration() {
long start = clock.wallTime();
clock.setWallTime(start + step * 2);
Assertions.assertTrue(gauge.hasExpired());
gauge.set(1);
Assertions.assertFalse(gauge.hasExpired());
clock.setWallTime(start + step * 3 + 1);
Assertions.assertTrue(gauge.hasExpired());
gauge.set(1);
Assertions.assertFalse(gauge.hasExpired());
}
@Test
// Measurement timestamps should align with the current step boundary.
public void measureTimestamp() {
long start = clock.wallTime();
gauge.set(0.0);
clock.setWallTime(start + step);
Assertions.assertEquals(start + step, gauge.measure().iterator().next().timestamp());
gauge.set(0.0);
clock.setWallTime(start + step * 2);
Assertions.assertEquals(start + step * 2, gauge.measure().iterator().next().timestamp());
}
}
/*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.ManualClock;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.api.Statistic;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
/**
 * Tests for {@code AtlasCounter}: step-based counting, conversion of counts to
 * rates for reporting, and expiration behavior.
 */
public class AtlasCounterTest {

  private final ManualClock clock = new ManualClock();

  // Step size in milliseconds; reported rates below are per-second over this window.
  private final long step = 10000L;

  private final AtlasCounter counter = new AtlasCounter(Id.create("test"), clock, step, step);

  /**
   * Asserts the counter produces exactly one measurement, tagged with the count
   * statistic and rate ds type, whose value is the expected count converted to a
   * per-second rate, and that the raw accumulated count matches as well.
   */
  private void checkValue(double expected) {
    int count = 0;
    for (Measurement m : counter.measure()) {
      Assertions.assertEquals(counter.id().withTags(Statistic.count, DsType.rate), m.id());
      // 10.0 is the step in seconds (step = 10000ms): rate = count / stepSeconds.
      Assertions.assertEquals(expected / 10.0, m.value(), 1e-12);
      Assertions.assertEquals(expected, counter.actualCount(), 1e-12);
      ++count;
    }
    // Exactly one measurement should be produced per polling cycle.
    Assertions.assertEquals(1, count);
  }

  /** A freshly created counter reports 0 with the expected statistic/ds type tags. */
  @Test
  public void measuredIdHasDsType() {
    checkValue(0);
  }

  /** Increments are not visible until the step containing them has completed. */
  @Test
  public void increment() {
    counter.increment();
    checkValue(0);
    clock.setWallTime(step + 1);
    checkValue(1);
  }

  /** Incrementing by an explicit amount is reported after the step rolls over. */
  @Test
  public void incrementAmount() {
    counter.increment(42);
    checkValue(0);
    clock.setWallTime(step + 1);
    checkValue(42);
  }

  /** Fractional amounts are supported via add. */
  @Test
  public void addAmount() {
    counter.add(42.1);
    clock.setWallTime(step + 1);
    checkValue(42.1);
  }

  /** Negative amounts are ignored; the count stays at 0. */
  @Test
  public void addNegativeAmount() {
    counter.add(-42.0);
    clock.setWallTime(step + 1);
    checkValue(0.0);
  }

  /** NaN amounts are ignored and do not corrupt previously accumulated values. */
  @Test
  public void addNaN() {
    counter.add(1.0);
    counter.add(Double.NaN);
    clock.setWallTime(step + 1);
    checkValue(1.0);
  }

  /** Infinite amounts are ignored; the count stays at 0. */
  @Test
  public void addInfinity() {
    counter.add(Double.POSITIVE_INFINITY);
    clock.setWallTime(step + 1);
    checkValue(0.0);
  }

  /** A count applies only to its own step; the following step starts again at 0. */
  @Test
  public void rollForward() {
    counter.increment(42);
    clock.setWallTime(step + 1);
    checkValue(42);
    clock.setWallTime(step + step + 1);
    checkValue(0);
  }

  /**
   * The counter reports itself expired after going without updates (see the
   * assertions for exact boundaries); any increment un-expires it.
   */
  @Test
  public void expiration() {
    long start = clock.wallTime();
    clock.setWallTime(start + step * 2);
    Assertions.assertTrue(counter.hasExpired());
    counter.increment();
    Assertions.assertFalse(counter.hasExpired());
    clock.setWallTime(start + step * 3 + 1);
    Assertions.assertTrue(counter.hasExpired());
    counter.increment(42L);
    Assertions.assertFalse(counter.hasExpired());
  }

  /** A statistic tag already present on the id takes precedence over the default count. */
  @Test
  public void preferStatisticFromTags() {
    Id id = Id.create("test").withTag(Statistic.percentile);
    AtlasCounter c = new AtlasCounter(id, clock, step, step);
    Id actual = c.measure().iterator().next().id();
    // Only the ds type tag is added; the existing percentile statistic is kept.
    Assertions.assertEquals(id.withTag(DsType.rate), actual);
  }
}
| 6,037 |
0 | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas/impl/PrefixTreeTest.java | /*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
/**
 * Tests for {@code PrefixTree}: values registered under a string prefix are
 * returned by {@code get} for any key starting with that prefix.
 */
public class PrefixTreeTest {

  /** Shorthand for building an expected list of values. */
  private List<String> list(String... values) {
    return Arrays.asList(values);
  }

  /** Sorts in place so lookups with unspecified ordering compare against a fixed list. */
  private List<String> sort(List<String> values) {
    values.sort(String::compareTo);
    return values;
  }

  /** Asserts both size() and isEmpty() agree with the expected number of entries. */
  private void assertSize(PrefixTree<?> tree, int expected) {
    Assertions.assertEquals(expected, tree.size());
    Assertions.assertEquals(expected == 0, tree.isEmpty());
  }

  /** A null prefix matches every key, including the empty string. */
  @Test
  public void nullPrefix() {
    PrefixTree<String> tree = new PrefixTree<>();
    tree.put(null, "1");
    assertSize(tree, 1);
    Assertions.assertEquals(list("1"), tree.get("foo"));
    Assertions.assertEquals(list("1"), tree.get("bar"));
    Assertions.assertEquals(list("1"), tree.get(""));
    // Removal requires matching both the prefix and the value.
    Assertions.assertFalse(tree.remove(null, "2"));
    Assertions.assertTrue(tree.remove(null, "1"));
    assertSize(tree, 0);
    Assertions.assertEquals(Collections.emptyList(), tree.get("foo"));
  }

  /** An empty prefix behaves like a null prefix: it matches every key. */
  @Test
  public void emptyPrefix() {
    PrefixTree<String> tree = new PrefixTree<>();
    tree.put("", "1");
    Assertions.assertEquals(list("1"), tree.get("foo"));
    Assertions.assertEquals(list("1"), tree.get("bar"));
    Assertions.assertEquals(list("1"), tree.get(""));
    Assertions.assertFalse(tree.remove("", "2"));
    Assertions.assertTrue(tree.remove("", "1"));
    Assertions.assertEquals(Collections.emptyList(), tree.get("foo"));
  }

  /** A prefix matches keys that start with it, including an exact match. */
  @Test
  public void simplePrefix() {
    PrefixTree<String> tree = new PrefixTree<>();
    tree.put("abc", "1");
    assertSize(tree, 1);
    Assertions.assertEquals(list("1"), tree.get("abcdef"));
    Assertions.assertEquals(list("1"), tree.get("abcghi"));
    Assertions.assertEquals(list("1"), tree.get("abc"));
    // A key shorter than or diverging from the prefix does not match.
    Assertions.assertEquals(Collections.emptyList(), tree.get("abd"));
    Assertions.assertEquals(Collections.emptyList(), tree.get("ab"));
    Assertions.assertTrue(tree.remove("abc", "1"));
    Assertions.assertFalse(tree.remove("abc", "1"));
    assertSize(tree, 0);
    Assertions.assertEquals(Collections.emptyList(), tree.get("abcdef"));
  }

  /** Every prefix along the path of the key contributes its values to the result. */
  @Test
  public void multipleMatches() {
    PrefixTree<String> tree = new PrefixTree<>();
    tree.put("abc", "1");
    tree.put("ab", "2");
    tree.put("a", "3");
    tree.put("abc", "4");
    assertSize(tree, 4);
    Assertions.assertEquals(list("1", "2", "3", "4"), sort(tree.get("abcdef")));
    Assertions.assertEquals(list("2", "3"), sort(tree.get("abdef")));
    Assertions.assertEquals(list("3"), tree.get("adef"));
    Assertions.assertEquals(Collections.emptyList(), tree.get("bcdef"));
    // Removing a value registered under "abc" via "ab" must fail.
    Assertions.assertFalse(tree.remove("ab", "1"));
    Assertions.assertTrue(tree.remove("abc", "1"));
    assertSize(tree, 3);
    Assertions.assertEquals(list("2", "3", "4"), sort(tree.get("abcdef")));
  }

  /**
   * A character outside the tree's supported set apparently truncates the effective
   * prefix: "aβc" matches keys like "ab", "abd", and "abcdef" but not "b" — i.e. it
   * behaves like a shorter prefix ending before the unsupported character.
   * NOTE(review): inferred from the assertions below; confirm against PrefixTree.
   */
  @Test
  public void unsupportedCharInPrefix() {
    PrefixTree<String> tree = new PrefixTree<>();
    tree.put("aβc", "1");
    assertSize(tree, 1);
    Assertions.assertEquals(list("1"), tree.get("abcdef"));
    Assertions.assertEquals(list("1"), tree.get("abcghi"));
    Assertions.assertEquals(list("1"), tree.get("abc"));
    Assertions.assertEquals(list("1"), tree.get("abd"));
    Assertions.assertEquals(list("1"), tree.get("ab"));
    Assertions.assertEquals(Collections.emptyList(), tree.get("b"));
    // Removal still requires the original (full) prefix string.
    Assertions.assertTrue(tree.remove("aβc", "1"));
    assertSize(tree, 0);
    Assertions.assertEquals(Collections.emptyList(), tree.get("abcdef"));
  }
}
| 6,038 |
0 | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas/impl/PublishPayloadTest.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import nl.jqno.equalsverifier.EqualsVerifier;
import nl.jqno.equalsverifier.Warning;
import org.junit.jupiter.api.Test;
/** Verifies {@code PublishPayload} obeys the equals/hashCode contract. */
public class PublishPayloadTest {

  // NULL_FIELDS is suppressed, so EqualsVerifier's null-field checks are skipped
  // for this class.
  @Test
  public void equalsContract() {
    EqualsVerifier.forClass(PublishPayload.class)
        .suppress(Warning.NULL_FIELDS)
        .verify();
  }
}
| 6,039 |
0 | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas/impl/EvaluatorTest.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.ManualClock;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.api.NoopRegistry;
import com.netflix.spectator.api.Registry;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
/**
 * Tests for {@code Evaluator}, which evaluates synced subscription expressions
 * against batches of measurements and produces {@code EvalPayload} results.
 */
public class EvaluatorTest {

  private final ManualClock clock = new ManualClock();
  private final Registry registry = new DefaultRegistry(clock);

  /**
   * Creates one gauge-style measurement per value, named {@code name} and tagged
   * with the position ({@code i}) and formatted value ({@code v}), all at timestamp 0.
   */
  private List<Measurement> data(String name, double... vs) {
    List<Measurement> ms = new ArrayList<>();
    for (int i = 0; i < vs.length; ++i) {
      String pos = String.format("%03d", i);
      String value = String.format("%f", vs[i]);
      ms.add(new Measurement(registry.createId(name, "i", pos, "v", value), 0L, vs[i]));
    }
    return ms;
  }

  /** Same as {@link #data}, but ids also carry {@code statistic=count} marking them as counters. */
  private List<Measurement> counterData(String name, double... vs) {
    List<Measurement> ms = new ArrayList<>();
    for (int i = 0; i < vs.length; ++i) {
      String pos = String.format("%03d", i);
      String value = String.format("%f", vs[i]);
      Id id = registry.createId(name, "i", pos, "v", value, "statistic", "count");
      ms.add(new Measurement(id, 0L, vs[i]));
    }
    return ms;
  }

  /** Creates an evaluator with gauge aggregation performed eagerly (not delayed). */
  private Evaluator newEvaluator(String... commonTags) {
    return newEvaluator(false, commonTags);
  }

  /**
   * Creates an evaluator with a 5s step, the given common tags (alternating
   * key/value pairs), and the specified gauge aggregation behavior.
   */
  private Evaluator newEvaluator(boolean delayGauge, String... commonTags) {
    EvaluatorConfig config = new EvaluatorConfig() {
      @Override public long evaluatorStepSize() {
        return 5000L;
      }
      @Override public Map<String, String> commonTags() {
        return tags(commonTags);
      }
      @Override public boolean delayGaugeAggregation() {
        return delayGauge;
      }
    };
    return new Evaluator(config);
  }

  /** Builds a map from alternating key/value arguments. */
  private Map<String, String> tags(String... ts) {
    Map<String, String> map = new HashMap<>();
    for (int i = 0; i < ts.length; i += 2) {
      map.put(ts[i], ts[i + 1]);
    }
    return map;
  }

  /** Creates a subscription with the given id and expression at a 5s frequency. */
  private Subscription newSubscription(String name, String expr) {
    return new Subscription().withId(name).withExpression(expr).withFrequency(5000);
  }

  /**
   * Returns a payload whose metrics are ordered by id for stable comparisons.
   * NOTE(review): this sorts {@code ms} in place and then passes
   * {@code p.getMetrics()} to the new payload, relying on both being the same
   * list instance; returning {@code ms} directly would make that explicit.
   */
  private EvalPayload sort(EvalPayload p) {
    List<EvalPayload.Metric> ms = p.getMetrics();
    ms.sort(Comparator.comparing(EvalPayload.Metric::getId));
    return new EvalPayload(p.getTimestamp(), p.getMetrics());
  }

  /** With no subscriptions synced, evaluation yields an empty payload. */
  @Test
  public void noSubsForGroup() {
    Evaluator evaluator = newEvaluator();
    EvalPayload payload = evaluator.eval(0L);
    EvalPayload expected = new EvalPayload(0L, Collections.emptyList());
    Assertions.assertEquals(expected, payload);
  }

  /** Multiple subscriptions are evaluated independently over the same input. */
  @Test
  public void sumAndMaxForGroup() {
    List<Subscription> subs = new ArrayList<>();
    subs.add(newSubscription("sum", ":true,:sum"));
    subs.add(newSubscription("max", ":true,:max"));
    Evaluator evaluator = newEvaluator();
    evaluator.sync(subs);
    EvalPayload payload = evaluator.eval(0L, data("foo", 1.0, 2.0, 3.0));
    List<EvalPayload.Metric> metrics = new ArrayList<>();
    metrics.add(new EvalPayload.Metric("max", Collections.emptyMap(), 3.0));
    metrics.add(new EvalPayload.Metric("sum", Collections.emptyMap(), 6.0));
    EvalPayload expected = new EvalPayload(0L, metrics);
    Assertions.assertEquals(expected, sort(payload));
  }

  /** Re-syncing with a changed expression for the same id replaces the old one. */
  @Test
  public void updateSub() {
    // Eval with sum
    List<Subscription> sumSub = new ArrayList<>();
    sumSub.add(newSubscription("sum", ":true,:sum"));
    Evaluator evaluator = newEvaluator();
    evaluator.sync(sumSub);
    EvalPayload payload = evaluator.eval(0L, data("foo", 1.0, 2.0, 3.0));
    List<EvalPayload.Metric> metrics = new ArrayList<>();
    metrics.add(new EvalPayload.Metric("sum", Collections.emptyMap(), 6.0));
    EvalPayload expected = new EvalPayload(0L, metrics);
    Assertions.assertEquals(expected, payload);
    // Update to use max instead
    List<Subscription> maxSub = new ArrayList<>();
    maxSub.add(newSubscription("sum", ":true,:max"));
    evaluator.sync(maxSub);
    payload = evaluator.eval(0L, data("foo", 1.0, 2.0, 3.0));
    metrics = new ArrayList<>();
    metrics.add(new EvalPayload.Metric("sum", Collections.emptyMap(), 3.0));
    expected = new EvalPayload(0L, metrics);
    Assertions.assertEquals(expected, payload);
  }

  /** Query clauses on common tags match and the common tags appear in the result. */
  @Test
  public void commonTagsMatch() {
    List<Subscription> subs = new ArrayList<>();
    subs.add(newSubscription("sum", "app,www,:eq,name,foo,:eq,:and,:sum"));
    Evaluator evaluator = newEvaluator("app", "www");
    evaluator.sync(subs);
    EvalPayload payload = evaluator.eval(0L, data("foo", 1.0, 2.0, 3.0));
    List<EvalPayload.Metric> metrics = new ArrayList<>();
    metrics.add(new EvalPayload.Metric("sum", tags("app", "www", "name", "foo"), 6.0));
    EvalPayload expected = new EvalPayload(0L, metrics);
    Assertions.assertEquals(expected, payload);
  }

  /** A query clause that disagrees with the common tags produces no output. */
  @Test
  public void commonTagsNoMatch() {
    List<Subscription> subs = new ArrayList<>();
    subs.add(newSubscription("sum", "app,abc,:eq,name,foo,:eq,:and,:sum"));
    Evaluator evaluator = newEvaluator("app", "www");
    evaluator.sync(subs);
    EvalPayload payload = evaluator.eval(0L, data("foo", 1.0, 2.0, 3.0));
    EvalPayload expected = new EvalPayload(0L, Collections.emptyList());
    Assertions.assertEquals(expected, payload);
  }

  /** Grouping by a common-tag key resolves using the configured common tags. */
  @Test
  public void commonTagsGroupBy() {
    List<Subscription> subs = new ArrayList<>();
    subs.add(newSubscription("sum", "name,foo,:eq,:sum,(,app,),:by"));
    Evaluator evaluator = newEvaluator("app", "www");
    evaluator.sync(subs);
    EvalPayload payload = evaluator.eval(0L, data("foo", 1.0, 2.0, 3.0));
    List<EvalPayload.Metric> metrics = new ArrayList<>();
    metrics.add(new EvalPayload.Metric("sum", tags("app", "www", "name", "foo"), 6.0));
    EvalPayload expected = new EvalPayload(0L, metrics);
    Assertions.assertEquals(expected, payload);
  }

  /** Subscriptions whose expressions fail to parse are dropped rather than propagated. */
  @Test
  public void badExpression() {
    List<Subscription> subs = new ArrayList<>();
    subs.add(newSubscription("sum", "invalid expression,:foo"));
    Evaluator evaluator = newEvaluator("app", "www");
    evaluator.sync(subs);
    Assertions.assertEquals(0, evaluator.subscriptionCount());
  }

  /** With delayed gauge aggregation, counter data is still fully aggregated locally. */
  @Test
  public void delayAggrCounterSum() {
    List<Subscription> subs = new ArrayList<>();
    subs.add(newSubscription("sum", ":true,:sum"));
    Evaluator evaluator = newEvaluator(true);
    evaluator.sync(subs);
    EvalPayload payload = evaluator.eval(0L, counterData("foo", 1.0, 2.0, 3.0));
    Assertions.assertEquals(1, payload.getMetrics().size());
    Assertions.assertEquals(
        new EvalPayload.Metric("sum", tags(), 6.0),
        payload.getMetrics().get(0)
    );
  }

  /**
   * With delayed gauge aggregation, gauge values are emitted individually with an
   * {@code atlas.aggr} tag so the aggregation can happen downstream.
   */
  @Test
  public void delayAggrGaugeSum() {
    List<Subscription> subs = new ArrayList<>();
    subs.add(newSubscription("sum", ":true,:sum"));
    Evaluator evaluator = newEvaluator(true);
    evaluator.sync(subs);
    EvalPayload payload = evaluator.eval(0L, data("foo", 1.0, 2.0, 3.0));
    Assertions.assertEquals(3, payload.getMetrics().size());
    for (EvalPayload.Metric m : payload.getMetrics()) {
      Map<String, String> tags = m.getTags();
      Assertions.assertEquals(1, tags.size());
      Assertions.assertTrue(tags.containsKey("atlas.aggr"));
    }
  }

  /** Delayed :count over gauges emits a 1.0 per input, tagged for downstream aggregation. */
  @Test
  public void delayAggrGaugeCount() {
    List<Subscription> subs = new ArrayList<>();
    subs.add(newSubscription("sum", ":true,:count"));
    Evaluator evaluator = newEvaluator(true);
    evaluator.sync(subs);
    EvalPayload payload = evaluator.eval(0L, data("foo", 1.0, 2.0, 3.0));
    Assertions.assertEquals(3, payload.getMetrics().size());
    for (EvalPayload.Metric m : payload.getMetrics()) {
      Map<String, String> tags = m.getTags();
      Assertions.assertEquals(1, tags.size());
      Assertions.assertTrue(tags.containsKey("atlas.aggr"));
      Assertions.assertEquals(1.0, m.getValue());
    }
  }

  /** Delayed :count with a group-by keeps the group key alongside the aggr tag. */
  @Test
  public void delayAggrGaugeGroupByCount() {
    List<Subscription> subs = new ArrayList<>();
    subs.add(newSubscription("sum", ":true,:count,(,name,),:by"));
    Evaluator evaluator = newEvaluator(true);
    evaluator.sync(subs);
    EvalPayload payload = evaluator.eval(0L, data("foo", 1.0, 2.0, 3.0));
    Assertions.assertEquals(3, payload.getMetrics().size());
    for (EvalPayload.Metric m : payload.getMetrics()) {
      Map<String, String> tags = m.getTags();
      Assertions.assertEquals(2, tags.size());
      Assertions.assertTrue(tags.containsKey("atlas.aggr"));
      Assertions.assertEquals(1.0, m.getValue());
    }
  }

  /** Group-by on a key absent from the data yields no output metrics. */
  @Test
  public void delayAggrGaugeGroupByMissingKey() {
    List<Subscription> subs = new ArrayList<>();
    subs.add(newSubscription("sum", ":true,:sum,(,foo,),:by"));
    Evaluator evaluator = newEvaluator(true);
    evaluator.sync(subs);
    EvalPayload payload = evaluator.eval(0L, data("foo", 1.0, 2.0, 3.0));
    Assertions.assertEquals(0, payload.getMetrics().size());
  }

  /** Max over gauges is aggregated locally even when gauge aggregation is delayed. */
  @Test
  public void delayAggrGaugeMax() {
    List<Subscription> subs = new ArrayList<>();
    subs.add(newSubscription("max", ":true,:max"));
    Evaluator evaluator = newEvaluator(true);
    evaluator.sync(subs);
    EvalPayload payload = evaluator.eval(0L, data("foo", 1.0, 2.0, 3.0));
    Assertions.assertEquals(1, payload.getMetrics().size());
    Assertions.assertEquals(
        new EvalPayload.Metric("max", tags(), 3.0),
        payload.getMetrics().get(0)
    );
  }

  /**
   * Mixed input: counters collapse to a single aggregate while gauges are emitted
   * individually with the atlas.aggr tag for downstream aggregation.
   */
  @Test
  public void delayAggrCombinationSum() {
    List<Subscription> subs = new ArrayList<>();
    subs.add(newSubscription("sum", ":true,:sum,(,name,),:by"));
    List<Measurement> ms = data("gauge", 1.0, 2.0, 3.0);
    ms.addAll(counterData("counter", 1.0, 2.0, 3.0));
    Evaluator evaluator = newEvaluator(true);
    evaluator.sync(subs);
    EvalPayload payload = evaluator.eval(0L, ms);
    Assertions.assertEquals(4, payload.getMetrics().size());
    int counterValues = 0;
    int gaugeValues = 0;
    for (EvalPayload.Metric m : payload.getMetrics()) {
      Map<String, String> tags = m.getTags();
      switch (tags.get("name")) {
        case "gauge":
          Assertions.assertTrue(tags.containsKey("atlas.aggr"));
          ++gaugeValues;
          break;
        case "counter":
          Assertions.assertFalse(tags.containsKey("atlas.aggr"));
          ++counterValues;
          break;
        default:
          Assertions.fail();
      }
    }
    Assertions.assertEquals(1, counterValues);
    Assertions.assertEquals(3, gaugeValues);
  }
}
| 6,040 |
0 | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas/impl/SubscriptionTest.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import nl.jqno.equalsverifier.EqualsVerifier;
import nl.jqno.equalsverifier.Warning;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
/** Tests for {@link Subscription} equality and expression parsing. */
public class SubscriptionTest {

  /**
   * Equality tolerates non-final fields and ignores the {@code expr} field
   * (see withIgnoredFields below).
   */
  @Test
  public void equalsContract() {
    EqualsVerifier.forClass(Subscription.class)
        .suppress(Warning.NONFINAL_FIELDS)
        .withIgnoredFields("expr")
        .verify();
  }

  /** A valid expression string parses into the corresponding data expression. */
  @Test
  public void dataExpr() {
    Subscription subscription = new Subscription().withExpression(":true,:sum");
    DataExpr expected = new DataExpr.Sum(Query.TRUE);
    Assertions.assertEquals(expected, subscription.dataExpr());
  }

  /** An expression that is not a valid data expression fails with IllegalArgumentException. */
  @Test
  public void dataExprInvalid() {
    Assertions.assertThrows(IllegalArgumentException.class,
        () -> new Subscription().withExpression(":true").dataExpr());
  }
}
| 6,041 |
0 | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas/impl/QueryTest.java | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.Registry;
import nl.jqno.equalsverifier.EqualsVerifier;
import nl.jqno.equalsverifier.Warning;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
public class QueryTest {
private final Registry registry = new DefaultRegistry();
/**
 * Parses the expression, asserting that parsing is deterministic and that the
 * query round-trips through its toString representation.
 */
private Query parse(String expr) {
  Query parsed = Parser.parseQuery(expr);
  // Parsing the same string twice must yield equal queries.
  Assertions.assertEquals(parsed, Parser.parseQuery(expr));
  // Round-trip: the string form must parse back to an equal query with an
  // identical string form.
  Query reparsed = Parser.parseQuery(parsed.toString());
  Assertions.assertEquals(parsed, reparsed);
  Assertions.assertEquals(parsed.toString(), reparsed.toString());
  return parsed;
}
/** :true matches any id. */
@Test
public void trueQuery() {
  Query q = parse(":true");
  Assertions.assertTrue(q.matches(registry.createId("foo")));
}

/** :false matches nothing. */
@Test
public void falseQuery() {
  Query q = parse(":false");
  Assertions.assertFalse(q.matches(registry.createId("foo")));
}

/** :eq matches on an exact tag value (the name tag here). */
@Test
public void eqQuery() {
  Query q = parse("name,foo,:eq");
  Assertions.assertTrue(q.matches(registry.createId("foo")));
  Assertions.assertFalse(q.matches(registry.createId("bar")));
}

/** Query.Equal obeys the equals/hashCode contract. */
@Test
public void eqEqualsContract() {
  EqualsVerifier
      .forClass(Query.Equal.class)
      .suppress(Warning.NULL_FIELDS)
      .verify();
}

/** :has matches ids that contain the key at all, regardless of its value. */
@Test
public void hasQuery() {
  Query q = parse("bar,:has");
  // "bar" here is the name, not a tag key, so it does not match.
  Assertions.assertFalse(q.matches(registry.createId("bar")));
  Assertions.assertTrue(q.matches(registry.createId("foo", "bar", "baz")));
  Assertions.assertFalse(q.matches(registry.createId("foo", "baz", "baz")));
}

/** Query.Has obeys the equals/hashCode contract. */
@Test
public void hasEqualsContract() {
  EqualsVerifier
      .forClass(Query.Has.class)
      .suppress(Warning.NULL_FIELDS)
      .verify();
}

/** :in with an empty value list is rejected by the parser. */
@Test
public void inQueryEmpty() {
  Assertions.assertThrows(IllegalArgumentException.class, () -> parse("name,(,),:in"));
}

/** :in with a single value parses down to a simple :eq query. */
@Test
public void inQuerySingle() {
  Query q = Parser.parseQuery("name,(,bar,),:in");
  Assertions.assertEquals(new Query.Equal("name", "bar"), q);
}

/** :in matches when the tag value is any member of the list. */
@Test
public void inQuery() {
  Query q = parse("name,(,bar,foo,),:in");
  Assertions.assertTrue(q.matches(registry.createId("foo")));
  Assertions.assertTrue(q.matches(registry.createId("bar")));
  Assertions.assertFalse(q.matches(registry.createId("baz")));
}

/** Query.In obeys the equals/hashCode contract. */
@Test
public void inEqualsContract() {
  EqualsVerifier
      .forClass(Query.In.class)
      .suppress(Warning.NULL_FIELDS)
      .verify();
}
/** :lt performs a strict less-than string comparison on the tag value. */
@Test
public void ltQuery() {
  Query q = parse("name,foo,:lt");
  Assertions.assertFalse(q.matches(registry.createId("foo")));
  Assertions.assertTrue(q.matches(registry.createId("faa")));
  Assertions.assertFalse(q.matches(registry.createId("fzz")));
}

/** Query.LessThan obeys the equals/hashCode contract. */
@Test
public void ltEqualsContract() {
  EqualsVerifier
      .forClass(Query.LessThan.class)
      .suppress(Warning.NULL_FIELDS)
      .verify();
}

/** :le is like :lt but also matches an exactly equal value. */
@Test
public void leQuery() {
  Query q = parse("name,foo,:le");
  Assertions.assertTrue(q.matches(registry.createId("foo")));
  Assertions.assertTrue(q.matches(registry.createId("faa")));
  Assertions.assertFalse(q.matches(registry.createId("fzz")));
}

/** Query.LessThanEqual obeys the equals/hashCode contract. */
@Test
public void leEqualsContract() {
  EqualsVerifier
      .forClass(Query.LessThanEqual.class)
      .suppress(Warning.NULL_FIELDS)
      .verify();
}

/** :gt performs a strict greater-than string comparison on the tag value. */
@Test
public void gtQuery() {
  Query q = parse("name,foo,:gt");
  Assertions.assertFalse(q.matches(registry.createId("foo")));
  Assertions.assertFalse(q.matches(registry.createId("faa")));
  Assertions.assertTrue(q.matches(registry.createId("fzz")));
}

/** Query.GreaterThan obeys the equals/hashCode contract. */
@Test
public void gtEqualsContract() {
  EqualsVerifier
      .forClass(Query.GreaterThan.class)
      .suppress(Warning.NULL_FIELDS)
      .verify();
}

/** :ge is like :gt but also matches an exactly equal value. */
@Test
public void geQuery() {
  Query q = parse("name,foo,:ge");
  Assertions.assertTrue(q.matches(registry.createId("foo")));
  Assertions.assertFalse(q.matches(registry.createId("faa")));
  Assertions.assertTrue(q.matches(registry.createId("fzz")));
}

/** Query.GreaterThanEqual obeys the equals/hashCode contract. */
@Test
public void geEqualsContract() {
  EqualsVerifier
      .forClass(Query.GreaterThanEqual.class)
      .suppress(Warning.NULL_FIELDS)
      .verify();
}

/** :re is anchored at the start of the value: prefix matches, not substring. */
@Test
public void reQuery() {
  Query q = parse("name,foo,:re");
  Assertions.assertTrue(q.matches(registry.createId("foo")));
  Assertions.assertFalse(q.matches(registry.createId("abcfoo")));
  Assertions.assertTrue(q.matches(registry.createId("foobar")));
}

/** A ".*" regex is simplified to a plain :has check by the parser. */
@Test
public void reQueryAlwaysMatches() {
  Query q = Parser.parseQuery("name,.*,:re");
  Assertions.assertEquals(Parser.parseQuery("name,:has"), q);
}

/** :reic is the case-insensitive variant of :re. */
@Test
public void reicQuery() {
  Query q = parse("name,foO,:reic");
  Assertions.assertTrue(q.matches(registry.createId("fOo")));
  Assertions.assertFalse(q.matches(registry.createId("abcFoo")));
  Assertions.assertTrue(q.matches(registry.createId("Foobar")));
}

/** Query.Regex obeys the equals/hashCode contract (some fields intentionally unused). */
@Test
public void reEqualsContract() {
  EqualsVerifier
      .forClass(Query.Regex.class)
      .suppress(Warning.NULL_FIELDS, Warning.ALL_FIELDS_SHOULD_BE_USED)
      .verify();
}
/** :contains matches the value anywhere inside the tag value (case-sensitive). */
@Test
public void containsQuery() {
  Query q = parse("name,foo,:contains");
  Assertions.assertTrue(q.matches(registry.createId("foo")));
  Assertions.assertTrue(q.matches(registry.createId("foo_")));
  Assertions.assertTrue(q.matches(registry.createId("_foo_")));
  Assertions.assertTrue(q.matches(registry.createId("_foo")));
  Assertions.assertFalse(q.matches(registry.createId("_Foo_")));
}

/** Regex metacharacters in a :contains value are escaped when converted to :re. */
@Test
public void containsQueryEscape() {
  Query q = parse("name,^$.?*+[](){}\\#&!%,:contains");
  Assertions.assertEquals(
      "name,.*\\^\\$\\.\\?\\*\\+\\[\\]\\(\\)\\{\\}\\\\#&!%,:re",
      q.toString());
  Assertions.assertTrue(q.matches(registry.createId("^$.?*+[](){}\\#&!%")));
}

/** :starts matches only when the tag value begins with the given string. */
@Test
public void startsQuery() {
  Query q = parse("name,foo,:starts");
  Assertions.assertTrue(q.matches(registry.createId("foo")));
  Assertions.assertTrue(q.matches(registry.createId("foo_")));
  Assertions.assertFalse(q.matches(registry.createId("_foo_")));
  Assertions.assertFalse(q.matches(registry.createId("_foo")));
  Assertions.assertFalse(q.matches(registry.createId("Foo_")));
}

/** Regex metacharacters in a :starts value are escaped when converted to :re. */
@Test
public void startsQueryEscape() {
  Query q = parse("name,^$.?*+[](){}\\#&!%,:starts");
  Assertions.assertEquals(
      "name,\\^\\$\\.\\?\\*\\+\\[\\]\\(\\)\\{\\}\\\\#&!%,:re",
      q.toString());
  Assertions.assertTrue(q.matches(registry.createId("^$.?*+[](){}\\#&!%")));
}

/** :ends matches only when the tag value ends with the given string. */
@Test
public void endsQuery() {
  Query q = parse("name,foo,:ends");
  Assertions.assertTrue(q.matches(registry.createId("foo")));
  Assertions.assertFalse(q.matches(registry.createId("foo_")));
  Assertions.assertFalse(q.matches(registry.createId("_foo_")));
  Assertions.assertTrue(q.matches(registry.createId("_foo")));
  Assertions.assertFalse(q.matches(registry.createId("_Foo")));
}

/** Regex metacharacters in an :ends value are escaped; note the trailing anchor ($). */
@Test
public void endsQueryEscape() {
  Query q = parse("name,^$.?*+[](){}\\#&!%,:ends");
  Assertions.assertEquals(
      "name,.*\\^\\$\\.\\?\\*\\+\\[\\]\\(\\)\\{\\}\\\\#&!%$,:re",
      q.toString());
  Assertions.assertTrue(q.matches(registry.createId("^$.?*+[](){}\\#&!%")));
}
/** :and requires both sub-queries to match the same id. */
@Test
public void andQuery() {
  Query q = parse("name,foo,:eq,bar,baz,:eq,:and");
  Assertions.assertFalse(q.matches(registry.createId("foo")));
  Assertions.assertFalse(q.matches(registry.createId("bar")));
  Assertions.assertTrue(q.matches(registry.createId("foo", "bar", "baz")));
  Assertions.assertFalse(q.matches(registry.createId("bar", "bar", "baz")));
  Assertions.assertFalse(q.matches(registry.createId("foo", "bar", "def")));
  Assertions.assertFalse(q.matches(registry.createId("foo", "abc", "def")));
}

/** Query.And obeys the equals/hashCode contract. */
@Test
public void andEqualsContract() {
  EqualsVerifier
      .forClass(Query.And.class)
      .suppress(Warning.NULL_FIELDS)
      .verify();
}

/** :or matches when either sub-query matches. */
@Test
public void orQuery() {
  Query q = parse("name,foo,:eq,bar,baz,:eq,:or");
  Assertions.assertTrue(q.matches(registry.createId("foo")));
  Assertions.assertFalse(q.matches(registry.createId("bar")));
  Assertions.assertTrue(q.matches(registry.createId("foo", "bar", "baz")));
  Assertions.assertTrue(q.matches(registry.createId("bar", "bar", "baz")));
  Assertions.assertTrue(q.matches(registry.createId("foo", "bar", "def")));
  Assertions.assertTrue(q.matches(registry.createId("foo", "abc", "def")));
}

/** Query.Or obeys the equals/hashCode contract. */
@Test
public void orEqualsContract() {
  EqualsVerifier
      .forClass(Query.Or.class)
      .suppress(Warning.NULL_FIELDS)
      .verify();
}

/** :not inverts the match result of its sub-query. */
@Test
public void notQuery() {
  Query q = parse("name,foo,:eq,:not");
  Assertions.assertFalse(q.matches(registry.createId("foo")));
  Assertions.assertTrue(q.matches(registry.createId("bar")));
}

/** Query.Not obeys the equals/hashCode contract. */
@Test
public void notEqualsContract() {
  EqualsVerifier
      .forClass(Query.Not.class)
      .suppress(Warning.NULL_FIELDS)
      .verify();
}
/** TRUE is the identity element for and(): it is eliminated from the result. */
@Test
public void andOptimizationTrue() {
  Query q1 = new Query.Has("a");
  Query q2 = new Query.Has("b");
  Assertions.assertEquals(new Query.And(q1, q2), q1.and(q2));
  Assertions.assertEquals(q2, Query.TRUE.and(q2));
  Assertions.assertEquals(q1, q1.and(Query.TRUE));
}

/** FALSE short-circuits and(): the whole expression collapses to FALSE. */
@Test
public void andOptimizationFalse() {
  Query q1 = new Query.Has("a");
  Query q2 = new Query.Has("b");
  Assertions.assertEquals(new Query.And(q1, q2), q1.and(q2));
  Assertions.assertEquals(Query.FALSE, Query.FALSE.and(q2));
  Assertions.assertEquals(Query.FALSE, q1.and(Query.FALSE));
}

/** TRUE short-circuits or(): the whole expression collapses to TRUE. */
@Test
public void orOptimizationTrue() {
  Query q1 = new Query.Has("a");
  Query q2 = new Query.Has("b");
  Assertions.assertEquals(new Query.Or(q1, q2), q1.or(q2));
  Assertions.assertEquals(Query.TRUE, Query.TRUE.or(q2));
  Assertions.assertEquals(Query.TRUE, q1.or(Query.TRUE));
}

/** FALSE is the identity element for or(): it is eliminated from the result. */
@Test
public void orOptimizationFalse() {
  Query q1 = new Query.Has("a");
  Query q2 = new Query.Has("b");
  Assertions.assertEquals(new Query.Or(q1, q2), q1.or(q2));
  Assertions.assertEquals(q2, Query.FALSE.or(q2));
  Assertions.assertEquals(q1, q1.or(Query.FALSE));
}

/** not() of the TRUE constant yields FALSE. */
@Test
public void notOptimizationTrue() {
  Assertions.assertEquals(Query.FALSE, Query.TRUE.not());
}

/** not() of the FALSE constant yields TRUE. */
@Test
public void notOptimizationFalse() {
  Assertions.assertEquals(Query.TRUE, Query.FALSE.not());
}

/** De Morgan: not(a and b) == not(a) or not(b). */
@Test
public void notOptimizationAnd() {
  Query q1 = new Query.Has("a");
  Query q2 = new Query.Has("b");
  Query expected = new Query.Or(q1.not(), q2.not());
  Assertions.assertEquals(expected, new Query.And(q1, q2).not());
}

/** De Morgan: not(a or b) == not(a) and not(b). */
@Test
public void notOptimizationOr() {
  Query q1 = new Query.Has("a");
  Query q2 = new Query.Has("b");
  Query expected = new Query.And(q1.not(), q2.not());
  Assertions.assertEquals(expected, new Query.Or(q1, q2).not());
}

/** Double negation returns the original query; key queries invert to InvertedKeyQuery. */
@Test
public void notOptimizationNot() {
  Query.KeyQuery q = new Query.Has("a");
  Assertions.assertEquals(q, new Query.Not(q).not());
  Assertions.assertEquals(q, new Query.InvertedKeyQuery(q).not());
  Assertions.assertEquals(q, q.not().not());
  Assertions.assertTrue(q.not() instanceof Query.InvertedKeyQuery);
}

/** The parser constant-folds :and over :true/:false operands. */
@Test
public void parseOptimizationAnd() {
  Assertions.assertEquals(Query.TRUE, Parser.parseQuery(":true,:true,:and"));
  Assertions.assertEquals(Query.FALSE, Parser.parseQuery(":true,:false,:and"));
  Assertions.assertEquals(Query.FALSE, Parser.parseQuery(":false,:true,:and"));
  Assertions.assertEquals(Query.FALSE, Parser.parseQuery(":false,:false,:and"));
}

/** The parser constant-folds :or over :true/:false operands. */
@Test
public void parseOptimizationOr() {
  Assertions.assertEquals(Query.TRUE, Parser.parseQuery(":true,:true,:or"));
  Assertions.assertEquals(Query.TRUE, Parser.parseQuery(":true,:false,:or"));
  Assertions.assertEquals(Query.TRUE, Parser.parseQuery(":false,:true,:or"));
  Assertions.assertEquals(Query.FALSE, Parser.parseQuery(":false,:false,:or"));
}

/** The parser eliminates double :not applications. */
@Test
public void parseOptimizationNot() {
  Assertions.assertEquals(Query.TRUE, Parser.parseQuery(":true,:not,:not"));
  Assertions.assertEquals(Query.FALSE, Parser.parseQuery(":false,:not,:not"));
}
@Test
public void dnfListA() {
Query a = new Query.Has("a");
Assertions.assertEquals(Collections.singletonList(a), a.dnfList());
}
@Test
public void dnfListIn() {
List<Query> expected = new ArrayList<>();
Set<String> values = new TreeSet<>();
for (int i = 0; i < 5; ++i) {
values.add("" + i);
expected.add(new Query.Equal("k", "" + i));
}
Query q = new Query.In("k", values);
Assertions.assertEquals(expected, q.dnfList());
values.add("5");
Assertions.assertEquals(Collections.singletonList(q), q.dnfList());
}
@Test
public void dnfListAnd() {
  // A conjunction of simple key queries is a single DNF clause.
  Query conjunction = new Query.Has("a").and(new Query.Has("b"));
  Assertions.assertEquals(Collections.singletonList(conjunction), conjunction.dnfList());
}
/** Convenience for building a list of queries from varargs. */
private List<Query> qs(Query... queries) {
return Arrays.asList(queries);
}
@Test
public void dnfListOrAnd() {
  // Distribution: (a OR b) AND c expands to (a AND c) OR (b AND c).
  Query a = new Query.Has("a");
  Query b = new Query.Has("b");
  Query c = new Query.Has("c");
  List<Query> clauses = a.or(b).and(c).dnfList();
  Assertions.assertEquals(qs(a.and(c), b.and(c)), clauses);
}
@Test
public void dnfListOrOrAnd() {
// (a OR b) AND (c OR d) expands to the cross product of the clauses.
Query a = new Query.Has("a");
Query b = new Query.Has("b");
Query c = new Query.Has("c");
Query d = new Query.Has("d");
Query q = a.or(b).and(c.or(d));
List<Query> expected = qs(
a.and(c),
a.and(d),
b.and(c),
b.and(d)
);
Assertions.assertEquals(expected, q.dnfList());
}
@Test
public void dnfListNotOr() {
// De Morgan: !(a OR b) becomes the single clause (!a AND !b).
Query a = new Query.Has("a");
Query b = new Query.Has("b");
Query q = new Query.Not(a.or(b));
Assertions.assertEquals(qs(a.not().and(b.not())), q.dnfList());
}
@Test
public void dnfListNotAnd() {
// De Morgan: !(a AND b) becomes (!a OR !b), i.e. two separate clauses.
Query a = new Query.Has("a");
Query b = new Query.Has("b");
Query q = new Query.Not(a.and(b));
Assertions.assertEquals(qs(a.not(), b.not()), q.dnfList());
}
@Test
public void dnfListNotSimple() {
  // Negation of a simple key query cannot be expanded further; it is its
  // own single DNF clause.
  Query negated = new Query.Not(new Query.Has("a"));
  Assertions.assertEquals(qs(negated), negated.dnfList());
}
@Test
public void dnfListSimplifiesToKeyQueries() {
// Property-style check: for randomly generated queries, every conjunct of
// every DNF clause must be a key query or a constant. Fixed seed keeps
// the run reproducible.
Random r = new Random(42);
for (int i = 0; i < 1000; ++i) {
Query query = DataGenerator.randomQuery(r, 5);
for (Query dnfQ : query.dnfList()) {
for (Query q : dnfQ.andList()) {
Assertions.assertTrue(
q instanceof Query.KeyQuery || q == Query.TRUE || q == Query.FALSE,
"[" + q + "] is not a KeyQuery, extracted from [" + query + "]"
);
}
}
}
}
/**
 * Convenience for building a map from a flat list of key/value pairs, e.g.
 * {@code tags("k1", "v1", "k2", "v2")}.
 *
 * @param vs alternating key and value strings; must have an even length
 * @return insertion-ordered map of the pairs
 * @throws IllegalArgumentException if an odd number of strings is passed in
 */
private Map<String, String> tags(String... vs) {
  if (vs.length % 2 != 0) {
    // Fail fast with a clear message instead of an
    // ArrayIndexOutOfBoundsException from the loop below.
    throw new IllegalArgumentException("tags() requires an even number of arguments");
  }
  Map<String, String> tmp = new LinkedHashMap<>();
  for (int i = 0; i < vs.length; i += 2) {
    tmp.put(vs[i], vs[i + 1]);
  }
  return tmp;
}
@Test
public void simplifyEqualsMatch() {
// The value for the key matches, so the clause folds to TRUE.
Query q = Parser.parseQuery("nf.cluster,foo,:eq");
Assertions.assertEquals(Query.TRUE, q.simplify(tags("nf.cluster", "foo")));
}
@Test
public void simplifyEqualsNoMatch() {
// The value for the key differs, so the clause folds to FALSE.
Query q = Parser.parseQuery("nf.cluster,foo,:eq");
Assertions.assertEquals(Query.FALSE, q.simplify(tags("nf.cluster", "bar")));
}
@Test
public void simplifyEqualsNoValueForKey() {
// Key is absent from the tag map, so the query is left untouched
// (same instance returned, not just an equal one).
Query q = Parser.parseQuery("nf.cluster,foo,:eq");
Assertions.assertSame(q, q.simplify(tags("nf.app", "foo")));
}
@Test
public void simplifyAndMatchLeft() {
// Left clause matches and folds to TRUE, leaving only the right clause.
Query q = Parser.parseQuery("nf.cluster,foo,:eq,name,cpu,:eq,:and");
Query expected = Parser.parseQuery("name,cpu,:eq");
Assertions.assertEquals(expected, q.simplify(tags("nf.cluster", "foo")));
}
@Test
public void simplifyAndMatchRight() {
// Right clause matches and folds to TRUE, leaving only the left clause.
Query q = Parser.parseQuery("name,cpu,:eq,nf.cluster,foo,:eq,:and");
Query expected = Parser.parseQuery("name,cpu,:eq");
Assertions.assertEquals(expected, q.simplify(tags("nf.cluster", "foo")));
}
@Test
public void simplifyAndNoMatch() {
// One clause folds to FALSE, so the whole conjunction is FALSE.
Query q = Parser.parseQuery("nf.cluster,foo,:eq,name,cpu,:eq,:and");
Assertions.assertSame(Query.FALSE, q.simplify(tags("nf.cluster", "bar")));
}
@Test
public void simplifyAndNoValueForKey() {
// Neither key is present, query returned unchanged (same instance).
Query q = Parser.parseQuery("nf.cluster,foo,:eq,name,cpu,:eq,:and");
Assertions.assertSame(q, q.simplify(tags("nf.app", "foo")));
}
@Test
public void simplifyOrMatchLeft() {
// Left clause matches and folds to TRUE, short-circuiting the disjunction.
Query q = Parser.parseQuery("nf.cluster,foo,:eq,name,cpu,:eq,:or");
Assertions.assertEquals(Query.TRUE, q.simplify(tags("nf.cluster", "foo")));
}
@Test
public void simplifyOrMatchRight() {
// Right clause matches and folds to TRUE, short-circuiting the disjunction.
Query q = Parser.parseQuery("name,cpu,:eq,nf.cluster,foo,:eq,:or");
Assertions.assertEquals(Query.TRUE, q.simplify(tags("nf.cluster", "foo")));
}
@Test
public void simplifyOrNoMatch() {
// The cluster clause folds to FALSE, leaving only the name clause.
Query q = Parser.parseQuery("nf.cluster,foo,:eq,name,cpu,:eq,:or");
Query expected = Parser.parseQuery("name,cpu,:eq");
Assertions.assertEquals(expected, q.simplify(tags("nf.cluster", "bar")));
}
@Test
public void simplifyOrNoValueForKey() {
// Neither key is present, query returned unchanged (same instance).
Query q = Parser.parseQuery("nf.cluster,foo,:eq,name,cpu,:eq,:or");
Assertions.assertSame(q, q.simplify(tags("nf.app", "foo")));
}
@Test
public void simplifyNotMatch() {
// The negated clause matches, so :not folds to FALSE and the whole
// conjunction collapses to FALSE.
Query q = Parser.parseQuery("name,cpu,:eq,nf.cluster,foo,:eq,:not,:and");
Assertions.assertEquals(Query.FALSE, q.simplify(tags("nf.cluster", "foo")));
}
@Test
public void simplifyNotNoMatch() {
// The negated clause folds to TRUE, leaving only the name clause.
Query q = Parser.parseQuery("name,cpu,:eq,nf.cluster,foo,:eq,:not,:and");
Query expected = Parser.parseQuery("name,cpu,:eq");
Assertions.assertEquals(expected, q.simplify(tags("nf.cluster", "bar")));
}
@Test
public void simplifyNotNoValueForKeyMatch() {
// Key is absent, nothing can be simplified (same instance returned).
Query q = Parser.parseQuery("name,cpu,:eq,nf.cluster,foo,:eq,:not,:and");
Assertions.assertSame(q, q.simplify(tags("nf.app", "foo")));
}
@Test
public void simplifyTrue() {
// Constants are already fully simplified; same instance comes back.
Query q = Parser.parseQuery(":true");
Assertions.assertSame(q, q.simplify(tags("nf.cluster", "foo")));
}
@Test
public void simplifyFalse() {
// Constants are already fully simplified; same instance comes back.
Query q = Parser.parseQuery(":false");
Assertions.assertSame(q, q.simplify(tags("nf.cluster", "foo")));
}
}
| 6,042 |
0 | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas/impl/ValidationHelperTest.java | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.ipc.http.HttpResponse;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
/**
 * Tests for {@link ValidationHelper}, which records how many measurements were
 * sent successfully versus dropped based on the HTTP response from the backend.
 */
public class ValidationHelperTest {
private static final Logger LOGGER = LoggerFactory.getLogger(ValidationHelperTest.class);
private static final ObjectMapper MAPPER = new ObjectMapper();
/** Assert the sent counter and the dropped counter for each error category. */
private void check(Registry r, long sent, long http, long invalid, long other) {
Id baseId = r.createId("spectator.measurements");
Id droppedId = baseId.withTag("id", "dropped");
Assertions.assertEquals(sent, r.counter(baseId.withTag("id", "sent")).count());
Assertions.assertEquals(http, r.counter(droppedId.withTag("error", "http-error")).count());
Assertions.assertEquals(invalid, r.counter(droppedId.withTag("error", "validation")).count());
Assertions.assertEquals(other, r.counter(droppedId.withTag("error", "other")).count());
}
/** Create an HTTP response whose body is the JSON-encoded validation response. */
private HttpResponse httpResponse(int status, ValidationResponse vres) throws IOException {
String json = MAPPER.writeValueAsString(vres);
return new HttpResponse(status, Collections.emptyMap(), json.getBytes(StandardCharsets.UTF_8));
}
@Test
public void incrementDroppedHttp() {
// Direct increment should show up under the http-error category.
Registry registry = new DefaultRegistry();
ValidationHelper helper = new ValidationHelper(LOGGER, MAPPER, registry);
helper.incrementDroppedHttp(42);
check(registry, 0, 42, 0, 0);
}
@Test
public void ok() {
// 200 response: everything counts as sent.
Registry registry = new DefaultRegistry();
ValidationHelper helper = new ValidationHelper(LOGGER, MAPPER, registry);
helper.recordResults(42, new HttpResponse(200, Collections.emptyMap()));
check(registry, 42, 0, 0, 0);
}
@Test
public void validationErrorPartial() throws IOException {
// 202 with 3 validation errors: 39 sent, 3 dropped as validation errors.
Registry registry = new DefaultRegistry();
ValidationHelper helper = new ValidationHelper(LOGGER, MAPPER, registry);
ValidationResponse vres = new ValidationResponse();
vres.setType("error");
vres.setErrorCount(3);
vres.setMessage(Collections.singletonList("foo"));
helper.recordResults(42, httpResponse(202, vres));
check(registry, 39, 0, 3, 0);
}
@Test
public void validationErrorAll() throws IOException {
// 400 response: all measurements dropped as validation errors.
Registry registry = new DefaultRegistry();
ValidationHelper helper = new ValidationHelper(LOGGER, MAPPER, registry);
ValidationResponse vres = new ValidationResponse();
vres.setType("error");
vres.setErrorCount(42);
vres.setMessage(Collections.singletonList("foo"));
helper.recordResults(42, httpResponse(400, vres));
check(registry, 0, 0, 42, 0);
}
@Test
public void validationErrorNullMessages() throws IOException {
// A missing message list must not break recording of the drops.
Registry registry = new DefaultRegistry();
ValidationHelper helper = new ValidationHelper(LOGGER, MAPPER, registry);
ValidationResponse vres = new ValidationResponse();
vres.setType("error");
vres.setErrorCount(42);
helper.recordResults(42, httpResponse(400, vres));
check(registry, 0, 0, 42, 0);
}
@Test
public void validationErrorEmptyMessages() throws IOException {
// An empty message list must not break recording of the drops.
Registry registry = new DefaultRegistry();
ValidationHelper helper = new ValidationHelper(LOGGER, MAPPER, registry);
ValidationResponse vres = new ValidationResponse();
vres.setType("error");
vres.setErrorCount(42);
vres.setMessage(Collections.emptyList());
helper.recordResults(42, httpResponse(400, vres));
check(registry, 0, 0, 42, 0);
}
@Test
public void validationErrorBadJson() throws IOException {
// 400 with no parsable body: the drops are categorized as "other".
Registry registry = new DefaultRegistry();
ValidationHelper helper = new ValidationHelper(LOGGER, MAPPER, registry);
HttpResponse res = new HttpResponse(400, Collections.emptyMap());
helper.recordResults(42, res);
check(registry, 0, 0, 0, 42);
}
@Test
public void serverError() {
// 5xx response: everything dropped as an http error.
Registry registry = new DefaultRegistry();
ValidationHelper helper = new ValidationHelper(LOGGER, MAPPER, registry);
helper.recordResults(42, new HttpResponse(500, Collections.emptyMap()));
check(registry, 0, 42, 0, 0);
}
}
| 6,043 |
0 | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas/impl/EvalPayloadTest.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import nl.jqno.equalsverifier.EqualsVerifier;
import nl.jqno.equalsverifier.Warning;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
 * Tests for {@link EvalPayload}: equals contracts for the nested types and
 * splitting a payload into size-limited batches.
 */
public class EvalPayloadTest {
@Test
public void metricEquals() {
EqualsVerifier.forClass(EvalPayload.Metric.class)
.suppress(Warning.NULL_FIELDS)
.verify();
}
@Test
public void messageEquals() {
EqualsVerifier.forClass(EvalPayload.Message.class)
.suppress(Warning.NULL_FIELDS)
.verify();
}
@Test
public void diagnosticMessageEquals() {
EqualsVerifier.forClass(EvalPayload.DiagnosticMessage.class)
.suppress(Warning.NULL_FIELDS)
.verify();
}
@Test
public void evalPayloadEquals() {
EqualsVerifier.forClass(EvalPayload.class)
.suppress(Warning.NULL_FIELDS)
.verify();
}
/**
 * Create {@code n} metrics whose value is their index so the ordering can
 * be verified after batching.
 */
private List<EvalPayload.Metric> metrics(int n) {
List<EvalPayload.Metric> ms = new ArrayList<>();
for (int i = 0; i < n; ++i) {
ms.add(new EvalPayload.Metric("_", Collections.emptyMap(), i));
}
return ms;
}
/** Create {@code n} error messages with the index as the message text. */
private List<EvalPayload.Message> messages(int n) {
List<EvalPayload.Message> ms = new ArrayList<>();
for (int i = 0; i < n; ++i) {
ms.add(new EvalPayload.Message(
"_",
new EvalPayload.DiagnosticMessage(EvalPayload.MessageType.error, "" + i)
));
}
return ms;
}
@Test
public void toBatchesBelowThreshold() {
// Payload already within the batch size: returned as-is (same instance).
EvalPayload payload = new EvalPayload(0L, metrics(4));
List<EvalPayload> batches = payload.toBatches(4);
Assertions.assertEquals(1, batches.size());
Assertions.assertSame(payload, batches.get(0));
}
@Test
public void toBatchesAboveThreshold() {
// 21 metrics at batch size 4 => 6 batches; metric order is preserved
// across the batches.
EvalPayload payload = new EvalPayload(0L, metrics(21));
List<EvalPayload> batches = payload.toBatches(4);
Assertions.assertEquals(6, batches.size());
int i = 0;
for (EvalPayload batch : batches) {
for (EvalPayload.Metric metric : batch.getMetrics()) {
int v = (int) metric.getValue();
Assertions.assertEquals(i, v);
++i;
}
}
}
@Test
public void toBatchesWithMessages() {
// Messages are attached only to the first batch.
EvalPayload payload = new EvalPayload(0L, metrics(21), messages(2));
List<EvalPayload> batches = payload.toBatches(4);
Assertions.assertEquals(6, batches.size());
int i = 0;
for (EvalPayload batch : batches) {
Assertions.assertEquals(i == 0 ? 2 : 0, batch.getMessages().size());
for (EvalPayload.Metric metric : batch.getMetrics()) {
int v = (int) metric.getValue();
Assertions.assertEquals(i, v);
++i;
}
}
}
}
| 6,044 |
0 | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas/impl/DataExprTest.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Tag;
import nl.jqno.equalsverifier.EqualsVerifier;
import nl.jqno.equalsverifier.Warning;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
/**
 * Tests for parsing and evaluating data expressions: aggregations, group by,
 * rollups, and the behavior when the query filter is skipped.
 */
public class DataExprTest {
private final Registry registry = new DefaultRegistry();
/** Parse the expression and verify it round-trips through toString. */
private DataExpr parse(String expr) {
DataExpr de = Parser.parseDataExpr(expr);
Assertions.assertEquals(expr, de.toString());
return de;
}
/**
 * Create one pair per value, tagged with the metric name, a zero-padded
 * position ("i") and the formatted value ("v").
 */
private List<TagsValuePair> data(String name, double... vs) {
List<Measurement> ms = new ArrayList<>();
for (int i = 0; i < vs.length; ++i) {
String pos = String.format("%03d", i);
String value = String.format("%f", vs[i]);
ms.add(new Measurement(registry.createId(name, "i", pos, "v", value), 0L, vs[i]));
}
return ms.stream().map(this::newTagsValuePair).collect(Collectors.toList());
}
/** Convert a measurement to a pair, folding the id name into the tag map. */
private TagsValuePair newTagsValuePair(Measurement m) {
Map<String, String> tags = new HashMap<>();
for (Tag t : m.id().tags()) {
tags.put(t.key(), t.value());
}
tags.put("name", m.id().name());
return new TagsValuePair(tags, m.value());
}
/**
 * Aggregate without applying the query filter first, i.e. the aggregator is
 * created with shouldCheckQuery set to false.
 */
private Iterable<TagsValuePair> evalNoCheck(DataExpr expr, Iterable<TagsValuePair> input) {
DataExpr.Aggregator aggr = expr.aggregator(false);
for (TagsValuePair p : input) {
aggr.update(p);
}
return aggr.result();
}
@Test
public void sumEmpty() {
DataExpr expr = parse(":true,:sum");
Assertions.assertFalse(expr.eval(Collections.emptyList()).iterator().hasNext());
}
@Test
public void minEmpty() {
DataExpr expr = parse(":true,:min");
Assertions.assertFalse(expr.eval(Collections.emptyList()).iterator().hasNext());
}
@Test
public void maxEmpty() {
DataExpr expr = parse(":true,:max");
Assertions.assertFalse(expr.eval(Collections.emptyList()).iterator().hasNext());
}
@Test
public void countEmpty() {
DataExpr expr = parse(":true,:count");
Assertions.assertFalse(expr.eval(Collections.emptyList()).iterator().hasNext());
}
/**
 * Evaluate "name,foo,:eq" plus the given aggregate over 4 "foo" values
 * (1, 2, 3, 1) and one "bar" value (42), expecting a single result with the
 * given value. When shouldCheckQuery is false, the "bar" data leaks into
 * the aggregate because the query filter is skipped.
 */
private void aggrData(String aggr, double expected, boolean shouldCheckQuery) {
DataExpr expr = parse("name,foo,:eq," + aggr);
List<TagsValuePair> ms = data("foo", 1.0, 2.0, 3.0, 1.0);
ms.addAll(data("bar", 42.0));
Map<String, String> expectedTags = new HashMap<>();
expectedTags.put("name", "foo");
Iterable<TagsValuePair> vs = shouldCheckQuery ? expr.eval(ms) : evalNoCheck(expr, ms);
int count = 0;
for (TagsValuePair v : vs) {
++count;
Assertions.assertEquals(expectedTags, v.tags());
Assertions.assertEquals(expected, v.value(), 1e-12);
}
Assertions.assertEquals(1, count);
}
private void aggrData(String aggr, double expected) {
aggrData(aggr, expected, true);
}
@Test
public void sumData() {
aggrData(":sum", 7.0);
}
@Test
public void sumDataNoCheck() {
aggrData(":sum", 49.0, false);
}
@Test
public void minData() {
aggrData(":min", 1.0);
}
@Test
public void minDataNoCheck() {
aggrData(":min", 1.0, false);
}
@Test
public void maxData() {
aggrData(":max", 3.0);
}
@Test
public void maxDataNoCheck() {
aggrData(":max", 42.0, false);
}
@Test
public void countData() {
aggrData(":count", 4.0);
}
@Test
public void countDataNoCheck() {
aggrData(":count", 5.0, false);
}
@Test
public void groupByNameData() {
aggrData(":sum,(,name,),:by", 7.0);
}
@Test
public void groupByNameDataNoCheck() {
// Note, this test shows a problem if the query is not checked properly when
// using shouldCheckQuery = false. There are two names in the group by, but
// only one shows up because name is restricted in the query and that overrides
// the value from the group by. If the query had been checked, then the mismatched
// names would not be possible.
aggrData(":sum,(,name,),:by", 49.0, false);
}
private void groupingData(String aggr) {
groupingData(aggr, true);
}
/**
 * Evaluate a sum with the given grouping suffix over the standard data set
 * and verify each group result. Grouping by the "v" tag means each group's
 * sum is max(v, 2.0) since the value 1.0 appears twice.
 */
private void groupingData(String aggr, boolean shouldCheckQuery) {
DataExpr expr = parse("name,foo,:eq,:sum," + aggr);
List<TagsValuePair> ms = data("foo", 1.0, 2.0, 3.0, 1.0);
ms.addAll(data("bar", 42.0));
Iterable<TagsValuePair> vs = shouldCheckQuery ? expr.eval(ms) : evalNoCheck(expr, ms);
int count = 0;
for (TagsValuePair v : vs) {
++count;
Assertions.assertEquals(2, v.tags().size());
if (shouldCheckQuery) {
Assertions.assertEquals("foo", v.tags().get("name"));
}
double tv = Double.parseDouble(v.tags().get("v"));
Assertions.assertEquals(Math.max(tv, 2.0), v.value(), 1e-12);
}
Assertions.assertEquals(shouldCheckQuery ? 3 : 4, count);
}
@Test
public void groupByValueData() {
groupingData("(,v,),:by");
}
@Test
public void groupByValueDataNoCheck() {
groupingData("(,v,),:by", false);
}
@Test
public void groupByUnknownData() {
// Grouping by a key that is not present on the data yields no results.
DataExpr expr = parse("name,foo,:eq,:sum,(,a,v,),:by");
List<TagsValuePair> ms = data("foo", 1.0, 2.0, 3.0, 1.0);
ms.addAll(data("bar", 42.0));
Iterable<TagsValuePair> vs = expr.eval(ms);
Assertions.assertFalse(vs.iterator().hasNext());
}
@Test
public void rollupKeepData() {
groupingData("(,v,name,),:rollup-keep");
}
@Test
public void rollupKeepDataNoCheck() {
groupingData("(,v,name,),:rollup-keep", false);
}
@Test
public void rollupKeepUnknownData() {
// Unlike group by, keeping an unknown key does not remove the results.
groupingData("(,a,v,name,),:rollup-keep");
}
@Test
public void rollupDropData() {
groupingData("(,i,),:rollup-drop");
}
@Test
public void rollupDropDataNoCheck() {
groupingData("(,i,),:rollup-drop", false);
}
@Test
public void allData() {
// :all passes through each matching datapoint without aggregation.
DataExpr expr = parse("name,foo,:eq,:all");
List<TagsValuePair> ms = data("foo", 1.0, 2.0, 3.0, 1.0);
ms.addAll(data("bar", 42.0));
Iterable<TagsValuePair> vs = expr.eval(ms);
Assertions.assertEquals(4, StreamSupport.stream(vs.spliterator(), false).count());
}
@Test
public void allDataNoCheck() {
DataExpr expr = parse("name,foo,:eq,:all");
List<TagsValuePair> ms = data("foo", 1.0, 2.0, 3.0, 1.0);
ms.addAll(data("bar", 42.0));
Iterable<TagsValuePair> vs = evalNoCheck(expr, ms);
Assertions.assertEquals(5, StreamSupport.stream(vs.spliterator(), false).count());
}
@Test
public void notData() {
DataExpr expr = parse("name,foo,:eq,:not,:all");
List<TagsValuePair> ms = data("foo", 1.0, 2.0, 3.0, 1.0);
ms.addAll(data("bar", 42.0));
Iterable<TagsValuePair> vs = expr.eval(ms);
Assertions.assertEquals(1, StreamSupport.stream(vs.spliterator(), false).count());
}
@Test
public void inWithGroupBy() {
// https://github.com/Netflix/spectator/issues/391
parse("statistic,(,totalAmount,totalTime,),:in,name,jvm.gc.pause,:eq,:and,:sum,(,nf.asg,nf.node,),:by");
}
@Test
public void nestedInClauses() {
// A nested in-clause is treated as literal values rather than a sub-query.
Set<String> values = new TreeSet<>();
values.add("key");
values.add("(");
values.add("a");
values.add("b");
values.add(")");
values.add(":in");
DataExpr expected = new DataExpr.Sum(new Query.In("key", values));
DataExpr actual = Parser.parseDataExpr("key,(,key,(,a,b,),:in,),:in,:sum");
Assertions.assertEquals(expected, actual);
}
@Test
public void multiNestedInClauses() {
Set<String> values = new TreeSet<>(
Arrays.asList("key,(,a,(,b,(,c,),),(,),),:in".split(",")));
DataExpr expected = new DataExpr.Sum(new Query.In("key", values));
DataExpr actual = Parser.parseDataExpr("key,(,key,(,a,(,b,(,c,),),(,),),:in,),:in,:sum");
Assertions.assertEquals(expected, actual);
}
@Test
public void mismatchedOpenParen() {
Assertions.assertThrows(IllegalArgumentException.class,
() -> Parser.parseDataExpr("key,(,key,(,),:in,:sum"));
}
@Test
public void mismatchedClosingParen() {
Assertions.assertThrows(IllegalArgumentException.class,
() -> Parser.parseDataExpr("key,(,key,),),:in,:sum"));
}
@Test
public void allEqualsContract() {
EqualsVerifier
.forClass(DataExpr.All.class)
.suppress(Warning.NULL_FIELDS)
.verify();
}
@Test
public void sumEqualsContract() {
EqualsVerifier
.forClass(DataExpr.Sum.class)
.suppress(Warning.NULL_FIELDS)
.verify();
}
@Test
public void minEqualsContract() {
EqualsVerifier
.forClass(DataExpr.Min.class)
.suppress(Warning.NULL_FIELDS)
.verify();
}
@Test
public void maxEqualsContract() {
EqualsVerifier
.forClass(DataExpr.Max.class)
.suppress(Warning.NULL_FIELDS)
.verify();
}
@Test
public void countEqualsContract() {
EqualsVerifier
.forClass(DataExpr.Count.class)
.suppress(Warning.NULL_FIELDS)
.verify();
}
@Test
public void byEqualsContract() {
EqualsVerifier
.forClass(DataExpr.GroupBy.class)
.suppress(Warning.NULL_FIELDS)
.verify();
}
@Test
public void dropEqualsContract() {
EqualsVerifier
.forClass(DataExpr.DropRollup.class)
.suppress(Warning.NULL_FIELDS)
.verify();
}
@Test
public void keepEqualsContract() {
EqualsVerifier
.forClass(DataExpr.KeepRollup.class)
.suppress(Warning.NULL_FIELDS)
.verify();
}
}
| 6,045 |
0 | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas/impl/SubscriptionsTest.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import nl.jqno.equalsverifier.EqualsVerifier;
import nl.jqno.equalsverifier.Warning;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
/**
 * Tests for {@link Subscriptions#update}, which refreshes the expiration time
 * for subscriptions in the payload and removes entries that have expired.
 */
public class SubscriptionsTest {
@Test
public void equalsContract() {
EqualsVerifier.forClass(Subscriptions.class)
.suppress(Warning.NULL_FIELDS, Warning.NONFINAL_FIELDS)
.verify();
}
/** Map each subscription to the same expiration timestamp. */
private Map<Subscription, Long> map(long ttl, Subscription... subs) {
Map<Subscription, Long> m = new HashMap<>();
for (Subscription sub : subs) {
m.put(sub, ttl);
}
return m;
}
private Subscription newSub(String id, String expr, long freq) {
return new Subscription().withId(id).withExpression(expr).withFrequency(freq);
}
private Subscriptions newSubs(Subscription... subs) {
return new Subscriptions().withExpressions(Arrays.asList(subs));
}
@Test
public void updateInit() {
// Starting from an empty map, both subs get the new expiration time.
Map<Subscription, Long> subs = new HashMap<>();
Subscription a = newSub("a", ":true,:sum", 10L);
Subscription b = newSub("b", ":true,:sum", 60L);
newSubs(a, b).update(subs, 0L, 15L);
Assertions.assertEquals(map(15L, a, b), subs);
}
@Test
public void updateComplete() {
// Both subs are present in the payload: expiration refreshed for both.
Subscription a = newSub("a", ":true,:sum", 10L);
Subscription b = newSub("b", ":true,:sum", 60L);
Map<Subscription, Long> subs = map(15L, a, b);
newSubs(a, b).update(subs, 10L, 30L);
Assertions.assertEquals(map(30L, a, b), subs);
}
@Test
public void updatePartial() {
// Only b is in the payload; a keeps its old expiration because it has
// not yet expired at the current time (10 < 15).
Subscription a = newSub("a", ":true,:sum", 10L);
Subscription b = newSub("b", ":true,:sum", 60L);
Map<Subscription, Long> subs = map(15L, a, b);
newSubs(b).update(subs, 10L, 30L);
Map<Subscription, Long> expected = map(15L, a, b);
expected.put(b, 30L);
Assertions.assertEquals(expected, subs);
}
@Test
public void updatePartialExpire() {
// NOTE(review): args appear to be (current time, new expiration). a's
// recorded expiration (15) is before the current time (16), so it gets
// removed from the map.
Subscription a = newSub("a", ":true,:sum", 10L);
Subscription b = newSub("b", ":true,:sum", 60L);
Map<Subscription, Long> subs = map(15L, a, b);
newSubs(b).update(subs, 16L, 30L);
Map<Subscription, Long> expected = map(30L, b);
Assertions.assertEquals(expected, subs);
}
}
| 6,046 |
0 | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas/impl/TagsValuePairTest.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import nl.jqno.equalsverifier.EqualsVerifier;
import nl.jqno.equalsverifier.Warning;
import org.junit.jupiter.api.Test;
/** Verifies the equals/hashCode contract for {@link TagsValuePair}. */
public class TagsValuePairTest {
@Test
public void equalsContract() {
EqualsVerifier.forClass(TagsValuePair.class)
.suppress(Warning.NULL_FIELDS)
.verify();
}
}
| 6,047 |
0 | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas/impl/DataGenerator.java | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import java.util.HashSet;
import java.util.Random;
import java.util.Set;
/**
 * Utility for producing pseudo-random query expressions used to exercise the
 * query manipulation code in tests. All randomness comes from the caller
 * supplied {@link Random}, so runs are reproducible from a fixed seed.
 */
final class DataGenerator {

  /** Tag keys modeled after common infrastructure tags. */
  private static final String[] COMMON_KEYS = {
      "id",
      "name",
      "nf.app",
      "nf.asg",
      "nf.cluster",
      "nf.region",
      "nf.stack",
      "nf.zone",
      "statistic"
  };

  private DataGenerator() {
  }

  /** Pick one of the common tag keys at random. */
  private static String randomKey(Random r) {
    return COMMON_KEYS[r.nextInt(COMMON_KEYS.length)];
  }

  /** Single random lowercase letter as a string. */
  private static String randomString(Random r) {
    return String.valueOf((char) ('a' + r.nextInt(26)));
  }

  /** Set of 1 to 10 random single-letter strings (duplicates collapse). */
  private static Set<String> randomStringSet(Random r) {
    int size = r.nextInt(10) + 1;
    Set<String> result = new HashSet<>();
    for (int i = 0; i < size; ++i) {
      result.add(randomString(r));
    }
    return result;
  }

  /**
   * Generate a random query tree with at most {@code depth} levels of
   * nesting. At depth zero a simple Has query on a random letter is returned.
   */
  static Query randomQuery(Random r, int depth) {
    if (depth <= 0) {
      return new Query.Has(randomString(r));
    }
    int choice = r.nextInt(12);
    if (choice == 0) {
      Query lhs = randomQuery(r, depth - 1);
      Query rhs = randomQuery(r, depth - 1);
      return lhs.and(rhs);
    } else if (choice == 1) {
      Query lhs = randomQuery(r, depth - 1);
      Query rhs = randomQuery(r, depth - 1);
      return lhs.or(rhs);
    } else if (choice == 2) {
      return randomQuery(r, depth - 1).not();
    } else if (choice == 3) {
      return new Query.Equal(randomKey(r), randomString(r));
    } else if (choice == 4) {
      return new Query.In(randomKey(r), randomStringSet(r));
    } else if (choice == 5) {
      return new Query.Regex(randomKey(r), randomString(r));
    } else if (choice == 6) {
      return new Query.GreaterThan(randomKey(r), randomString(r));
    } else if (choice == 7) {
      return new Query.GreaterThanEqual(randomKey(r), randomString(r));
    } else if (choice == 8) {
      return new Query.LessThan(randomKey(r), randomString(r));
    } else if (choice == 9) {
      return new Query.LessThanEqual(randomKey(r), randomString(r));
    } else if (choice == 10) {
      return new Query.Has(randomKey(r));
    } else {
      return r.nextBoolean() ? Query.TRUE : Query.FALSE;
    }
  }
}
| 6,048 |
0 | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas/impl/ConsolidatorTest.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import com.netflix.spectator.api.Clock;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Gauge;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.ManualClock;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.api.Statistic;
import com.netflix.spectator.atlas.AtlasRegistry;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.time.Duration;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.function.DoubleConsumer;
import java.util.function.Supplier;
public class ConsolidatorTest {
// Meter TTL and step sizes used throughout these tests. The consolidated
// step of 60s is made up of MULTIPLE (12) primary steps of 5s.
private static final long TTL = 15L * 60L * 1000L;
private static final long PRIMARY_STEP = 5L * 1000L;
private static final long CONSOLIDATED_STEP = 60L * 1000L;
private static final int MULTIPLE = (int) (CONSOLIDATED_STEP / PRIMARY_STEP);
/** Create a registry using the given clock and step size for both atlas and lwc. */
private AtlasRegistry registry(Clock clock, long step) {
Map<String, String> config = new HashMap<>();
config.put("atlas.meterTTL", Duration.ofMillis(TTL).toString());
config.put("atlas.step", Duration.ofMillis(step).toString());
config.put("atlas.lwc.step", Duration.ofMillis(step).toString());
return new AtlasRegistry(clock, config::get);
}
@Test
public void avgNormalOperation() {
// Each consolidated interval receives the primary values 0..11, so the
// average reported for the previous interval is (0 + ... + 11) / 12 = 5.5.
// The first interval has no prior data and reports 0.0.
Consolidator consolidator = new Consolidator.Avg(CONSOLIDATED_STEP, MULTIPLE);
for (int i = 0; i < 10; ++i) {
long baseTimestamp = i * CONSOLIDATED_STEP;
for (int j = 0; j < MULTIPLE; ++j) {
consolidator.update(baseTimestamp + j * PRIMARY_STEP, j);
}
Assertions.assertEquals(i == 0 ? 0.0 : 5.5, consolidator.value(baseTimestamp), 1e-8);
}
}
@Test
public void avgMissingPrimaryValues() {
// Only one of the 12 primary steps has a value: 12.0 / 12 = 1.0. The NaN
// update just advances time past the interval boundary.
Consolidator consolidator = new Consolidator.Avg(CONSOLIDATED_STEP, MULTIPLE);
consolidator.update(30000, 12.0);
consolidator.update(60000, Double.NaN);
Assertions.assertEquals(1.0, consolidator.value(60000));
}
@Test
public void avgSingleStepGap() {
// A value in the next consolidated interval should not prevent reading
// the completed value (12.0 / 12 = 1.0) for the previous interval.
Consolidator consolidator = new Consolidator.Avg(CONSOLIDATED_STEP, MULTIPLE);
consolidator.update(30000, 12.0);
consolidator.update(110000, 12.0);
Assertions.assertEquals(1.0, consolidator.value(60000));
consolidator.update(120000, Double.NaN);
Assertions.assertEquals(1.0, consolidator.value(120000));
}
@Test
public void avgManyStepGap() {
// NOTE(review): with a gap of several consolidated steps the earlier
// value appears to be discarded; both reads come back NaN.
Consolidator consolidator = new Consolidator.Avg(CONSOLIDATED_STEP, MULTIPLE);
consolidator.update(30000, 12.0);
consolidator.update(360000, 12.0);
Assertions.assertTrue(Double.isNaN(consolidator.value(60000)));
Assertions.assertTrue(Double.isNaN(consolidator.value(360000)));
}
@Test
public void avgBackInTime() {
// An out-of-order update with an earlier timestamp should not corrupt
// the consolidated value (still 12.0 / 12 = 1.0).
Consolidator consolidator = new Consolidator.Avg(CONSOLIDATED_STEP, MULTIPLE);
consolidator.update(360000, 12.0);
consolidator.update(350000, 12.0);
Assertions.assertEquals(1.0, consolidator.value(360000), 1e-8);
}
@Test
public void avgEmpty() {
// A NaN update should return the consolidator to the empty state.
Consolidator consolidator = new Consolidator.Avg(CONSOLIDATED_STEP, MULTIPLE);
Assertions.assertTrue(consolidator.isEmpty());
consolidator.update(30000, 12.0);
Assertions.assertFalse(consolidator.isEmpty());
consolidator.update(150000, Double.NaN);
Assertions.assertTrue(consolidator.isEmpty());
}
@Test
public void noneEmpty() {
// Same empty-state behavior for the pass-through (None) consolidator.
Consolidator consolidator = new Consolidator.None(false);
Assertions.assertTrue(consolidator.isEmpty());
consolidator.update(30000, 12.0);
Assertions.assertFalse(consolidator.isEmpty());
consolidator.update(150000, Double.NaN);
Assertions.assertTrue(consolidator.isEmpty());
}
private void consolidateRandomData(
Id measurementId,
ManualClock clock,
Consolidator consolidator,
DoubleConsumer primary,
DoubleConsumer consolidated,
Supplier<Iterable<Measurement>> primaryMeasure,
Supplier<Iterable<Measurement>> consolidatedMeasure) {
Random r = new Random(42);
for (int i = 0; i < 3600; ++i) {
long t = i * 1000L;
clock.setWallTime(t);
int v = r.nextInt(10_000);
primary.accept(v);
consolidated.accept(v);
if (t % PRIMARY_STEP == 0L) {
for (Measurement m : primaryMeasure.get()) {
consolidator.update(m);
}
}
if (t % CONSOLIDATED_STEP == 0L) {
Measurement actual = new Measurement(measurementId, t, consolidator.value(t));
for (Measurement m : consolidatedMeasure.get()) {
Assertions.assertEquals(m.id(), actual.id());
Assertions.assertEquals(m.timestamp(), actual.timestamp());
Assertions.assertEquals(m.value(), actual.value(), 1e-8);
}
}
// Simulate a gap
if (i == 968) {
i += 360;
}
}
}
@Test
public void avgRandom() {
Id id = Id.create("test");
Id measurementId = id.withTag("atlas.dstype", "rate").withTag(Statistic.count);
ManualClock clock = new ManualClock();
Counter primary = registry(clock, PRIMARY_STEP).counter(id);
Counter consolidated = registry(clock, CONSOLIDATED_STEP).counter(id);
Consolidator consolidator = new Consolidator.Avg(CONSOLIDATED_STEP, MULTIPLE);
consolidateRandomData(
measurementId,
clock,
consolidator,
primary::add,
consolidated::add,
primary::measure,
consolidated::measure);
}
@Test
public void maxRandom() {
Id id = Id.create("test");
Id measurementId = id.withTag("atlas.dstype", "gauge").withTag(Statistic.max);
ManualClock clock = new ManualClock();
Gauge primary = registry(clock, PRIMARY_STEP).maxGauge(id);
Gauge consolidated = registry(clock, CONSOLIDATED_STEP).maxGauge(id);
Consolidator consolidator = new Consolidator.Max(CONSOLIDATED_STEP, MULTIPLE);
consolidateRandomData(
measurementId,
clock,
consolidator,
primary::set,
consolidated::set,
primary::measure,
consolidated::measure);
}
@Test
public void noneRandom() {
Id id = Id.create("test");
Id measurementId = id.withTag("atlas.dstype", "rate").withTag(Statistic.count);
ManualClock clock = new ManualClock();
Counter primary = registry(clock, CONSOLIDATED_STEP).counter(id);
Counter consolidated = registry(clock, CONSOLIDATED_STEP).counter(id);
Consolidator consolidator = new Consolidator.None(false);
consolidateRandomData(
measurementId,
clock,
consolidator,
primary::add,
consolidated::add,
primary::measure,
consolidated::measure);
}
@Test
public void createFromStatistic() {
EnumSet<Statistic> counters = EnumSet.of(
Statistic.count,
Statistic.totalAmount,
Statistic.totalTime,
Statistic.totalOfSquares,
Statistic.percentile);
for (Statistic statistic : Statistic.values()) {
Consolidator consolidator = Consolidator.create(statistic, CONSOLIDATED_STEP, MULTIPLE);
if (counters.contains(statistic)) {
Assertions.assertTrue(consolidator instanceof Consolidator.Avg, statistic.name());
} else {
Assertions.assertTrue(consolidator instanceof Consolidator.Max, statistic.name());
}
}
}
@Test
public void createFromIdCounter() {
Id id = Id.create("foo").withTag(Statistic.count);
Consolidator consolidator = Consolidator.create(id, CONSOLIDATED_STEP, MULTIPLE);
Assertions.assertTrue(consolidator instanceof Consolidator.Avg);
}
@Test
public void createFromIdGauge() {
Id id = Id.create("foo").withTag(Statistic.gauge);
Consolidator consolidator = Consolidator.create(id, CONSOLIDATED_STEP, MULTIPLE);
Assertions.assertTrue(consolidator instanceof Consolidator.Max);
}
@Test
public void createFromIdNoStatistic() {
Id id = Id.create("foo");
Consolidator consolidator = Consolidator.create(id, CONSOLIDATED_STEP, MULTIPLE);
Assertions.assertTrue(consolidator instanceof Consolidator.Max);
}
@Test
public void createFromIdMultipleOne() {
Id id = Id.create("foo");
Consolidator consolidator = Consolidator.create(id, CONSOLIDATED_STEP, 1);
Assertions.assertTrue(consolidator instanceof Consolidator.None);
}
}
| 6,049 |
0 | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas/impl/QueryIndexTest.java | /*
* Copyright 2014-2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.NoopRegistry;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.impl.Cache;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.function.Function;
/**
 * Tests for {@code QueryIndex} covering matching, removal, caching behavior,
 * query normalization, and a randomized add/remove fuzz check.
 */
public class QueryIndexTest {

  private final Registry registry = new DefaultRegistry();

  /**
   * Cache supplier whose cache retains an entry for only a single lookup. This
   * forces repeated calls in the tests to exercise both the cached and the
   * uncached code paths of the index.
   */
  private final QueryIndex.CacheSupplier<Query> cacheSupplier = new QueryIndex.CacheSupplier<Query>() {
    @Override
    public Cache<String, List<QueryIndex<Query>>> get() {
      return new Cache<String, List<QueryIndex<Query>>>() {

        private final Map<String, List<QueryIndex<Query>>> data = new HashMap<>();

        @Override
        public List<QueryIndex<Query>> get(String key) {
          // Cache for a single call
          return data.remove(key);
        }

        @Override
        public List<QueryIndex<Query>> peek(String key) {
          return null;
        }

        @Override
        public void put(String key, List<QueryIndex<Query>> value) {
          data.put(key, value);
        }

        @Override
        public List<QueryIndex<Query>> computeIfAbsent(String key, Function<String, List<QueryIndex<Query>>> f) {
          return data.computeIfAbsent(key, f);
        }

        @Override
        public void clear() {
          data.clear();
        }

        @Override
        public int size() {
          return data.size();
        }

        @Override
        public Map<String, List<QueryIndex<Query>>> asMap() {
          return new HashMap<>(data);
        }
      };
    }
  };

  /** Creates an id with the given name and tags. */
  private Id id(String name, String... tags) {
    return registry.createId(name, tags);
  }

  /** Sorts queries by string representation so comparisons are order-independent. */
  private List<Query> sort(List<Query> vs) {
    vs.sort(Comparator.comparing(Object::toString));
    return vs;
  }

  /** Convenience for creating a sorted list of queries. */
  private List<Query> list(Query... vs) {
    return sort(Arrays.asList(vs));
  }

  @Test
  public void empty() {
    QueryIndex<Query> idx = QueryIndex.newInstance(cacheSupplier);
    assertEquals(Collections.emptyList(), idx, id("a"));
  }

  private static final Query SIMPLE_QUERY = Parser.parseQuery("name,a,:eq,key,b,:eq,:and");

  /** Index containing just the simple name/key equality query. */
  private QueryIndex<Query> simpleIdx() {
    return QueryIndex.newInstance(cacheSupplier).add(SIMPLE_QUERY, SIMPLE_QUERY);
  }

  /** Checks the matches for the id, both as an Id and as a tag-map function. */
  private void assertEquals(List<Query> expected, QueryIndex<Query> idx, Id id) {
    // Do multiple iterations just to exercise caching and cache expiration paths
    for (int i = 0; i < 4; ++i) {
      Assertions.assertEquals(expected, sort(idx.findMatches(id)));
    }
    for (int i = 0; i < 4; ++i) {
      Assertions.assertEquals(expected, sort(idx.findMatches(Query.toMap(id)::get)));
    }
  }

  @Test
  public void simpleMissingKey() {
    Id id = id("a", "foo", "bar");
    assertEquals(Collections.emptyList(), simpleIdx(), id);
  }

  @Test
  public void simpleMatches() {
    Id id1 = id("a", "key", "b");
    Id id2 = id("a", "foo", "bar", "key", "b");
    assertEquals(list(SIMPLE_QUERY), simpleIdx(), id1);
    assertEquals(list(SIMPLE_QUERY), simpleIdx(), id2);
  }

  @Test
  public void simpleNameDoesNotMatch() {
    Id id = id("b", "foo", "bar");
    assertEquals(Collections.emptyList(), simpleIdx(), id);
  }

  @Test
  public void simpleRemoveValue() {
    QueryIndex<Query> idx = simpleIdx();
    Assertions.assertTrue(idx.remove(SIMPLE_QUERY, SIMPLE_QUERY));
    Assertions.assertTrue(idx.isEmpty());
    Id id = id("a", "key", "b");
    Assertions.assertTrue(idx.findMatches(id).isEmpty());
  }

  private static final Query HASKEY_QUERY = Parser.parseQuery("name,a,:eq,key,b,:eq,:and,c,:has,:and");

  /** Index containing a query with a :has clause. */
  private QueryIndex<Query> hasKeyIdx() {
    return QueryIndex.newInstance(cacheSupplier).add(HASKEY_QUERY, HASKEY_QUERY);
  }

  @Test
  public void hasKeyMissingKey() {
    Id id = id("a", "key", "b", "foo", "bar");
    assertEquals(Collections.emptyList(), hasKeyIdx(), id);
  }

  @Test
  public void hasKeyMatches() {
    // The value of key "c" is irrelevant, only its presence matters.
    Id id1 = id("a", "key", "b", "c", "12345");
    Id id2 = id("a", "foo", "bar", "key", "b", "c", "foobar");
    assertEquals(list(HASKEY_QUERY), hasKeyIdx(), id1);
    assertEquals(list(HASKEY_QUERY), hasKeyIdx(), id2);
  }

  @Test
  public void hasKeyRepeat() {
    Id id1 = id("a", "key", "b", "c", "12345");
    QueryIndex<Query> idx = hasKeyIdx();
    for (int i = 0; i < 10; ++i) {
      // Subsequent checks for :has operation should come from cache
      assertEquals(list(HASKEY_QUERY), idx, id1);
    }
  }

  private static final Query IN_QUERY = Parser.parseQuery("name,a,:eq,key,(,b,c,),:in,:and");

  /** Index containing a query with an :in clause. */
  private QueryIndex<Query> inIdx() {
    return QueryIndex.newInstance(cacheSupplier).add(IN_QUERY, IN_QUERY);
  }

  @Test
  public void inMissingKey() {
    Id id = id("a", "key2", "b", "foo", "bar");
    assertEquals(Collections.emptyList(), inIdx(), id);
  }

  @Test
  public void inMatches() {
    Id id1 = id("a", "key", "b", "c", "12345");
    Id id2 = id("a", "foo", "bar", "key", "c", "c", "foobar");
    assertEquals(list(IN_QUERY), inIdx(), id1);
    assertEquals(list(IN_QUERY), inIdx(), id2);
  }

  @Test
  public void inValueNotInSet() {
    Id id = id("a", "key", "d", "c", "12345");
    assertEquals(Collections.emptyList(), inIdx(), id);
  }

  @Test
  public void trueMatches() {
    // The TRUE query matches any id.
    QueryIndex<Query> idx = QueryIndex.newInstance(cacheSupplier).add(Query.TRUE, Query.TRUE);
    Id id1 = id("a", "key", "b", "c", "12345");
    Id id2 = id("a", "foo", "bar", "key", "b", "c", "foobar");
    assertEquals(list(Query.TRUE), idx, id1);
    assertEquals(list(Query.TRUE), idx, id2);
  }

  @Test
  public void falseDoesNotMatch() {
    // Adding FALSE is a no-op since it can never match.
    QueryIndex<Query> idx = QueryIndex.newInstance(cacheSupplier).add(Query.FALSE, Query.FALSE);
    Assertions.assertTrue(idx.isEmpty());
  }

  @Test
  public void removals() {
    QueryIndex<Query> idx = QueryIndex.newInstance(cacheSupplier);
    idx.add(SIMPLE_QUERY, SIMPLE_QUERY);
    idx.add(HASKEY_QUERY, HASKEY_QUERY);
    idx.add(IN_QUERY, IN_QUERY);
    Id id1 = id("a", "key", "b", "c", "12345");
    assertEquals(list(SIMPLE_QUERY, IN_QUERY, HASKEY_QUERY), idx, id1);
    // Removing a query that was never added should fail and change nothing.
    Query q = Parser.parseQuery("name,a,:eq");
    Assertions.assertFalse(idx.remove(q, q));
    assertEquals(list(SIMPLE_QUERY, IN_QUERY, HASKEY_QUERY), idx, id1);
    Assertions.assertTrue(idx.remove(IN_QUERY, IN_QUERY));
    assertEquals(list(SIMPLE_QUERY, HASKEY_QUERY), idx, id1);
    Assertions.assertTrue(idx.remove(SIMPLE_QUERY, SIMPLE_QUERY));
    assertEquals(list(HASKEY_QUERY), idx, id1);
    Assertions.assertTrue(idx.remove(HASKEY_QUERY, HASKEY_QUERY));
    Assertions.assertTrue(idx.isEmpty());
    assertEquals(Collections.emptyList(), idx, id1);
    // Index should still be usable after being emptied.
    idx.add(SIMPLE_QUERY, SIMPLE_QUERY);
    assertEquals(list(SIMPLE_QUERY), idx, id1);
  }

  /** Removes a value that is keyed by itself. */
  private boolean remove(QueryIndex<Query> idx, Query value) {
    return idx.remove(value, value);
  }

  @Test
  public void removalsUsingQuery() {
    QueryIndex<Query> idx = QueryIndex.newInstance(cacheSupplier);
    idx.add(SIMPLE_QUERY, SIMPLE_QUERY);
    idx.add(HASKEY_QUERY, HASKEY_QUERY);
    idx.add(IN_QUERY, IN_QUERY);
    Id id1 = id("a", "key", "b", "c", "12345");
    assertEquals(list(SIMPLE_QUERY, IN_QUERY, HASKEY_QUERY), idx, id1);
    Assertions.assertFalse(remove(idx, Parser.parseQuery("name,a,:eq")));
    assertEquals(list(SIMPLE_QUERY, IN_QUERY, HASKEY_QUERY), idx, id1);
    Assertions.assertTrue(remove(idx, IN_QUERY));
    assertEquals(list(SIMPLE_QUERY, HASKEY_QUERY), idx, id1);
    Assertions.assertTrue(remove(idx, SIMPLE_QUERY));
    assertEquals(list(HASKEY_QUERY), idx, id1);
    Assertions.assertTrue(remove(idx, HASKEY_QUERY));
    Assertions.assertTrue(idx.isEmpty());
    assertEquals(Collections.emptyList(), idx, id1);
    idx.add(SIMPLE_QUERY, SIMPLE_QUERY);
    assertEquals(list(SIMPLE_QUERY), idx, id1);
  }

  /** Creates a set of n distinct string values: "0", "1", ..., "n-1". */
  private Set<String> set(int n) {
    Set<String> tmp = new LinkedHashSet<>();
    for (int i = 0; i < n; ++i) {
      tmp.add("" + i);
    }
    return tmp;
  }

  @Test
  public void queryNormalization() {
    // An OR over names should match ids with either name.
    Query q = Parser.parseQuery("name,a,:eq,name,b,:eq,:or,key,b,:eq,:and");
    QueryIndex<Query> idx = QueryIndex.newInstance(cacheSupplier);
    idx.add(q, q);
    assertEquals(list(q), idx, id("a", "key", "b"));
    assertEquals(list(q), idx, id("b", "key", "b"));
  }

  @Test
  public void inClauseExpansion() {
    // If the :in clauses are fully expanded with a cross-product, then this will cause an OOM
    // error because of the combinatorial explosion of simple queries (10k * 10k * 10k).
    Query q1 = new Query.In("a", set(10000));
    Query q2 = new Query.In("b", set(10000));
    Query q3 = new Query.In("c", set(10000));
    Query query = q1.and(q2).and(q3);
    QueryIndex<Query> idx = QueryIndex.newInstance(cacheSupplier).add(query, query);
    Id id1 = id("cpu", "a", "1", "b", "9999", "c", "727");
    assertEquals(list(query), idx, id1);
  }

  @Test
  public void manyQueries() {
    // CpuUsage for all instances
    Query cpuUsage = Parser.parseQuery("name,cpuUsage,:eq");
    // DiskUsage query per node
    Query diskUsage = Parser.parseQuery("name,diskUsage,:eq");
    List<Query> diskUsagePerNode = new ArrayList<>();
    for (int i = 0; i < 100; ++i) {
      String node = String.format("i-%05d", i);
      Query q = new Query.And(new Query.Equal("nf.node", node), diskUsage);
      diskUsagePerNode.add(q);
    }
    QueryIndex<Query> idx = QueryIndex.newInstance(cacheSupplier)
        .add(cpuUsage, cpuUsage)
        .add(diskUsage, diskUsage);
    for (Query q : diskUsagePerNode) {
      idx.add(q, q);
    }
    // Matching
    assertEquals(
        list(cpuUsage),
        idx,
        id("cpuUsage", "nf.node", "unknown"));
    assertEquals(
        list(cpuUsage),
        idx,
        id("cpuUsage", "nf.node", "i-00099"));
    assertEquals(
        list(diskUsage),
        idx,
        id("diskUsage", "nf.node", "unknown"));
    assertEquals(
        list(diskUsage, diskUsagePerNode.get(diskUsagePerNode.size() - 1)),
        idx,
        id("diskUsage", "nf.node", "i-00099"));
    // Shouldn't match
    assertEquals(
        Collections.emptyList(),
        idx,
        id("memoryUsage", "nf.node", "i-00099"));
  }

  @Test
  public void multipleClausesForSameKey() {
    Query q = Parser.parseQuery("name,abc.*,:re,name,.*def,:re,:and");
    QueryIndex<Query> idx = QueryIndex.newInstance(cacheSupplier).add(q, q);
    // Doesn't match prefix check
    assertEquals(Collections.emptyList(), idx, id("foodef"));
    // Doesn't match suffix check
    assertEquals(Collections.emptyList(), idx, id("abcbar"));
    // Matches both
    assertEquals(list(q), idx, id("abcdef"));
    assertEquals(list(q), idx, id("abc def"));
  }

  @Test
  public void notEqClause() {
    Query q = Parser.parseQuery("name,cpu,:eq,id,user,:eq,:not,:and");
    QueryIndex<Query> idx = QueryIndex.newInstance(cacheSupplier).add(q, q);
    assertEquals(list(q), idx, id("cpu", "id", "system"));
    assertEquals(Collections.emptyList(), idx, id("cpu", "id", "user"));
  }

  @Test
  public void notEqMissingKey() {
    // An absent key satisfies the negated equality check.
    Query q = Parser.parseQuery("name,cpu,:eq,id,user,:eq,:not,:and");
    QueryIndex<Query> idx = QueryIndex.newInstance(cacheSupplier).add(q, q);
    assertEquals(list(q), idx, id("cpu"));
  }

  @Test
  public void notEqMissingKeyMiddle() {
    // Negated key ("mm") sorts between the other keys of the id.
    Query q = Parser.parseQuery("name,cpu,:eq,mm,foo,:eq,:not,:and,zz,bar,:eq,:and");
    QueryIndex<Query> idx = QueryIndex.newInstance(cacheSupplier).add(q, q);
    assertEquals(list(q), idx, id("cpu", "zz", "bar"));
  }

  @Test
  public void notEqMissingKeyEnd() {
    // Negated key ("zz") sorts after all keys of the id.
    Query q = Parser.parseQuery("name,cpu,:eq,zz,foo,:eq,:not,:and");
    QueryIndex<Query> idx = QueryIndex.newInstance(cacheSupplier).add(q, q);
    assertEquals(list(q), idx, id("cpu"));
  }

  @Test
  public void multiNotEqClause() {
    Query q = Parser.parseQuery("name,cpu,:eq,id,system,:eq,:and,id,user,:eq,:not,:and");
    QueryIndex<Query> idx = QueryIndex.newInstance(cacheSupplier).add(q, q);
    assertEquals(list(q), idx, id("cpu", "id", "system"));
    assertEquals(Collections.emptyList(), idx, id("cpu", "id", "user"));
  }

  @Test
  public void notInClause() {
    Query q = Parser.parseQuery("name,cpu,:eq,id,(,user,iowait,),:in,:not,:and");
    QueryIndex<Query> idx = QueryIndex.newInstance(cacheSupplier).add(q, q);
    assertEquals(list(q), idx, id("cpu", "id", "system"));
    assertEquals(Collections.emptyList(), idx, id("cpu", "id", "user"));
    assertEquals(Collections.emptyList(), idx, id("cpu", "id", "iowait"));
  }

  @Test
  public void multiNotInClause() {
    Query q = Parser.parseQuery("name,cpu,:eq,id,system,:eq,:and,id,(,user,iowait,),:in,:not,:and");
    QueryIndex<Query> idx = QueryIndex.newInstance(cacheSupplier).add(q, q);
    assertEquals(list(q), idx, id("cpu", "id", "system"));
    assertEquals(Collections.emptyList(), idx, id("cpu", "id", "user"));
    assertEquals(Collections.emptyList(), idx, id("cpu", "id", "iowait"));
  }

  @Test
  public void doubleNotsSameKey() {
    // Two negated checks against the same key must both be honored.
    Query q = Parser.parseQuery("a,1,:eq,b,2,:eq,:and,c,3,:eq,:not,:and,c,4,:eq,:not,:and");
    QueryIndex<Query> idx = QueryIndex.newInstance(cacheSupplier).add(q, q);
    assertEquals(list(q), idx, id("cpu", "a", "1", "b", "2", "c", "5"));
    assertEquals(Collections.emptyList(), idx, id("cpu", "a", "1", "b", "2", "c", "3"));
    assertEquals(Collections.emptyList(), idx, id("cpu", "a", "1", "b", "2", "c", "4"));
    assertEquals(list(q), idx, id("cpu", "a", "1", "b", "2"));
  }

  @Test
  public void removalOfNotQuery() {
    Query q = Parser.parseQuery("name,cpu,:eq,id,user,:eq,:not,:and");
    QueryIndex<Query> idx = QueryIndex.newInstance(cacheSupplier).add(q, q);
    Assertions.assertTrue(idx.remove(q, q));
    Assertions.assertTrue(idx.isEmpty());
  }

  @Test
  public void removalOfNotQueryUsingQuery() {
    Query q = Parser.parseQuery("name,cpu,:eq,id,user,:eq,:not,:and");
    QueryIndex<Query> idx = QueryIndex.newInstance(cacheSupplier).add(q, q);
    Assertions.assertTrue(remove(idx, q));
    Assertions.assertTrue(idx.isEmpty());
  }

  @Test
  public void removalPrefixRegexSubtree() {
    // Removing the more specific query should not disturb the broader one that
    // shares the same regex prefix subtree.
    Query q1 = Parser.parseQuery("name,test,:eq,a,foo,:re,:and,b,bar,:eq,:and");
    Query q2 = Parser.parseQuery("name,test,:eq,a,foo,:re,:and");
    QueryIndex<Query> idx = QueryIndex.newInstance(cacheSupplier)
        .add(q1, q1)
        .add(q2, q2);
    Id id = id("test", "a", "foo", "b", "bar");
    assertEquals(list(q2, q1), idx, id);
    idx.remove(q1, q1);
    assertEquals(list(q2), idx, id);
  }

  @Test
  public void toStringMethod() {
    // Pins the exact debug representation of the index structure.
    QueryIndex<Query> idx = QueryIndex.newInstance(cacheSupplier);
    idx.add(SIMPLE_QUERY, SIMPLE_QUERY);
    idx.add(HASKEY_QUERY, HASKEY_QUERY);
    idx.add(IN_QUERY, IN_QUERY);
    String expected = "key: [name]\n" +
        "equal checks:\n" +
        "- [a]\n" +
        "    key: [key]\n" +
        "    equal checks:\n" +
        "    - [b]\n" +
        "        matches:\n" +
        "        - [name,a,:eq,key,b,:eq,:and]\n" +
        "        - [name,a,:eq,key,(,b,c,),:in,:and]\n" +
        "    - [c]\n" +
        "        matches:\n" +
        "        - [name,a,:eq,key,(,b,c,),:in,:and]\n" +
        "    other keys:\n" +
        "    key: [c]\n" +
        "    has key:\n" +
        "        key: [key]\n" +
        "        equal checks:\n" +
        "        - [b]\n" +
        "            matches:\n" +
        "            - [name,a,:eq,key,b,:eq,:and,c,:has,:and]\n";
    String actual = idx.toString();
    Assertions.assertEquals(expected, actual);
  }

  @Test
  public void addRemoveFuzz() {
    // Randomized check that every added query can be removed exactly once and
    // the index always ends up empty.
    Registry registry = new NoopRegistry();
    Random random = new Random(42);
    QueryIndex<Integer> idx = QueryIndex.newInstance(registry);
    for (int i = 0; i < 25; ++i) {
      int n = 1_000;
      List<Query> queries = new ArrayList<>(n);
      for (int j = 0; j < n; ++j) {
        queries.add(DataGenerator.randomQuery(random, 6));
      }
      for (int j = 0; j < n; ++j) {
        Query query = queries.get(j);
        idx.add(query, j);
      }
      for (int j = 0; j < n; ++j) {
        Query query = queries.get(j);
        // FALSE queries are never actually added, so removal returns false.
        Assertions.assertEquals(query != Query.FALSE, idx.remove(query, j));
        Assertions.assertFalse(idx.remove(query, j));
      }
      Assertions.assertTrue(idx.isEmpty());
    }
  }
}
0 | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/test/java/com/netflix/spectator/atlas/impl/MeasurementSerializerTest.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.impl.AsciiSet;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.util.Collections;
/**
 * Tests for MeasurementSerializer. Characters outside of the allowed set are
 * replaced with '_' in names, tag keys, and tag values, and an
 * "atlas.dstype" tag is injected when not explicitly provided.
 */
public class MeasurementSerializerTest {

  private final AsciiSet allowed = AsciiSet.fromPattern("-._A-Za-z0-9");

  private final DefaultRegistry registry = new DefaultRegistry();

  private final ObjectMapper mapper = new ObjectMapper().registerModule(
      new SimpleModule().addSerializer(
          Measurement.class,
          new MeasurementSerializer(s -> allowed.replaceNonMembers(s, '_'))));

  /**
   * Serializes a measurement for the id with timestamp 42 and value 3.0, then
   * checks the resulting JSON against the expected tags object.
   */
  private void assertEncoding(Id id, String expectedTags) throws Exception {
    Measurement m = new Measurement(id, 42L, 3.0);
    String expected = "{\"tags\":" + expectedTags + ",\"timestamp\":42,\"value\":3.0}";
    Assertions.assertEquals(expected, mapper.writeValueAsString(m));
  }

  @Test
  public void encode() throws Exception {
    assertEncoding(
        registry.createId("foo", "bar", "baz"),
        "{\"name\":\"foo\",\"bar\":\"baz\",\"atlas.dstype\":\"gauge\"}");
  }

  @Test
  public void explicitDsType() throws Exception {
    // An explicit dstype tag on the id should be preserved.
    assertEncoding(
        registry.createId("foo", "atlas.dstype", "rate"),
        "{\"name\":\"foo\",\"atlas.dstype\":\"rate\"}");
  }

  @Test
  public void invalidName() throws Exception {
    // Disallowed characters in the name get replaced with '_'.
    assertEncoding(
        registry.createId("f@%", "bar", "baz"),
        "{\"name\":\"f__\",\"bar\":\"baz\",\"atlas.dstype\":\"gauge\"}");
  }

  @Test
  public void userTagName() throws Exception {
    // A user-supplied "name" tag is ignored in favor of the id name.
    assertEncoding(
        registry.createId("foo", "name", "bar"),
        "{\"name\":\"foo\",\"atlas.dstype\":\"gauge\"}");
  }

  @Test
  public void invalidKey() throws Exception {
    assertEncoding(
        registry.createId("foo", "b$$", "baz"),
        "{\"name\":\"foo\",\"b__\":\"baz\",\"atlas.dstype\":\"gauge\"}");
  }

  @Test
  public void invalidValue() throws Exception {
    assertEncoding(
        registry.createId("foo", "bar", "b&*"),
        "{\"name\":\"foo\",\"bar\":\"b__\",\"atlas.dstype\":\"gauge\"}");
  }

  @Test
  public void publishPayloadEmpty() throws Exception {
    PublishPayload p = new PublishPayload(Collections.emptyMap(), Collections.emptyList());
    Assertions.assertEquals("{\"tags\":{},\"metrics\":[]}", mapper.writeValueAsString(p));
  }

  @Test
  public void publishPayloadNoCommonTags() throws Exception {
    Measurement m = new Measurement(registry.createId("foo"), 42L, 3.0);
    PublishPayload p = new PublishPayload(Collections.emptyMap(), Collections.singletonList(m));
    String mjson = "{\"tags\":{\"name\":\"foo\",\"atlas.dstype\":\"gauge\"},\"timestamp\":42,\"value\":3.0}";
    Assertions.assertEquals(
        "{\"tags\":{},\"metrics\":[" + mjson + "]}",
        mapper.writeValueAsString(p));
  }

  @Test
  public void publishPayloadWithCommonTags() throws Exception {
    Measurement m = new Measurement(registry.createId("foo"), 42L, 3.0);
    PublishPayload p = new PublishPayload(Collections.singletonMap("a", "b"), Collections.singletonList(m));
    String mjson = "{\"tags\":{\"name\":\"foo\",\"atlas.dstype\":\"gauge\"},\"timestamp\":42,\"value\":3.0}";
    Assertions.assertEquals(
        "{\"tags\":{\"a\":\"b\"},\"metrics\":[" + mjson + "]}",
        mapper.writeValueAsString(p));
  }
}
| 6,051 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/AtlasDistributionSummary.java | /*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Clock;
import com.netflix.spectator.api.DistributionSummary;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Statistic;
import com.netflix.spectator.impl.StepDouble;
import com.netflix.spectator.impl.StepLong;
import com.netflix.spectator.impl.StepValue;
import java.util.concurrent.atomic.AtomicLong;
/**
* Distribution summary that reports four measurements to Atlas:
*
* <ul>
* <li><b>count:</b> counter incremented each time record is called</li>
* <li><b>totalAmount:</b> counter incremented by the recorded amount</li>
* <li><b>totalOfSquares:</b> counter incremented by the recorded amount<sup>2</sup></li>
* <li><b>max:</b> maximum recorded amount</li>
* </ul>
*
* <p>Having an explicit {@code totalAmount} and {@code count} on the backend
* can be used to calculate an accurate average for an arbitrary grouping. The
* {@code totalOfSquares} is used for computing a standard deviation.</p>
*
* <p>Note that the {@link #count()} and {@link #totalAmount()} will report
* the values since the last complete interval rather than the total for the
* life of the process.</p>
*/
/**
 * Distribution summary that reports four measurements to Atlas:
 *
 * <ul>
 *   <li><b>count:</b> number of calls to record, reported as a rate</li>
 *   <li><b>totalAmount:</b> sum of the recorded amounts, reported as a rate</li>
 *   <li><b>totalOfSquares:</b> sum of the squared amounts, reported as a rate;
 *       used to compute a standard deviation on the backend</li>
 *   <li><b>max:</b> maximum recorded amount, reported as a gauge</li>
 * </ul>
 *
 * <p>Having an explicit total and count on the backend allows an accurate
 * average for an arbitrary grouping. Note that {@link #count()} and
 * {@link #totalAmount()} report values for the last complete interval rather
 * than totals for the life of the process.</p>
 */
class AtlasDistributionSummary extends AtlasMeter implements DistributionSummary {

  private final StepLong numRecords;
  private final StepLong sum;
  private final StepDouble sumSquares;
  private final StepLong maxValue;

  /** Measurement ids, positionally aligned with the stats reported in measure(). */
  private final Id[] stats;

  /** Create a new instance. */
  AtlasDistributionSummary(Id id, Clock clock, long ttl, long step) {
    super(id, clock, ttl);
    this.numRecords = new StepLong(0L, clock, step);
    this.sum = new StepLong(0L, clock, step);
    this.sumSquares = new StepDouble(0.0, clock, step);
    this.maxValue = new StepLong(0L, clock, step);
    this.stats = new Id[] {
        id.withTags(DsType.rate, Statistic.count),
        id.withTags(DsType.rate, Statistic.totalAmount),
        id.withTags(DsType.rate, Statistic.totalOfSquares),
        id.withTags(DsType.gauge, Statistic.max)
    };
  }

  @Override void measure(long now, MeasurementConsumer consumer) {
    reportRate(now, consumer, stats[0], numRecords);
    reportRate(now, consumer, stats[1], sum);
    reportRate(now, consumer, stats[2], sumSquares);
    reportMax(now, consumer, stats[3], maxValue);
  }

  /** Reports a rate measurement for the given step value. */
  private void reportRate(long now, MeasurementConsumer consumer, Id mid, StepValue v) {
    // The poll must happen before reading the timestamp to ensure the value
    // gets rotated if there was no activity in the current interval.
    final double rate = v.pollAsRate(now);
    consumer.accept(mid, v.timestamp(), rate);
  }

  /** Reports a gauge measurement for the max value. */
  private void reportMax(long now, MeasurementConsumer consumer, Id mid, StepLong v) {
    // As above, poll before reading the timestamp so rotation has occurred.
    final double m = v.poll(now);
    consumer.accept(mid, v.timestamp(), m);
  }

  @Override public void record(long amount) {
    final long now = clock.wallTime();
    numRecords.getCurrent(now).incrementAndGet();
    // Non-positive amounts only contribute to the count.
    if (amount > 0) {
      sum.getCurrent(now).addAndGet(amount);
      sumSquares.getCurrent(now).addAndGet((double) amount * amount);
      updateMax(maxValue.getCurrent(now), amount);
    }
    updateLastModTime(now);
  }

  @Override public void record(long[] amounts, int n) {
    final int limit = Math.min(Math.max(0, n), amounts.length);

    // Accumulate locally so the shared state is touched once per stat.
    long batchTotal = 0L;
    double batchSquares = 0.0;
    long batchMax = Long.MIN_VALUE;
    for (int i = 0; i < limit; ++i) {
      final long a = amounts[i];
      if (a > 0) {
        batchTotal += a;
        batchSquares += (double) a * a;
        if (a > batchMax) {
          batchMax = a;
        }
      }
    }

    // Issue updates as a batch.
    final long now = clock.wallTime();
    numRecords.getCurrent(now).addAndGet(limit);
    sum.getCurrent(now).addAndGet(batchTotal);
    sumSquares.getCurrent(now).addAndGet(batchSquares);
    updateMax(maxValue.getCurrent(now), batchMax);
    updateLastModTime(now);
  }

  /** CAS loop that raises the stored value to v if v is larger. */
  private void updateMax(AtomicLong m, long v) {
    long current = m.get();
    while (v > current && !m.compareAndSet(current, v)) {
      current = m.get();
    }
  }

  @Override public long count() {
    return numRecords.poll();
  }

  @Override public long totalAmount() {
    return sum.poll();
  }

  @Override public BatchUpdater batchUpdater(int batchSize) {
    AtlasDistSummaryBatchUpdater updater = new AtlasDistSummaryBatchUpdater(batchSize);
    updater.accept(() -> this);
    return updater;
  }

  /**
   * Helper to allow the batch updater to directly update the individual stats.
   */
  void update(long count, long total, double totalOfSquares, long max) {
    final long now = clock.wallTime();
    numRecords.getCurrent(now).addAndGet(count);
    sum.getCurrent(now).addAndGet(total);
    sumSquares.getCurrent(now).addAndGet(totalOfSquares);
    updateMax(maxValue.getCurrent(now), max);
  }
}
| 6,052 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/AtlasDistSummaryBatchUpdater.java | /*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.DistributionSummary;
import java.util.function.Consumer;
import java.util.function.Supplier;
/**
 * Batch updater that buffers distribution summary statistics locally and merges them
 * into the underlying {@link AtlasDistributionSummary} once the configured batch size
 * has been reached or {@link #flush()} is invoked.
 */
final class AtlasDistSummaryBatchUpdater
    implements DistributionSummary.BatchUpdater, Consumer<Supplier<DistributionSummary>> {
  private final int batchSize;
  private Supplier<DistributionSummary> distSummarySupplier;
  // Statistics accumulated since the last successful flush.
  private int count;
  private long total;
  private double totalOfSquares;
  private long max;
  /** Create a new instance that flushes after {@code batchSize} recordings. */
  AtlasDistSummaryBatchUpdater(int batchSize) {
    this.batchSize = batchSize;
  }
  @Override
  public void accept(Supplier<DistributionSummary> distSummarySupplier) {
    this.distSummarySupplier = distSummarySupplier;
  }
  /** Resolve the supplier, returning null unless it yields an AtlasDistributionSummary. */
  private AtlasDistributionSummary getDistributionSummary() {
    if (distSummarySupplier == null) {
      return null;
    }
    DistributionSummary summary = distSummarySupplier.get();
    return summary instanceof AtlasDistributionSummary
        ? (AtlasDistributionSummary) summary
        : null;
  }
  @Override
  public void record(long amount) {
    // The count includes every recording; only positive amounts contribute to
    // the other statistics.
    ++count;
    if (amount > 0L) {
      total += amount;
      totalOfSquares += (double) amount * amount;
      max = Math.max(max, amount);
    }
    if (count >= batchSize) {
      flush();
    }
  }
  @Override
  public void flush() {
    AtlasDistributionSummary summary = getDistributionSummary();
    if (summary != null) {
      summary.update(count, total, totalOfSquares, max);
      // Reset the local accumulation only after a successful hand off so no data
      // is lost while the summary is unavailable.
      count = 0;
      total = 0L;
      totalOfSquares = 0.0;
      max = 0L;
    }
  }
  @Override
  public void close() throws Exception {
    flush();
  }
}
| 6,053 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/AtlasCounter.java | /*
* Copyright 2014-2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Clock;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Statistic;
import com.netflix.spectator.impl.StepDouble;
/**
* Counter that reports a rate per second to Atlas. Note that {@link #count()} will
* report the number events in the last complete interval rather than the total for
* the life of the process.
*/
class AtlasCounter extends AtlasMeter implements Counter {
  // Accumulates deltas for the current step interval.
  private final StepDouble value;
  // Id used when reporting the measurement, includes statistic and ds type tags.
  private final Id stat;
  /** Create a new instance. */
  AtlasCounter(Id id, Clock clock, long ttl, long step) {
    super(id, clock, ttl);
    this.value = new StepDouble(0.0, clock, step);
    // Add the statistic for typing. Re-adding the tags from the id is to retain
    // the statistic from the id if it was already set
    this.stat = AtlasMeter.addIfMissing(id, Statistic.count, DsType.rate);
  }
  @Override void measure(long now, MeasurementConsumer consumer) {
    // Report the accumulated delta for the last complete interval as a per-second rate.
    final double rate = value.pollAsRate(now);
    consumer.accept(stat, value.timestamp(), rate);
  }
  @Override public void add(double amount) {
    // Ignore NaN, infinities, zero, and negative values: counters are monotonic,
    // so only positive finite deltas are meaningful.
    if (Double.isFinite(amount) && amount > 0.0) {
      long now = clock.wallTime();
      value.getCurrent(now).addAndGet(amount);
      updateLastModTime(now);
    }
  }
  @Override public double actualCount() {
    // Value for the last complete interval, not the lifetime total.
    return value.poll();
  }
}
| 6,054 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/AtlasTimerBatchUpdater.java | /*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Timer;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.Supplier;
/**
 * Batch updater that buffers timer statistics locally and merges them into the
 * underlying {@link AtlasTimer} once the configured batch size has been reached
 * or {@link #flush()} is invoked.
 */
final class AtlasTimerBatchUpdater implements Timer.BatchUpdater, Consumer<Supplier<Timer>> {
  private final int batchSize;
  private Supplier<Timer> timerSupplier;
  // Statistics accumulated since the last successful flush; times in nanoseconds.
  private int count;
  private double total;
  private double totalOfSquares;
  private long max;
  /** Create a new instance that flushes after {@code batchSize} recordings. */
  AtlasTimerBatchUpdater(int batchSize) {
    this.batchSize = batchSize;
  }
  @Override
  public void accept(Supplier<Timer> timerSupplier) {
    this.timerSupplier = timerSupplier;
  }
  /** Resolve the supplier, returning null unless it yields an AtlasTimer. */
  private AtlasTimer getTimer() {
    if (timerSupplier == null) {
      return null;
    }
    Timer timer = timerSupplier.get();
    return timer instanceof AtlasTimer ? (AtlasTimer) timer : null;
  }
  @Override
  public void record(long amount, TimeUnit unit) {
    // The count includes every recording; only positive amounts contribute to
    // the other statistics.
    ++count;
    if (amount > 0L) {
      final long nanos = unit.toNanos(amount);
      total += nanos;
      totalOfSquares += (double) nanos * nanos;
      max = Math.max(max, nanos);
    }
    if (count >= batchSize) {
      flush();
    }
  }
  @Override
  public void flush() {
    AtlasTimer timer = getTimer();
    if (timer != null) {
      timer.update(count, total, totalOfSquares, max);
      // Reset the local accumulation only after a successful hand off so no data
      // is lost while the timer is unavailable.
      count = 0;
      total = 0.0;
      totalOfSquares = 0.0;
      max = 0L;
    }
  }
  @Override
  public void close() throws Exception {
    flush();
  }
}
| 6,055 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/MeasurementConsumer.java | /*
* Copyright 2014-2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Id;
/** Callback used by meters to report individual measurements without intermediate allocations. */
@FunctionalInterface
interface MeasurementConsumer {
  /**
   * Accept a single measurement.
   *
   * @param id id for the measurement, including the statistic tags
   * @param timestamp wall-clock timestamp associated with the sampled value
   * @param value sampled value
   */
  void accept(Id id, long timestamp, double value);
}
| 6,056 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/AtlasTimer.java | /*
* Copyright 2014-2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Clock;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Statistic;
import com.netflix.spectator.api.Timer;
import com.netflix.spectator.impl.StepDouble;
import com.netflix.spectator.impl.StepLong;
import com.netflix.spectator.impl.StepValue;
import java.time.Duration;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
/**
* Timer that reports four measurements to Atlas:
*
* <ul>
* <li><b>count:</b> counter incremented each time record is called</li>
* <li><b>totalTime:</b> counter incremented by the recorded amount</li>
* <li><b>totalOfSquares:</b> counter incremented by the recorded amount<sup>2</sup></li>
* <li><b>max:</b> maximum recorded amount</li>
* </ul>
*
* <p>Having an explicit {@code totalTime} and {@code count} on the backend
* can be used to calculate an accurate average for an arbitrary grouping. The
* {@code totalOfSquares} is used for computing a standard deviation.</p>
*
* <p>Note that the {@link #count()} and {@link #totalTime()} will report
* the values since the last complete interval rather than the total for the
* life of the process.</p>
*/
class AtlasTimer extends AtlasMeter implements Timer {
  private final StepLong count;
  private final StepDouble total;
  private final StepDouble totalOfSquares;
  private final StepLong max;
  // Ids for the four reported statistics: count, totalTime, totalOfSquares, max.
  private final Id[] stats;
  /** Create a new instance. */
  AtlasTimer(Id id, Clock clock, long ttl, long step) {
    super(id, clock, ttl);
    this.count = new StepLong(0L, clock, step);
    this.total = new StepDouble(0.0, clock, step);
    this.totalOfSquares = new StepDouble(0.0, clock, step);
    this.max = new StepLong(0L, clock, step);
    this.stats = new Id[] {
        id.withTags(DsType.rate, Statistic.count),
        id.withTags(DsType.rate, Statistic.totalTime),
        id.withTags(DsType.rate, Statistic.totalOfSquares),
        id.withTags(DsType.gauge, Statistic.max)
    };
  }
  @Override void measure(long now, MeasurementConsumer consumer) {
    // Amounts are recorded in nanoseconds; the factors convert the reported
    // rates to seconds (1e-9) and seconds squared (1e-18).
    reportMeasurement(now, consumer, stats[0], count, 1.0);
    reportMeasurement(now, consumer, stats[1], total, 1e-9);
    reportMeasurement(now, consumer, stats[2], totalOfSquares, 1e-18);
    reportMaxMeasurement(now, consumer, stats[3], max);
  }
  /** Report a counter statistic as a rate per second scaled by {@code f}. */
  private void reportMeasurement(long now, MeasurementConsumer consumer, Id mid, StepValue v, double f) {
    // poll needs to be called before accessing the timestamp to ensure
    // the counters have been rotated if there was no activity in the
    // current interval.
    double rate = v.pollAsRate(now) * f;
    long timestamp = v.timestamp();
    consumer.accept(mid, timestamp, rate);
  }
  /** Report the max statistic converted from nanoseconds to seconds. */
  private void reportMaxMeasurement(long now, MeasurementConsumer consumer, Id mid, StepLong v) {
    // poll needs to be called before accessing the timestamp to ensure
    // the counters have been rotated if there was no activity in the
    // current interval.
    double maxValue = v.poll(now) / 1e9;
    long timestamp = v.timestamp();
    consumer.accept(mid, timestamp, maxValue);
  }
  @Override public Clock clock() {
    return clock;
  }
  @Override public void record(long amount, TimeUnit unit) {
    long now = clock.wallTime();
    // Every recording increments the count; only positive amounts contribute
    // to the other statistics.
    count.getCurrent(now).incrementAndGet();
    if (amount > 0) {
      final long nanos = unit.toNanos(amount);
      total.getCurrent(now).addAndGet(nanos);
      totalOfSquares.getCurrent(now).addAndGet((double) nanos * nanos);
      updateMax(max.getCurrent(now), nanos);
    }
    updateLastModTime(now);
  }
  @Override public void record(long[] amounts, int n, TimeUnit unit) {
    final int limit = Math.min(Math.max(0, n), amounts.length);
    double accumulatedTotal = 0.0;
    double accumulatedTotalOfSquares = 0.0;
    long accumulatedMax = Long.MIN_VALUE;
    // accumulate results
    for (int i = 0; i < limit; i++) {
      final long nanos = unit.toNanos(amounts[i]);
      if (nanos > 0) {
        accumulatedTotal += nanos;
        accumulatedTotalOfSquares += ((double) nanos * nanos);
        accumulatedMax = Math.max(nanos, accumulatedMax);
      }
    }
    commitBatch(limit, accumulatedTotal, accumulatedTotalOfSquares, accumulatedMax);
  }
  @Override public void record(Duration[] amounts, int n) {
    final int limit = Math.min(Math.max(0, n), amounts.length);
    double accumulatedTotal = 0.0;
    double accumulatedTotalOfSquares = 0.0;
    long accumulatedMax = Long.MIN_VALUE;
    // accumulate results
    for (int i = 0; i < limit; i++) {
      final long nanos = amounts[i].toNanos();
      if (nanos > 0) {
        accumulatedTotal += nanos;
        accumulatedTotalOfSquares += ((double) nanos * nanos);
        accumulatedMax = Math.max(nanos, accumulatedMax);
      }
    }
    commitBatch(limit, accumulatedTotal, accumulatedTotalOfSquares, accumulatedMax);
  }
  /**
   * Issue the accumulated statistics for a batch of recordings as a single set of
   * updates. Extracted so both array based {@code record} overloads share the
   * commit logic instead of duplicating it.
   *
   * @param numRecordings number of recordings in the batch, including non-positive amounts
   * @param accTotal sum of the positive amounts in nanoseconds
   * @param accTotalOfSquares sum of the squares of the positive amounts
   * @param accMax largest positive amount, or {@code Long.MIN_VALUE} if there was none
   *     (ignored by {@link #updateMax} since the stored max is never negative)
   */
  private void commitBatch(int numRecordings, double accTotal, double accTotalOfSquares, long accMax) {
    final long now = clock.wallTime();
    count.getCurrent(now).addAndGet(numRecordings);
    total.getCurrent(now).addAndGet(accTotal);
    totalOfSquares.getCurrent(now).addAndGet(accTotalOfSquares);
    updateMax(max.getCurrent(now), accMax);
    updateLastModTime(now);
  }
  /** Atomically raise {@code maxValue} to {@code v} if {@code v} is larger. */
  private void updateMax(AtomicLong maxValue, long v) {
    long p = maxValue.get();
    while (v > p && !maxValue.compareAndSet(p, v)) {
      p = maxValue.get();
    }
  }
  @Override public long count() {
    return count.poll();
  }
  @Override public long totalTime() {
    // Cannot change the return type since this is a public API so the result of this can
    // potentially overflow and result in a negative value. This is predominately used for
    // unit tests so it is rarely a problem in practice. API can be revisited in 2.0.
    return (long) total.poll();
  }
  @Override public BatchUpdater batchUpdater(int batchSize) {
    AtlasTimerBatchUpdater updater = new AtlasTimerBatchUpdater(batchSize);
    updater.accept(() -> this);
    return updater;
  }
  /**
   * Helper to allow the batch updater to directly update the individual stats.
   *
   * @param count number of recordings in the batch
   * @param total sum of the recorded amounts in nanoseconds
   * @param totalOfSquares sum of the squares of the recorded amounts
   * @param max largest amount recorded in the batch, in nanoseconds
   */
  void update(long count, double total, double totalOfSquares, long max) {
    long now = clock.wallTime();
    this.count.getCurrent(now).addAndGet(count);
    this.total.getCurrent(now).addAndGet(total);
    this.totalOfSquares.getCurrent(now).addAndGet(totalOfSquares);
    updateMax(this.max.getCurrent(now), max);
    // Fix: refresh the activity timestamp, matching the direct record paths, so a
    // timer fed exclusively through a batch updater is not expired while in use.
    updateLastModTime(now);
  }
}
| 6,057 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/SubscriptionManager.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.spectator.api.Clock;
import com.netflix.spectator.atlas.impl.Subscription;
import com.netflix.spectator.atlas.impl.Subscriptions;
import com.netflix.spectator.ipc.http.HttpClient;
import com.netflix.spectator.ipc.http.HttpResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.InputStream;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
* Helper for managing the set of LWC subscriptions.
*/
class SubscriptionManager {
  private static final Logger LOGGER = LoggerFactory.getLogger(SubscriptionManager.class);
  private final ObjectMapper mapper;
  private final HttpClient client;
  private final Clock clock;
  // Endpoint that returns the current set of LWC subscriptions.
  private final URI uri;
  private final int connectTimeout;
  private final int readTimeout;
  // Publish step and LWC step sizes in milliseconds, used to filter out
  // subscriptions with unsupported frequencies.
  private final long stepMillis;
  private final long lwcStepMillis;
  private final boolean ignorePublishStep;
  // How long a subscription remains active after it was last seen from the server.
  private final long configTTL;
  // Active subscriptions; the value is the expiration timestamp passed in via
  // Subscriptions.update (now + configTTL).
  private final Map<Subscription, Long> subscriptions = new ConcurrentHashMap<>();
  // Last successfully fetched payload; reused when the server responds 304 or a
  // refresh attempt fails, so subscription TTLs continue to be extended.
  private Subscriptions payload;
  // ETag from the last successful fetch, echoed back via If-None-Match so an
  // unchanged payload is not transferred again.
  private String etag;
  /** Create a new instance. */
  SubscriptionManager(ObjectMapper mapper, HttpClient client, Clock clock, AtlasConfig config) {
    this.mapper = mapper;
    mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    this.client = client;
    this.clock = clock;
    this.uri = URI.create(config.configUri());
    this.connectTimeout = (int) config.connectTimeout().toMillis();
    this.readTimeout = (int) config.readTimeout().toMillis();
    this.stepMillis = config.step().toMillis();
    this.lwcStepMillis = config.lwcStep().toMillis();
    this.ignorePublishStep = config.lwcIgnorePublishStep();
    this.configTTL = config.configTTL().toMillis();
  }
  /** Returns the current set of active subscriptions. */
  List<Subscription> subscriptions() {
    return new ArrayList<>(subscriptions.keySet());
  }
  /** Refresh the subscriptions from the server. */
  void refresh() {
    // Request latest expressions from the server
    try {
      HttpResponse res = client.get(uri)
          .withConnectTimeout(connectTimeout)
          .withReadTimeout(readTimeout)
          .acceptGzip()
          .addHeader("If-None-Match", etag)
          .reuseResponseBuffers(true)
          .send();
      if (res.status() == 304) {
        LOGGER.debug("no modification to subscriptions");
      } else if (res.status() != 200) {
        LOGGER.warn("failed to update subscriptions, received status {}", res.status());
      } else {
        etag = res.header("ETag");
        try (InputStream in = res.entityInputStream()) {
          payload = filterByStep(mapper.readValue(in, Subscriptions.class));
        }
      }
    } catch (Exception e) {
      // Best effort: keep using the previously fetched payload if the refresh fails.
      LOGGER.warn("failed to update subscriptions (uri={})", uri, e);
    }
    // Update with the current payload, it will be null if there hasn't been a single
    // successful request
    if (payload != null) {
      long now = clock.wallTime();
      payload.update(subscriptions, now, now + configTTL);
    }
  }
  /** Drop subscriptions whose frequency cannot be satisfied by the configured steps. */
  private Subscriptions filterByStep(Subscriptions subs) {
    List<Subscription> filtered = new ArrayList<>(subs.getExpressions().size());
    for (Subscription sub : subs.getExpressions()) {
      if (isSupportedFrequency(sub.getFrequency())) {
        filtered.add(sub);
      } else {
        LOGGER.trace("ignored subscription with invalid frequency: {}", sub);
      }
    }
    return new Subscriptions().withExpressions(filtered);
  }
  // A frequency is supported if it is an even multiple of the LWC step and, when
  // ignorePublishStep is set, not equal to the publish step.
  private boolean isSupportedFrequency(long s) {
    return s >= lwcStepMillis && s % lwcStepMillis == 0 && (s != stepMillis || !ignorePublishStep);
  }
}
| 6,058 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/AtlasMeter.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Clock;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.api.Meter;
import com.netflix.spectator.api.Tag;
import java.util.ArrayList;
import java.util.List;
/** Base class for core meter types used by AtlasRegistry. */
abstract class AtlasMeter implements Meter {
  /**
   * Add the new tags to the id if they are not already present. Tries to minimize the number
   * of allocations by checking if they are present first.
   */
  static Id addIfMissing(Id id, Tag t1, Tag t2) {
    String k1 = t1.key();
    String k2 = t2.key();
    boolean hasT1 = false;
    boolean hasT2 = false;
    // NOTE(review): loop starts at 1, presumably because position 0 of an Id holds
    // the metric name rather than a tag -- confirm against the Id contract.
    for (int i = 1; i < id.size(); ++i) {
      hasT1 = hasT1 || k1.equals(id.getKey(i));
      hasT2 = hasT2 || k2.equals(id.getKey(i));
      if (hasT1 && hasT2) {
        break;
      }
    }
    if (hasT1 && hasT2) {
      // Both keys already present, no allocation needed
      return id;
    } else if (!hasT1 && !hasT2) {
      return id.withTags(t1.key(), t1.value(), t2.key(), t2.value());
    } else if (!hasT1) {
      return id.withTag(t1);
    } else {
      return id.withTag(t2);
    }
  }
  /** Base identifier for all measurements supplied by this meter. */
  protected final Id id;
  /** Time source for checking if the meter has expired. */
  protected final Clock clock;
  /** TTL value for an inactive meter. */
  private final long ttl;
  /** Last time this meter was updated. Volatile for cross-thread visibility. */
  private volatile long lastUpdated;
  /** Create a new instance. */
  AtlasMeter(Id id, Clock clock, long ttl) {
    this.id = id;
    this.clock = clock;
    this.ttl = ttl;
    lastUpdated = clock.wallTime();
  }
  /**
   * Updates the last updated timestamp for the meter to indicate it is active and should
   * not be considered expired.
   */
  void updateLastModTime(long now) {
    lastUpdated = now;
  }
  @Override public Id id() {
    return id;
  }
  @Override public boolean hasExpired() {
    return hasExpired(clock.wallTime());
  }
  /** Returns true if there has been no activity within the TTL window ending at {@code now}. */
  boolean hasExpired(long now) {
    return now - lastUpdated > ttl;
  }
  @Override public Iterable<Measurement> measure() {
    long now = clock.wallTime();
    List<Measurement> ms = new ArrayList<>();
    measure(now, (id, timestamp, value) -> ms.add(new Measurement(id, timestamp, value)));
    return ms;
  }
  /** Report the current measurements for this meter to the consumer. */
  abstract void measure(long now, MeasurementConsumer consumer);
}
| 6,059 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/Rollups.java | /*
* Copyright 2014-2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.api.NoopRegistry;
import com.netflix.spectator.api.Utils;
import com.netflix.spectator.atlas.impl.Parser;
import com.netflix.spectator.atlas.impl.Query;
import com.netflix.spectator.atlas.impl.QueryIndex;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.DoubleBinaryOperator;
import java.util.function.Function;
/**
* Helper functions for performing rollups.
*/
final class Rollups {
  private Rollups() {
    // utility class, no instances
  }
  /**
   * Create a rollup policy based on a list of rules.
   */
  static RollupPolicy fromRules(Map<String, String> commonTags, List<RollupPolicy.Rule> rules) {
    QueryIndex<RollupPolicy.Rule> index = QueryIndex.newInstance(new NoopRegistry());
    for (RollupPolicy.Rule rule : rules) {
      // Apply common tags to simplify the query and avoid needing to merge with the ids
      // before evaluating the query
      Query query = Parser.parseQuery(rule.query()).simplify(commonTags);
      index.add(query, rule);
    }
    return ms -> {
      // Common tags -> aggregated measurements
      Map<Map<String, String>, Map<Id, Aggregator>> aggregates = new HashMap<>();
      for (Measurement m : ms) {
        List<RollupPolicy.Rule> matches = index.findMatches(m.id());
        if (matches.isEmpty()) {
          // No matches for the id, but we still need to treat as an aggregate because
          // rollup on another id could cause a collision
          Map<Id, Aggregator> idMap = aggregates.computeIfAbsent(commonTags, k -> new HashMap<>());
          updateAggregate(idMap, m.id(), m);
        } else {
          // For matching rules, find dimensions from common tags and others that are part
          // of the id
          Set<String> commonDimensions = new HashSet<>();
          Set<String> otherDimensions = new HashSet<>();
          for (RollupPolicy.Rule rule : matches) {
            for (String dimension : rule.rollup()) {
              if (commonTags.containsKey(dimension)) {
                commonDimensions.add(dimension);
              } else {
                otherDimensions.add(dimension);
              }
            }
          }
          // Perform rollup by removing the dimensions
          Map<String, String> tags = commonDimensions.isEmpty()
              ? commonTags
              : rollup(commonTags, commonDimensions);
          Id id = otherDimensions.isEmpty()
              ? m.id()
              : m.id().filterByKey(k -> !otherDimensions.contains(k));
          Map<Id, Aggregator> idMap = aggregates.computeIfAbsent(tags, k -> new HashMap<>());
          updateAggregate(idMap, id, m);
        }
      }
      // Convert to final result type
      List<RollupPolicy.Result> results = new ArrayList<>();
      for (Map.Entry<Map<String, String>, Map<Id, Aggregator>> entry : aggregates.entrySet()) {
        results.add(new RollupPolicy.Result(entry.getKey(), toMeasurements(entry.getValue())));
      }
      return results;
    };
  }
  /** Return a copy of {@code tags} with the rollup {@code dimensions} removed. */
  private static Map<String, String> rollup(Map<String, String> tags, Set<String> dimensions) {
    Map<String, String> tmp = new HashMap<>(tags);
    for (String dimension : dimensions) {
      tmp.remove(dimension);
    }
    return tmp;
  }
  /**
   * Aggregate the measurements after applying the mapping function to the ids. Counter types
   * will use a sum aggregation and gauges will use a max aggregation.
   *
   * @param idMapper
   *     Map an id to a new identifier that will be used for the resulting aggregate measurement.
   *     Measurements mapped to null are dropped.
   * @param measurements
   *     Set of input measurements to aggregate.
   * @return
   *     Aggregated set of measurements.
   */
  static List<Measurement> aggregate(Function<Id, Id> idMapper, List<Measurement> measurements) {
    Map<Id, Aggregator> aggregates = new HashMap<>();
    for (Measurement m : measurements) {
      Id id = idMapper.apply(m.id());
      if (id != null) {
        updateAggregate(aggregates, id, m);
      }
    }
    return toMeasurements(aggregates);
  }
  /** Merge measurement {@code m} into the aggregator for {@code id}, creating one if needed. */
  private static void updateAggregate(Map<Id, Aggregator> aggregates, Id id, Measurement m) {
    Aggregator aggregator = aggregates.get(id);
    if (aggregator == null) {
      aggregator = newAggregator(id, m);
      aggregates.put(id, aggregator);
    } else {
      aggregator.update(m);
    }
  }
  /** Convert the map of aggregators into the final list of measurements. */
  private static List<Measurement> toMeasurements(Map<Id, Aggregator> aggregates) {
    List<Measurement> result = new ArrayList<>(aggregates.size());
    for (Aggregator aggregator : aggregates.values()) {
      result.add(aggregator.toMeasurement());
    }
    return result;
  }
  // Statistic values that should be aggregated with a sum; anything else uses max.
  private static final Set<String> SUM_STATS = new LinkedHashSet<>();
  static {
    SUM_STATS.add("count");
    SUM_STATS.add("totalAmount");
    SUM_STATS.add("totalTime");
    SUM_STATS.add("totalOfSquares");
    SUM_STATS.add("percentile");
  }
  private static final DoubleBinaryOperator SUM = nanAwareOp(Double::sum);
  private static final DoubleBinaryOperator MAX = nanAwareOp(Double::max);
  /** Wrap {@code op} so a NaN on either side yields the other value instead of NaN. */
  private static DoubleBinaryOperator nanAwareOp(DoubleBinaryOperator op) {
    return (a, b) -> Double.isNaN(a) ? b : Double.isNaN(b) ? a : op.applyAsDouble(a, b);
  }
  /** Choose sum or max aggregation based on the statistic tag of the id. */
  private static Aggregator newAggregator(Id id, Measurement m) {
    String statistic = Utils.getTagValue(id, "statistic");
    if (statistic != null && SUM_STATS.contains(statistic)) {
      return new Aggregator(id.withTag(DsType.sum), m.timestamp(), SUM, m.value());
    } else {
      return new Aggregator(id, m.timestamp(), MAX, m.value());
    }
  }
  /** Accumulates values for a single id using the supplied binary operator. */
  private static class Aggregator {
    private final Id id;
    // Timestamp of the first measurement seen for this id.
    private final long timestamp;
    private final DoubleBinaryOperator af;
    private double value;
    Aggregator(Id id, long timestamp, DoubleBinaryOperator af, double init) {
      this.id = id;
      this.timestamp = timestamp;
      this.af = af;
      this.value = init;
    }
    void update(Measurement m) {
      value = af.applyAsDouble(value, m.value());
    }
    Measurement toMeasurement() {
      return new Measurement(id, timestamp, value);
    }
  }
}
| 6,060 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/AtlasRegistry.java | /*
* Copyright 2014-2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.spectator.api.AbstractRegistry;
import com.netflix.spectator.api.Clock;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.DistributionSummary;
import com.netflix.spectator.api.Gauge;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.api.Meter;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Timer;
import com.netflix.spectator.api.Utils;
import com.netflix.spectator.atlas.impl.Consolidator;
import com.netflix.spectator.atlas.impl.DefaultPublisher;
import com.netflix.spectator.atlas.impl.EvalPayload;
import com.netflix.spectator.atlas.impl.Evaluator;
import com.netflix.spectator.atlas.impl.EvaluatorConfig;
import com.netflix.spectator.atlas.impl.PublishPayload;
import com.netflix.spectator.impl.Scheduler;
import com.netflix.spectator.ipc.http.HttpClient;
import javax.inject.Inject;
import javax.inject.Singleton;
import java.net.URI;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.TreeMap;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
/**
* Registry for reporting metrics to Atlas.
*/
@Singleton
public final class AtlasRegistry extends AbstractRegistry implements AutoCloseable {
private static final String PUBLISH_TASK_TIMER = "spectator.atlas.publishTaskTime";
private final Clock stepClock;
private final AtlasConfig config;
private final Duration step;
private final long stepMillis;
private final long meterTTL;
private final URI uri;
private final Duration lwcStep;
private final long lwcStepMillis;
private final Duration configRefreshFrequency;
private final URI evalUri;
private final int batchSize;
private final int numThreads;
private final Map<String, String> commonTags;
private final Registry debugRegistry;
private final RollupPolicy rollupPolicy;
private final Publisher publisher;
private Scheduler scheduler;
private final SubscriptionManager subManager;
private final Evaluator evaluator;
private final boolean parallelPolling;
private long lastPollTimestamp = -1L;
private long lastFlushTimestamp = -1L;
private final ConcurrentHashMap<Id, Consolidator> atlasMeasurements = new ConcurrentHashMap<>();
private final ConcurrentHashMap<String, Lock> publishTaskLocks = new ConcurrentHashMap<>();
  /**
   * Create a new instance. Uses a default HTTP client; collection starts
   * automatically if {@code config.autoStart()} is enabled.
   */
  @Inject
  public AtlasRegistry(Clock clock, AtlasConfig config) {
    this(clock, config, null);
  }
  /** Create a new instance. Should only be used directly in tests. */
  AtlasRegistry(Clock clock, AtlasConfig config, HttpClient client) {
    super(new OverridableClock(clock), config);
    this.config = config;
    this.stepClock = new StepClock(clock, config.lwcStep().toMillis());
    this.step = config.step();
    this.stepMillis = step.toMillis();
    this.meterTTL = config.meterTTL().toMillis();
    this.uri = URI.create(config.uri());
    this.lwcStep = config.lwcStep();
    this.lwcStepMillis = lwcStep.toMillis();
    // Validate that the LWC step evenly divides the publish step.
    if (lwcStepMillis > stepMillis) {
      throw new IllegalArgumentException(
          "lwcStep cannot be larger than step (" + lwcStep + " > " + step + ")");
    }
    if (stepMillis % lwcStepMillis != 0) {
      throw new IllegalArgumentException(
          "step is not an even multiple of lwcStep (" + step + " % " + lwcStep + " != 0)");
    }
    this.configRefreshFrequency = config.configRefreshFrequency();
    this.evalUri = URI.create(config.evalUri());
    this.batchSize = config.batchSize();
    this.numThreads = config.numThreads();
    // TreeMap gives a stable, sorted ordering for the common tags
    this.commonTags = new TreeMap<>(config.commonTags());
    this.debugRegistry = Optional.ofNullable(config.debugRegistry()).orElse(this);
    this.rollupPolicy = config.rollupPolicy();
    HttpClient httpClient = client != null ? client : HttpClient.create(debugRegistry);
    Publisher pub = config.publisher();
    // Fall back to the default publisher unless one was provided via the config
    this.publisher = pub == null ? new DefaultPublisher(config, httpClient, debugRegistry) : pub;
    EvaluatorConfig evalConfig = EvaluatorConfig.fromAtlasConfig(config);
    this.subManager = new SubscriptionManager(new ObjectMapper(), httpClient, clock, config);
    this.evaluator = new Evaluator(evalConfig);
    this.parallelPolling = evalConfig.parallelMeasurementPolling();
    if (config.autoStart()) {
      start();
    }
  }
  /**
   * Start the scheduler to collect metrics data. Calling this on an already
   * started registry logs a warning and is otherwise a no-op.
   */
  public void start() {
    // NOTE(review): the null check is not synchronized; concurrent calls to
    // start() could race -- confirm callers only invoke this from one thread.
    if (scheduler == null) {
      logger.info("common tags: {}", commonTags);
      publisher.init();
      // Setup main collection for publishing to Atlas
      Scheduler.Options options = new Scheduler.Options()
          .withFrequency(Scheduler.Policy.FIXED_RATE_SKIP_IF_LONG, step)
          .withInitialDelay(Duration.ofMillis(config.initialPollingDelay(clock(), stepMillis)))
          .withStopOnFailure(false);
      scheduler = new Scheduler(debugRegistry, "spectator-reg-atlas", numThreads);
      scheduler.schedule(options, this::sendToAtlas)
      logger.info("started collecting metrics every {} reporting to {}", step, uri);
      // Setup collection for LWC
      Scheduler.Options lwcOptions = new Scheduler.Options()
          .withFrequency(Scheduler.Policy.FIXED_RATE_SKIP_IF_LONG, lwcStep)
          .withInitialDelay(Duration.ofMillis(config.initialPollingDelay(clock(), lwcStepMillis)))
          .withStopOnFailure(false);
      scheduler.schedule(lwcOptions, this::sendToLWC);
      // Setup refresh of LWC subscription data
      Scheduler.Options subOptions = new Scheduler.Options()
          .withFrequency(Scheduler.Policy.FIXED_DELAY, configRefreshFrequency)
          .withStopOnFailure(false);
      scheduler.schedule(subOptions, this::fetchSubscriptions);
    } else {
      logger.warn("registry already started, ignoring duplicate request");
    }
  }
/**
 * Stop the scheduler reporting Atlas data. After shutting down the background tasks, it
 * attempts a best-effort final flush: first for any data from the previous interval, then
 * for the current partial interval by overriding the wall clock forward to the next step
 * boundary. Failures during the flush are logged but not propagated.
 */
public void stop() {
  if (scheduler == null) {
    logger.warn("registry stopped, but was never started");
  } else {
    // Shutdown background tasks to collect data
    scheduler.shutdown();
    scheduler = null;
    logger.info("stopped collecting metrics every {}ms reporting to {}", step, uri);
    // Flush data to Atlas
    try {
      // Get current time at start to avoid drift while flushing
      OverridableClock overridableClock = (OverridableClock) clock();
      long now = clock().wallTime();
      overridableClock.setWallTime(now); // use set time rather than underlying clock
      // Data for the previous interval may not have already been written, go ahead and
      // try to write it out
      logger.info("flushing data for previous interval to Atlas");
      sendToAtlas();
      // Move to end of next interval and ensure it gets written out
      logger.info("flushing data for final interval to Atlas");
      overridableClock.setWallTime(now / lwcStepMillis * lwcStepMillis + lwcStepMillis);
      pollMeters(overridableClock.wallTime());
      overridableClock.setWallTime(now / stepMillis * stepMillis + stepMillis);
      sendToAtlas();
    } catch (Exception e) {
      logger.warn("failed to flush data to Atlas", e);
    }
    // Shutdown publisher used for sending metrics
    try {
      publisher.close();
    } catch (Exception e) {
      logger.debug("failed to cleanly shutdown publisher");
    }
  }
}
/**
 * Stop the scheduler reporting Atlas data. This is the same as calling {@link #stop()} and
 * is included to allow the registry to be stopped correctly when used with DI frameworks that
 * support lifecycle management.
 */
@Override public void close() {
  stop();
}
/**
 * Returns the timestamp of the last completed interval for the specified step size,
 * i.e. the current wall time truncated down to a multiple of the step.
 */
long lastCompletedTimestamp(long s) {
  final long now = clock().wallTime();
  return now - now % s;
}
/** Returns a timer for tracking how long the publish task with the given id takes. */
private Timer publishTaskTimer(String id) {
  return debugRegistry.timer(PUBLISH_TASK_TIMER, "id", id);
}
/** Run a publish task, timing it and locking on a name equal to the task id. */
private void timePublishTask(String id, Runnable task) {
  timePublishTask(id, id, task);
}
/**
 * Run a publish task while holding the named lock, recording its duration. Tasks that
 * share mutable state should pass the same lock name so they are serialized, e.g.
 * pollMeters and getBatches both lock on "atlasMeasurements".
 */
private void timePublishTask(String id, String lockName, Runnable task) {
  publishTaskTimer(id).record(() -> {
    Lock lock = publishTaskLocks.computeIfAbsent(lockName, n -> new ReentrantLock());
    lock.lock();
    try {
      task.run();
    } finally {
      lock.unlock();
    }
  });
}
/**
 * Publish the consolidated measurements for the last completed publish interval to Atlas.
 * Duplicate invocations for the same interval are skipped based on lastFlushTimestamp.
 * Expired meters are cleaned up on every call regardless of whether publishing is enabled.
 */
void sendToAtlas() {
  timePublishTask("sendToAtlas", () -> {
    if (config.enabled()) {
      long t = lastCompletedTimestamp(stepMillis);
      if (t > lastFlushTimestamp) {
        pollMeters(t);
        logger.debug("sending to Atlas for time: {}", t);
        // Send all batches and wait for them to complete before updating the flush time
        List<CompletableFuture<Void>> futures = new ArrayList<>();
        for (RollupPolicy.Result batch : getBatches(t)) {
          PublishPayload p = new PublishPayload(batch.commonTags(), batch.measurements());
          CompletableFuture<Void> future = publisher.publish(p);
          futures.add(future);
        }
        CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join();
        lastFlushTimestamp = t;
      } else {
        logger.debug("skipping duplicate flush attempt for time: {}", t);
      }
    } else {
      logger.debug("publishing is disabled, skipping collection");
    }
    // Clean up any expired meters, do this regardless of whether it is enabled to avoid
    // a memory leak
    removeExpiredMeters();
  });
}
/**
 * Poll the meters for the last completed LWC interval and, if LWC is enabled, evaluate
 * the current subscriptions and send the results to the LWC service. Failures to send
 * are logged but not propagated.
 */
void sendToLWC() {
  timePublishTask("sendToLWC", () -> {
    long t = lastCompletedTimestamp(lwcStepMillis);
    //if (config.enabled() || config.lwcEnabled()) {
    // If either are enabled we poll the meters for each step interval to flush the
    // data into the consolidator
    // NOTE: temporarily to avoid breaking some internal use-cases, the meters will always
    // be polled to ensure the consolidated values for the main publishing path will be
    // correct. Once those use-cases have been transitioned the condition should be enabled
    // again.
    pollMeters(t);
    //}
    if (config.lwcEnabled()) {
      logger.debug("sending to LWC for time: {}", t);
      try {
        EvalPayload payload = evaluator.eval(t);
        if (!payload.getMetrics().isEmpty()) {
          // Send all batches and wait for completion
          List<CompletableFuture<Void>> futures = new ArrayList<>();
          for (EvalPayload batch : payload.toBatches(batchSize)) {
            CompletableFuture<Void> future = publisher.publish(batch);
            futures.add(future);
          }
          CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join();
        }
      } catch (Exception e) {
        logger.warn("failed to send metrics for subscriptions (uri={})", evalUri, e);
      }
    } else {
      logger.debug("lwc is disabled, skipping subscriptions");
    }
  });
}
/**
 * Collect measurements from all the meters in the registry for the given timestamp,
 * feeding them into both the Atlas consolidators and the LWC streaming evaluator.
 * Duplicate polls for the same interval are skipped, e.g. when both the Atlas and
 * LWC tasks trigger a poll for the same timestamp.
 */
void pollMeters(long t) {
  timePublishTask("pollMeters", "atlasMeasurements", () -> {
    if (t > lastPollTimestamp) {
      MeasurementConsumer consumer = (id, timestamp, value) -> {
        // Update the map for data to go to the Atlas storage layer
        Consolidator consolidator = Utils.computeIfAbsent(atlasMeasurements, id, k -> {
          int multiple = (int) (stepMillis / lwcStepMillis);
          return Consolidator.create(k, stepMillis, multiple);
        });
        consolidator.update(timestamp, value);
        // Update aggregators for streaming
        evaluator.update(id, timestamp, value);
      };
      logger.debug("collecting measurements for time: {}", t);
      // Measuring may be done in parallel if configured via the evaluator config
      publishTaskTimer("pollMeasurements").record(() -> StreamSupport
          .stream(spliterator(), parallelPolling)
          .forEach(meter -> ((AtlasMeter) meter).measure(t, consumer)));
      lastPollTimestamp = t;
    }
  });
}
/**
* Removes expired meters from the registry. This is public to allow some integration
* from third parties. Behavior may change in the future. It is strongly advised to only
* interact with AtlasRegistry using the interface provided by Registry.
*/
@SuppressWarnings("PMD.UselessOverridingMethod")
@Override public void removeExpiredMeters() {
long now = clock().wallTime();
int total = 0;
int expired = 0;
Iterator<Meter> it = iterator();
while (it.hasNext()) {
++total;
AtlasMeter m = (AtlasMeter) it.next();
if (m.hasExpired(now)) {
++expired;
it.remove();
}
}
logger.debug("removed {} expired meters out of {} total", expired, total);
cleanupCachedState();
}
/**
 * Refresh the set of LWC subscriptions from the service and sync them into the
 * streaming evaluator. No-op when LWC is disabled.
 */
private void fetchSubscriptions() {
  if (config.lwcEnabled()) {
    subManager.refresh();
    evaluator.sync(subManager.subscriptions());
  } else {
    logger.debug("lwc is disabled, skipping subscription config refresh");
  }
}
/**
 * Get a list of all consolidated measurements intended to be sent to Atlas and break them
 * into batches. Draining also prunes consolidators that no longer have any state and
 * records sizing metrics to the debug registry.
 *
 * @param t timestamp of the completed publish interval to drain
 * @return  batches of at most batchSize measurements, grouped by common tags
 */
List<RollupPolicy.Result> getBatches(long t) {
  final int n = atlasMeasurements.size();
  final List<RollupPolicy.Result> batches = new ArrayList<>(n / batchSize + 1);
  timePublishTask("getBatches", "atlasMeasurements", () -> {
    debugRegistry.distributionSummary("spectator.registrySize").record(n);
    List<Measurement> input = new ArrayList<>(n);
    Iterator<Map.Entry<Id, Consolidator>> it = atlasMeasurements.entrySet().iterator();
    while (it.hasNext()) {
      Map.Entry<Id, Consolidator> entry = it.next();
      Consolidator consolidator = entry.getValue();
      // Ensure it has been updated for this interval
      consolidator.update(t, Double.NaN);
      // Add the measurement to the list
      double v = consolidator.value(t);
      if (!Double.isNaN(v)) {
        input.add(new Measurement(entry.getKey(), t, v));
      }
      // Clean up if there is no longer a need to preserve the state for this id
      if (consolidator.isEmpty()) {
        it.remove();
      }
    }
    List<RollupPolicy.Result> results = rollupPolicy.apply(input);
    int rollupSize = results.stream().mapToInt(r -> r.measurements().size()).sum();
    debugRegistry.distributionSummary("spectator.rollupResultSize").record(rollupSize);
    // Rollup policy can result in multiple sets of metrics with different common tags.
    // Batches are computed using sets with the same common tags. This avoids needing to
    // merge the common tags into the ids and the larger payloads that would result from
    // replicating them on all measurements.
    for (RollupPolicy.Result result : results) {
      List<Measurement> ms = result.measurements();
      for (int i = 0; i < ms.size(); i += batchSize) {
        List<Measurement> batch = ms.subList(i, Math.min(ms.size(), i + batchSize));
        batches.add(new RollupPolicy.Result(result.commonTags(), batch));
      }
    }
  });
  return batches;
}
/**
 * Poll the meters and return the consolidated measurements for the last completed
 * publish interval. Also removes any expired meters as a side effect.
 */
@Override public Stream<Measurement> measurements() {
  long t = lastCompletedTimestamp(stepMillis);
  pollMeters(t);
  removeExpiredMeters();
  // Return the flattened list of measurements. Do not merge common tags into the result
  // as that is an internal detail and not expected by the user.
  return getBatches(t).stream().flatMap(r -> r.measurements().stream());
}
/** Create a counter that reports rates to Atlas at the LWC step resolution. */
@Override protected Counter newCounter(Id id) {
  return new AtlasCounter(id, clock(), meterTTL, lwcStepMillis);
}
/** Create a distribution summary that reports to Atlas at the LWC step resolution. */
@Override protected DistributionSummary newDistributionSummary(Id id) {
  return new AtlasDistributionSummary(id, clock(), meterTTL, lwcStepMillis);
}
/** Create a timer that reports to Atlas at the LWC step resolution. */
@Override protected Timer newTimer(Id id) {
  return new AtlasTimer(id, clock(), meterTTL, lwcStepMillis);
}
/** Create a gauge whose sampled values get step-aligned timestamps. */
@Override protected Gauge newGauge(Id id) {
  // Be sure to get StepClock so the measurements will have step aligned
  // timestamps.
  return new AtlasGauge(id, stepClock, meterTTL);
}
/** Create a gauge that reports the max value seen during each LWC step interval. */
@Override protected Gauge newMaxGauge(Id id) {
  return new AtlasMaxGauge(id, clock(), meterTTL, lwcStepMillis);
}
}
| 6,061 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/AtlasConfig.java | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Clock;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.RegistryConfig;
import java.time.Duration;
import java.util.Collections;
import java.util.Map;
/**
* Configuration for Atlas registry.
*/
public interface AtlasConfig extends RegistryConfig {

  /**
   * Returns the step size (reporting frequency) to use. The default is 1 minute.
   */
  default Duration step() {
    String v = get("atlas.step");
    return (v == null) ? Duration.ofMinutes(1) : Duration.parse(v);
  }

  /**
   * Returns the TTL for meters that do not have any activity. After this period the meter
   * will be considered expired and will not get reported. Default is 15 minutes.
   */
  default Duration meterTTL() {
    String v = get("atlas.meterTTL");
    return (v == null) ? Duration.ofMinutes(15) : Duration.parse(v);
  }

  /**
   * Returns true if publishing to Atlas is enabled. Default is true.
   */
  default boolean enabled() {
    String v = get("atlas.enabled");
    return v == null || Boolean.parseBoolean(v);
  }

  /**
   * Returns true if the registry should automatically start the background reporting threads
   * in the constructor. When using DI systems this can be used to automatically start the
   * registry when it is constructed. Otherwise the {@code AtlasRegistry.start()} method will
   * need to be called explicitly. Default is false.
   */
  default boolean autoStart() {
    String v = get("atlas.autoStart");
    return v != null && Boolean.parseBoolean(v);
  }

  /**
   * Returns the number of threads to use with the scheduler. The default is
   * 4 threads.
   */
  default int numThreads() {
    String v = get("atlas.numThreads");
    return (v == null) ? 4 : Integer.parseInt(v);
  }

  /**
   * Returns the URI for the Atlas backend. The default is
   * {@code http://localhost:7101/api/v1/publish}.
   */
  default String uri() {
    String v = get("atlas.uri");
    return (v == null) ? "http://localhost:7101/api/v1/publish" : v;
  }

  /**
   * Returns the step size (reporting frequency) to use for streaming to Atlas LWC.
   * The default is 5s. This is the highest resolution that would be supported for getting
   * an on-demand stream of the data. It must be less than or equal to the Atlas step
   * ({@link #step()}) and the Atlas step should be an even multiple of this value.
   */
  default Duration lwcStep() {
    String v = get("atlas.lwc.step");
    return (v == null) ? Duration.ofSeconds(5) : Duration.parse(v);
  }

  /**
   * Returns true if streaming to Atlas LWC is enabled. Default is false.
   */
  default boolean lwcEnabled() {
    String v = get("atlas.lwc.enabled");
    return v != null && Boolean.parseBoolean(v);
  }

  /**
   * Returns true if expressions with the same step size as Atlas publishing should be
   * ignored for streaming. This is used for cases where data being published to Atlas
   * is also sent into streaming from the backend. Default is true.
   */
  default boolean lwcIgnorePublishStep() {
    String v = get("atlas.lwc.ignore-publish-step");
    return v == null || Boolean.parseBoolean(v);
  }

  /** Returns the frequency for refreshing config settings from the LWC service. Default is 10s. */
  default Duration configRefreshFrequency() {
    String v = get("atlas.configRefreshFrequency");
    return (v == null) ? Duration.ofSeconds(10) : Duration.parse(v);
  }

  /** Returns the TTL for subscriptions from the LWC service, 15x the refresh frequency. */
  default Duration configTTL() {
    return configRefreshFrequency().multipliedBy(15);
  }

  /**
   * Returns the URI for the Atlas LWC endpoint to retrieve current subscriptions.
   * The default is {@code http://localhost:7101/lwc/api/v1/expressions/local-dev}.
   */
  default String configUri() {
    String v = get("atlas.config-uri");
    return (v == null) ? "http://localhost:7101/lwc/api/v1/expressions/local-dev" : v;
  }

  /**
   * Returns the URI for the Atlas LWC endpoint to evaluate the data for a subscription.
   * The default is {@code http://localhost:7101/lwc/api/v1/evaluate}.
   */
  default String evalUri() {
    String v = get("atlas.eval-uri");
    return (v == null) ? "http://localhost:7101/lwc/api/v1/evaluate" : v;
  }

  /**
   * Returns the connection timeout for requests to the backend. The default is
   * 1 second.
   */
  default Duration connectTimeout() {
    String v = get("atlas.connectTimeout");
    return (v == null) ? Duration.ofSeconds(1) : Duration.parse(v);
  }

  /**
   * Returns the read timeout for requests to the backend. The default is
   * 10 seconds.
   */
  default Duration readTimeout() {
    String v = get("atlas.readTimeout");
    return (v == null) ? Duration.ofSeconds(10) : Duration.parse(v);
  }

  /**
   * Returns the number of measurements per request to use for the backend. If more
   * measurements are found, then multiple requests will be made. The default is
   * 10,000.
   */
  default int batchSize() {
    String v = get("atlas.batchSize");
    return (v == null) ? 10000 : Integer.parseInt(v);
  }

  /**
   * Returns the common tags to apply to all metrics reported to Atlas. The returned tags
   * must only use valid characters as defined by {@link #validTagCharacters()}. The default
   * is an empty map.
   */
  default Map<String, String> commonTags() {
    return Collections.emptyMap();
  }

  /**
   * Returns a pattern indicating the valid characters for a tag key or value. The default is
   * {@code -._A-Za-z0-9~^}.
   */
  default String validTagCharacters() {
    return "-._A-Za-z0-9~^";
  }

  /**
   * Returns a map from tag key to a pattern indicating the valid characters for the values
   * of that key. The default is an empty map.
   *
   * @deprecated This method is no longer used internally.
   */
  @Deprecated
  default Map<String, String> validTagValueCharacters() {
    return Collections.emptyMap();
  }

  /**
   * Returns a registry to use for recording metrics about the behavior of the AtlasRegistry.
   * By default it will return null and the metrics will be reported to itself. In some cases
   * it is useful to customize this for debugging so that the metrics for the behavior of
   * AtlasRegistry will have a different failure mode than AtlasRegistry.
   */
  default Registry debugRegistry() {
    return null;
  }

  /**
   * Returns a rollup policy that will be applied to the measurements before sending to Atlas.
   * The policy will not be applied to data going to the streaming path. Default is a no-op
   * policy.
   */
  default RollupPolicy rollupPolicy() {
    return RollupPolicy.noop(commonTags());
  }

  /**
   * Avoid collecting right on boundaries to minimize transitions on step longs
   * during a collection. By default it will randomly distribute across the middle
   * of the step interval.
   *
   * @param clock    clock used to determine the current time
   * @param stepSize step interval size in milliseconds
   * @return         initial delay in milliseconds before the first collection
   */
  default long initialPollingDelay(Clock clock, long stepSize) {
    long now = clock.wallTime();
    long stepBoundary = now / stepSize * stepSize;

    // Buffer by 10% of the step interval on either side
    long offset = stepSize / 10;

    // For larger intervals spread it out, otherwise bias towards the start
    // to ensure there is plenty of time to send without needing to cross over
    // to the next interval. The threshold of 1s was chosen because it is typically
    // big enough to avoid GC troubles where it is common to see pause times in the
    // low 100s of milliseconds.
    if (offset >= 1000L) {
      // Check if the current delay is within the acceptable range
      long delay = now - stepBoundary;
      if (delay < offset) {
        return delay + offset;
      } else {
        return Math.min(delay, stepSize - offset);
      }
    } else {
      long firstTime = stepBoundary + stepSize / 10;
      return firstTime > now
          ? firstTime - now
          : firstTime + stepSize - now;
    }
  }

  /**
   * <strong>Alpha:</strong> this method is experimental and may change or be completely
   * removed with no notice.
   *
   * Override to provide a custom publisher for sending data to Atlas. The intended use is
   * for some cases where it is desirable to send the payload somewhere else or to use an
   * alternate client. If the return value is null, then the data will be sent via the normal
   * path.
   */
  default Publisher publisher() {
    return null;
  }
}
| 6,062 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/AtlasMaxGauge.java | /*
* Copyright 2014-2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Clock;
import com.netflix.spectator.api.Gauge;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Statistic;
import com.netflix.spectator.impl.StepDouble;
/**
 * Gauge that reports the maximum value submitted during an interval to Atlas. Main use-case
 * right now is for allowing the max stat used internally to AtlasDistributionSummary and
 * AtlasTimer to be transferred to a remote AtlasRegistry.
 */
class AtlasMaxGauge extends AtlasMeter implements Gauge {

  /** Tracks the max for the current step interval. */
  private final StepDouble value;

  /** Id with the statistic and dstype tags applied, used when reporting measurements. */
  private final Id stat;

  /** Create a new instance. */
  AtlasMaxGauge(Id id, Clock clock, long ttl, long step) {
    super(id, clock, ttl);
    // Start from NaN so the gauge also works correctly with negative values
    this.value = new StepDouble(Double.NaN, clock, step);
    // Add the statistic for typing; a statistic already present on the id is retained
    this.stat = AtlasMeter.addIfMissing(id, Statistic.max, DsType.gauge);
  }

  @Override void measure(long now, MeasurementConsumer consumer) {
    // Polling first forces the step value to rotate if there was no activity
    // during the current interval; only then is the timestamp safe to read.
    final double max = value.poll(now);
    if (Double.isFinite(max)) {
      consumer.accept(stat, value.timestamp(), max);
    }
  }

  @Override public void set(double v) {
    final long timestamp = clock.wallTime();
    value.getCurrent(timestamp).max(v);
    updateLastModTime(timestamp);
  }

  @Override public double value() {
    return value.poll();
  }
}
| 6,063 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/OverridableClock.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Clock;
/**
 * Wraps a clock implementation to allow overriding the wall clock time. This is typically
 * used to adjust the clock to the next step boundary during shutdown.
 */
class OverridableClock implements Clock {

  /** Underlying clock used when no override is in effect. */
  private final Clock impl;

  /** Overridden wall time, or -1 when the underlying clock should be used. */
  private long timestamp;

  /** Create a new instance. */
  OverridableClock(Clock impl) {
    this.impl = impl;
    this.timestamp = -1L;
  }

  /** Set the wall time to use. */
  void setWallTime(long timestamp) {
    this.timestamp = timestamp;
  }

  @Override
  public long wallTime() {
    return timestamp == -1L ? impl.wallTime() : timestamp;
  }

  @Override
  public long monotonicTime() {
    // Monotonic time is never overridden
    return impl.monotonicTime();
  }
}
| 6,064 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/RollupPolicy.java | /*
* Copyright 2014-2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.impl.Preconditions;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.function.Function;
/**
* Policy for performing a rollup on a set of measurements. This typically involves
* removing some dimensions from the ids and combining the results into an aggregate
* measurement.
*/
public interface RollupPolicy extends Function<List<Measurement>, List<RollupPolicy.Result>> {

  /** Does nothing, returns the input list without modification. */
  static RollupPolicy noop(Map<String, String> commonTags) {
    return ms -> Collections.singletonList(new Result(commonTags, ms));
  }

  /**
   * Create a new policy that will aggregate ids based on the statistic tag. Counter types
   * will use a sum aggregation and gauges will use max.
   *
   * @param commonTags
   *     Common tags that are applied to all measurements.
   * @param idMapper
   *     Map an id to a new identifier that will be used for the resulting aggregate measurement.
   * @return
   *     A rollup policy that will apply the mapping function to the ids of input measurements
   *     and aggregate the results.
   */
  static RollupPolicy fromIdMapper(Map<String, String> commonTags, Function<Id, Id> idMapper) {
    // Merge the common tags into the id before mapping so the mapper can act on them too
    Function<Id, Id> mapper = commonTags.isEmpty()
        ? idMapper
        : id -> idMapper.apply(id.withTags(commonTags));
    return ms -> Collections.singletonList(new Result(Rollups.aggregate(mapper, ms)));
  }

  /**
   * Create a new policy based on a list of rules. A rule consists of an Atlas query expression
   * and a set of dimensions that should be removed for matching measurements.
   *
   * @param commonTags
   *     Set of common tags that are applied to all measurements.
   * @param rules
   *     List of rules for specifying what dimensions should be removed.
   * @return
   *     A rollup policy that will apply the rules on the input measurements and aggregate the
   *     results.
   */
  static RollupPolicy fromRules(Map<String, String> commonTags, List<Rule> rules) {
    return Rollups.fromRules(commonTags, rules);
  }

  /**
   * Rule for matching a set of measurements and removing specified dimensions.
   */
  final class Rule {

    private final String query;
    private final List<String> rollup;

    /**
     * Create a new instance.
     *
     * @param query
     *     Atlas query expression that indicates the set of measurements matching this rule.
     * @param rollup
     *     Set of dimensions to remove from the matching measurements.
     */
    public Rule(String query, List<String> rollup) {
      this.query = Preconditions.checkNotNull(query, "query");
      this.rollup = Preconditions.checkNotNull(rollup, "rollup");
    }

    /** Return the query expression string. */
    public String query() {
      return query;
    }

    /** Return the set of dimensions to remove. */
    public List<String> rollup() {
      return rollup;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) return true;
      if (!(o instanceof Rule)) return false;
      Rule rule = (Rule) o;
      return query.equals(rule.query)
          && rollup.equals(rule.rollup);
    }

    @Override
    public int hashCode() {
      return Objects.hash(query, rollup);
    }
  }

  /** Result of applying the rollup policy. */
  final class Result {

    private final Map<String, String> commonTags;
    private final List<Measurement> measurements;

    /** Create a new instance with no common tags. */
    public Result(List<Measurement> measurements) {
      this(Collections.emptyMap(), measurements);
    }

    /**
     * Create a new instance.
     *
     * @param commonTags
     *     Common tags that should be applied to all measurements in this result.
     * @param measurements
     *     Measurements aggregated according to the policy.
     */
    public Result(Map<String, String> commonTags, List<Measurement> measurements) {
      this.commonTags = commonTags;
      this.measurements = measurements;
    }

    /** Return the common tags for this result. */
    public Map<String, String> commonTags() {
      return commonTags;
    }

    /** Return the measurements for this result. */
    public List<Measurement> measurements() {
      return measurements;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) return true;
      if (!(o instanceof Result)) return false;
      Result result = (Result) o;
      return commonTags.equals(result.commonTags)
          && measurements.equals(result.measurements);
    }

    @Override
    public int hashCode() {
      return Objects.hash(commonTags, measurements);
    }
  }
}
| 6,065 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/AtlasGauge.java | /*
* Copyright 2014-2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Clock;
import com.netflix.spectator.api.Gauge;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Statistic;
import com.netflix.spectator.impl.AtomicDouble;
/**
 * Meter that reports a single value to Atlas.
 */
class AtlasGauge extends AtlasMeter implements Gauge {

  /** Most recently set value. */
  private final AtomicDouble value;

  /** Id with the statistic and dstype tags applied, used when reporting measurements. */
  private final Id stat;

  /** Create a new instance. */
  AtlasGauge(Id id, Clock clock, long ttl) {
    super(id, clock, ttl);
    this.value = new AtomicDouble(0.0);
    // Add the statistic for typing; a statistic already present on the id is retained
    this.stat = AtlasMeter.addIfMissing(id, Statistic.gauge, DsType.gauge);
  }

  @Override void measure(long now, MeasurementConsumer consumer) {
    // NaN and infinite samples are suppressed
    final double sampled = value();
    if (Double.isFinite(sampled)) {
      consumer.accept(stat, clock.wallTime(), sampled);
    }
  }

  @Override public void set(double v) {
    value.set(v);
    updateLastModTime(clock.wallTime());
  }

  @Override public double value() {
    return value.get();
  }
}
| 6,066 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/Publisher.java | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.atlas.impl.EvalPayload;
import com.netflix.spectator.atlas.impl.PublishPayload;
import java.io.Closeable;
import java.util.concurrent.CompletableFuture;
/**
 * <strong>Alpha:</strong> this interface is experimental and may change or be completely
 * removed with no notice.
 *
 * Publisher for submitting data to Atlas.
 */
public interface Publisher extends Closeable {

  /** Initialize the publisher and get it ready to send data. */
  void init();

  /** Send a payload to an Atlas backend. Returns a future that completes when done. */
  CompletableFuture<Void> publish(PublishPayload payload);

  /** Send an evaluation payload to an Atlas LWC service. */
  CompletableFuture<Void> publish(EvalPayload payload);
}
| 6,067 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/DsType.java | /*
* Copyright 2014-2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Tag;
/**
* Data source type for Atlas. See the
* <a href="https://github.com/Netflix/atlas/wiki/Concepts#normalization">normalization</a>
* docs for more information.
*/
// NOTE: constants are intentionally lowercase; value() uses name() directly as the
// wire value for the "atlas.dstype" tag, so renaming a constant changes behavior.
enum DsType implements Tag {

  /** Sampled value that should be used as is without weighting. */
  gauge,

  /** Rate per second that should use weighted averaging during normalization. */
  rate,

  /**
   * Sum type used for inline aggregations on the backend. Sender must be careful to avoid
   * overcounting since the backend cannot dedup.
   */
  sum;

  @Override public String key() {
    return "atlas.dstype";
  }

  @Override public String value() {
    return name();
  }
}
| 6,068 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/StepClock.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Clock;
/**
* Wraps a clock implementation with one that only reports wall times on
* exact boundaries of the step. This is used so that measurements sampled
* from gauges will all have the same timestamp for a given reporting
* interval.
*/
class StepClock implements Clock {

  private final Clock baseClock;
  private final long stepSize;

  /**
   * Create a new instance.
   *
   * @param impl
   *     Underlying clock used to fetch the actual times.
   * @param step
   *     Step size used for rounding the reported wall clock times.
   */
  StepClock(Clock impl, long step) {
    this.baseClock = impl;
    this.stepSize = step;
  }

  /** Returns the wall time of the underlying clock floored to the last step boundary. */
  @Override
  public long wallTime() {
    final long now = baseClock.wallTime();
    return now - now % stepSize;
  }

  /** Monotonic time is passed through from the underlying clock unchanged. */
  @Override
  public long monotonicTime() {
    return baseClock.monotonicTime();
  }
}
| 6,069 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/impl/Consolidator.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.api.Statistic;
import com.netflix.spectator.api.Utils;
import com.netflix.spectator.impl.Preconditions;
/**
* Consolidates a set of measurements collected at a smaller step size to a set a measurement
* at a larger step size that is an even multiple of the primary.
*/
public interface Consolidator {

  /**
   * Update the state with a new primary datapoint.
   *
   * @param t
   *     Timestamp for the new value.
   * @param v
   *     Value to include in the consolidated aggregate. If there is no new measurement, then
   *     {@code NaN} can be used to force the completion of the consolidated value.
   */
  void update(long t, double v);

  /**
   * Update the state with a new primary datapoint. See {@link #update(long, double)} for more
   * details.
   */
  default void update(Measurement m) {
    update(m.timestamp(), m.value());
  }

  /**
   * Return the consolidated value for the specified timestamp. The timestamp should be for the
   * last completed interval.
   */
  double value(long t);

  /**
   * Returns true if the state is the same as it would be if a new instance of the consolidator
   * was created. This can be used to check if the current instance can be garbage collected.
   */
  boolean isEmpty();

  /** Returns true if this consolidator is intended for gauge values. */
  boolean isGauge();

  /**
   * Create a new consolidator instance based on the statistic in the id. If no statistic tag
   * is present, then it will be treated as a gauge.
   *
   * @param id
   *     Id for the measurement to consolidate.
   * @param step
   *     Consolidated step size.
   * @param multiple
   *     Multiple for the consolidate step size. The primary step is {@code step / multiple}.
   * @return
   *     A new consolidator instance.
   */
  static Consolidator create(Id id, long step, int multiple) {
    return create(Utils.getTagValue(id, "statistic"), step, multiple);
  }

  /**
   * Create a new consolidator instance based on the specified statistic.
   *
   * @param statistic
   *     Statistic used to determine which consolidation function to use.
   * @param step
   *     Consolidated step size.
   * @param multiple
   *     Multiple for the consolidate step size. The primary step is {@code step / multiple}.
   * @return
   *     A new consolidator instance.
   */
  static Consolidator create(Statistic statistic, long step, int multiple) {
    return create(statistic.name(), step, multiple);
  }

  /**
   * Create a new consolidator instance based on the specified statistic.
   *
   * @param statistic
   *     Statistic used to determine which consolidation function to use. If the statistic is
   *     null or unknown, then it will be treated as a gauge.
   * @param step
   *     Consolidated step size.
   * @param multiple
   *     Multiple for the consolidate step size. The primary step is {@code step / multiple}.
   * @return
   *     A new consolidator instance.
   */
  static Consolidator create(String statistic, long step, int multiple) {
    boolean gauge = isGauge(statistic);
    if (multiple == 1) {
      // Primary and consolidated step are the same, no aggregation needed.
      return new None(gauge);
    }
    return gauge ? new Max(step, multiple) : new Avg(step, multiple);
  }

  /**
   * Returns true if values for the specified statistic should be consolidated with gauge
   * semantics (max) rather than averaged. The counter-based statistics listed below are
   * rates and use averaging; a null or unrecognized statistic is treated as a gauge.
   */
  static boolean isGauge(String statistic) {
    switch (statistic == null ? "gauge" : statistic) {
      case "count":
      case "totalAmount":
      case "totalTime":
      case "totalOfSquares":
      case "percentile":
        return false;
      default:
        return true;
    }
  }

  /**
   * Placeholder implementation used when the primary and consolidated step sizes are
   * the same.
   */
  final class None implements Consolidator {

    // Timestamp of the most recently reported value, -1 until the first update.
    private long timestamp;
    // Most recently reported value, NaN if nothing has been reported yet.
    private double value;
    // True if this instance is used for gauge values, only affects isGauge().
    private boolean gauge;

    None(boolean gauge) {
      this.timestamp = -1L;
      this.value = Double.NaN;
      this.gauge = gauge;
    }

    @Override public void update(long t, double v) {
      // Only move forward in time, out of order updates are ignored.
      if (t > timestamp) {
        timestamp = t;
        value = v;
      }
    }

    @Override public double value(long t) {
      return timestamp == t ? value : Double.NaN;
    }

    @Override public boolean isEmpty() {
      return Double.isNaN(value);
    }

    @Override public boolean isGauge() {
      return gauge;
    }
  }

  /** Base class for consolidator implementations. */
  abstract class AbstractConsolidator implements Consolidator {

    /** Consolidated step size. */
    protected final long step;

    /** Multiple from primary to consolidated step. */
    protected final int multiple;

    // End boundary of the consolidated interval currently being accumulated,
    // -1 until the first update.
    private long timestamp;
    // Partially accumulated aggregate for the current interval.
    private double current;
    // Completed value for the interval ending at (timestamp - step).
    private double previous;

    AbstractConsolidator(long step, int multiple) {
      Preconditions.checkArg(step > 0L, "step must be > 0");
      Preconditions.checkArg(multiple > 0L, "multiple must be > 0");
      this.step = step;
      this.multiple = multiple;
      this.timestamp = -1L;
      this.current = Double.NaN;
      this.previous = Double.NaN;
    }

    // Round the timestamp up to the next consolidated step boundary. Timestamps
    // already on a boundary are unchanged.
    private long roundToConsolidatedStep(long t) {
      return (t % step == 0L) ? t : t / step * step + step;
    }

    /** Combines two values to create an aggregate used as the consolidated value. */
    protected abstract double aggregate(double v1, double v2);

    /** Performs any final computation on the aggregated value. */
    protected abstract double complete(double v);

    @Override public void update(long rawTimestamp, double value) {
      long t = roundToConsolidatedStep(rawTimestamp);
      if (timestamp < 0) {
        timestamp = t;
      }
      if (t == timestamp) {
        // Updating the same datapoint, just apply the update
        current = aggregate(current, value);
        if (rawTimestamp == timestamp) {
          // On the boundary, roll the value
          previous = complete(current);
          current = Double.NaN;
          timestamp = t + step;
        }
      } else if (t > timestamp) {
        if (t - timestamp == step) {
          // Previous time interval
          previous = complete(current);
        } else {
          // Gap in the data, clear out the previous sample
          previous = Double.NaN;
        }
        current = value;
        timestamp = t;
      }
    }

    @Override public double value(long t) {
      // Only the last completed interval is available.
      return (timestamp - t == step) ? previous : Double.NaN;
    }

    @Override public boolean isEmpty() {
      return Double.isNaN(previous) && Double.isNaN(current);
    }
  }

  /**
   * Averages the raw values. The denominator will always be the multiple so missing data
   * for some intervals will be treated as a zero. For counters this should give an accurate
   * average rate per second across the consolidated interval.
   */
  final class Avg extends AbstractConsolidator {

    Avg(long step, int multiple) {
      super(step, multiple);
    }

    @Override protected double aggregate(double v1, double v2) {
      // NaN indicates no value yet for one side, treat it as absent rather than
      // propagating NaN through the sum.
      return Double.isNaN(v1) ? v2 : Double.isNaN(v2) ? v1 : v1 + v2;
    }

    @Override protected double complete(double v) {
      return v / multiple;
    }

    @Override public boolean isGauge() {
      return false;
    }
  }

  /**
   * Selects the maximum value that is reported. Used for max gauges and similar types to
   * preserve the overall max.
   */
  final class Max extends AbstractConsolidator {

    Max(long step, int multiple) {
      super(step, multiple);
    }

    @Override protected double aggregate(double v1, double v2) {
      // NaN indicates no value yet for one side, treat it as absent rather than
      // propagating NaN through the max.
      return Double.isNaN(v1) ? v2 : Double.isNaN(v2) ? v1 : Math.max(v1, v2);
    }

    @Override protected double complete(double v) {
      return v;
    }

    @Override public boolean isGauge() {
      return true;
    }
  }
}
| 6,070 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/impl/Parser.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import com.netflix.spectator.impl.matcher.PatternUtils;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.HashSet;
import java.util.List;
/**
* Parses an Atlas data or query expression.
*
* <b>Classes in this package are only intended for use internally within spectator. They may
* change at any time and without notice.</b>
*/
public final class Parser {

  private Parser() {
  }

  /**
   * Parse an <a href="https://github.com/Netflix/atlas/wiki/Reference-data">Atlas data
   * expression</a>.
   *
   * @throws IllegalArgumentException if the expression is not a valid data expression.
   */
  public static DataExpr parseDataExpr(String expr) {
    try {
      return (DataExpr) parse(expr);
    } catch (ClassCastException e) {
      throw new IllegalArgumentException("invalid data expression: " + expr, e);
    }
  }

  /**
   * Parse an <a href="https://github.com/Netflix/atlas/wiki/Reference-query">Atlas query
   * expression</a>.
   *
   * @throws IllegalArgumentException if the expression is not a valid query expression.
   */
  public static Query parseQuery(String expr) {
    try {
      return (Query) parse(expr);
    } catch (ClassCastException e) {
      throw new IllegalArgumentException("invalid query expression: " + expr, e);
    }
  }

  // Expressions use a comma separated reverse polish notation. Tokens are processed
  // left to right; words (":eq", ":and", ...) pop their operands from the stack and
  // push the result. The final result is the single item left on the stack.
  @SuppressWarnings({"unchecked", "checkstyle:MethodLength", "PMD"})
  private static Object parse(String expr) {
    DataExpr.AggregateFunction af;
    Query q, q1, q2;
    String k, v;
    int depth = 0;
    List<String> tmp;
    // Non-null while accumulating a list literal, i.e. between "(" and the matching ")".
    List<String> vs = null;
    String[] parts = expr.split(",");
    Deque<Object> stack = new ArrayDeque<>(parts.length);
    for (String p : parts) {
      String token = p.trim();
      if (token.isEmpty()) {
        continue;
      }
      // While inside a list literal, capture tokens verbatim. The depth counter keeps
      // nested parens intact so only the matching close paren ends the list.
      if (vs != null && (depth > 0 || !")".equals(token))) {
        if ("(".equals(token)) {
          ++depth;
        } else if (")".equals(token)) {
          --depth;
        }
        vs.add(token);
        continue;
      }
      switch (token) {
        case "(":
          vs = new ArrayList<>();
          break;
        case ")":
          if (vs == null) {
            throw new IllegalArgumentException("unmatched closing paren: " + expr);
          }
          stack.push(vs);
          vs = null;
          depth = 0;
          break;
        case ":true":
          stack.push(Query.TRUE);
          break;
        case ":false":
          stack.push(Query.FALSE);
          break;
        case ":and":
          q2 = (Query) stack.pop();
          q1 = (Query) stack.pop();
          stack.push(q1.and(q2));
          break;
        case ":or":
          q2 = (Query) stack.pop();
          q1 = (Query) stack.pop();
          stack.push(q1.or(q2));
          break;
        case ":not":
          q = (Query) stack.pop();
          stack.push(q.not());
          break;
        case ":has":
          k = (String) stack.pop();
          stack.push(new Query.Has(k));
          break;
        case ":eq":
          v = (String) stack.pop();
          k = (String) stack.pop();
          stack.push(new Query.Equal(k, v));
          break;
        case ":in":
          tmp = (List<String>) stack.pop();
          k = (String) stack.pop();
          pushIn(stack, k, tmp);
          break;
        case ":lt":
          v = (String) stack.pop();
          k = (String) stack.pop();
          stack.push(new Query.LessThan(k, v));
          break;
        case ":le":
          v = (String) stack.pop();
          k = (String) stack.pop();
          stack.push(new Query.LessThanEqual(k, v));
          break;
        case ":gt":
          v = (String) stack.pop();
          k = (String) stack.pop();
          stack.push(new Query.GreaterThan(k, v));
          break;
        case ":ge":
          v = (String) stack.pop();
          k = (String) stack.pop();
          stack.push(new Query.GreaterThanEqual(k, v));
          break;
        case ":re":
          v = (String) stack.pop();
          k = (String) stack.pop();
          pushRegex(stack, new Query.Regex(k, v));
          break;
        case ":reic":
          v = (String) stack.pop();
          k = (String) stack.pop();
          pushRegex(stack, new Query.Regex(k, v, true, ":reic"));
          break;
        // :contains/:starts/:ends are desugared into anchored regex queries with the
        // user-provided value escaped.
        case ":contains":
          v = (String) stack.pop();
          k = (String) stack.pop();
          pushRegex(stack, new Query.Regex(k, ".*" + PatternUtils.escape(v)));
          break;
        case ":starts":
          v = (String) stack.pop();
          k = (String) stack.pop();
          pushRegex(stack, new Query.Regex(k, PatternUtils.escape(v)));
          break;
        case ":ends":
          v = (String) stack.pop();
          k = (String) stack.pop();
          pushRegex(stack, new Query.Regex(k, ".*" + PatternUtils.escape(v) + "$"));
          break;
        case ":all":
          q = (Query) stack.pop();
          stack.push(new DataExpr.All(q));
          break;
        case ":sum":
          q = (Query) stack.pop();
          stack.push(new DataExpr.Sum(q));
          break;
        case ":min":
          q = (Query) stack.pop();
          stack.push(new DataExpr.Min(q));
          break;
        case ":max":
          q = (Query) stack.pop();
          stack.push(new DataExpr.Max(q));
          break;
        case ":count":
          q = (Query) stack.pop();
          stack.push(new DataExpr.Count(q));
          break;
        case ":by":
          tmp = (List<String>) stack.pop();
          af = (DataExpr.AggregateFunction) stack.pop();
          stack.push(new DataExpr.GroupBy(af, tmp));
          break;
        case ":rollup-drop":
          tmp = (List<String>) stack.pop();
          af = (DataExpr.AggregateFunction) stack.pop();
          stack.push(new DataExpr.DropRollup(af, tmp));
          break;
        case ":rollup-keep":
          tmp = (List<String>) stack.pop();
          af = (DataExpr.AggregateFunction) stack.pop();
          stack.push(new DataExpr.KeepRollup(af, tmp));
          break;
        default:
          // Tokens starting with ':' are reserved for words; everything else is a
          // plain string operand.
          if (token.startsWith(":")) {
            throw new IllegalArgumentException("unknown word '" + token + "'");
          }
          stack.push(token);
          break;
      }
    }
    Object obj = stack.pop();
    if (!stack.isEmpty()) {
      throw new IllegalArgumentException("too many items remaining on stack: " + stack);
    }
    return obj;
  }

  // Regex queries that match any value are simplified to a has-key check.
  private static void pushRegex(Deque<Object> stack, Query.Regex q) {
    if (q.alwaysMatches()) {
      stack.push(new Query.Has(q.key()));
    } else {
      stack.push(q);
    }
  }

  // An :in clause with a single value is equivalent to an equality check.
  private static void pushIn(Deque<Object> stack, String k, List<String> values) {
    if (values.size() == 1)
      stack.push(new Query.Equal(k, values.get(0)));
    else
      stack.push(new Query.In(k, new HashSet<>(values)));
  }
}
| 6,071 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/impl/GzipLevelOutputStream.java | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import java.io.IOException;
import java.io.OutputStream;
import java.util.zip.Deflater;
import java.util.zip.GZIPOutputStream;
/** Wrap GZIPOutputStream to set the best speed compression level. */
/** Wrap GZIPOutputStream to set the best speed compression level. */
final class GzipLevelOutputStream extends GZIPOutputStream {

  /**
   * Creates a new gzip stream wrapping {@code outputStream} with the deflater
   * configured to favor speed over compression ratio.
   *
   * @throws IOException if the underlying GZIPOutputStream cannot write the header.
   */
  GzipLevelOutputStream(OutputStream outputStream) throws IOException {
    super(outputStream);
    this.def.setLevel(Deflater.BEST_SPEED);
  }
}
| 6,072 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/impl/QueryIndex.java | /*
* Copyright 2014-2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.impl.Cache;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
/**
* Index that to efficiently match an {@link com.netflix.spectator.api.Id} against a set of
* queries that are known in advance. The index is thread safe for queries. Updates to the
* index should be done from a single thread at a time.
*/
@SuppressWarnings("PMD.LinguisticNaming")
public final class QueryIndex<T> {

  /**
   * Supplier to create a new instance of a cache used for other checks. The default should
   * be fine for most uses, but heavy uses with many expressions and high throughput may
   * benefit from an alternate implementation.
   */
  public interface CacheSupplier<V> extends Supplier<Cache<String, List<QueryIndex<V>>>> {
  }

  /** Default supplier based on a simple LFU cache. */
  public static class DefaultCacheSupplier<V> implements CacheSupplier<V> {

    private final Registry registry;

    DefaultCacheSupplier(Registry registry) {
      this.registry = registry;
    }

    @Override
    public Cache<String, List<QueryIndex<V>>> get() {
      return Cache.lfu(registry, "QueryIndex", 100, 1000);
    }
  }

  /**
   * Return a new instance of an index that is empty. The default caching behavior will be
   * used.
   */
  public static <V> QueryIndex<V> newInstance(Registry registry) {
    return newInstance(new DefaultCacheSupplier<>(registry));
  }

  /**
   * Return a new instance of an index that is empty. The caches will be used to cache the
   * results of regex or other checks to try and avoid scans with repeated string values
   * across many ids.
   */
  public static <V> QueryIndex<V> newInstance(CacheSupplier<V> cacheSupplier) {
    return new QueryIndex<>(cacheSupplier, "name");
  }

  /**
   * Return a new instance of an index that is empty and doesn't have an explicit key set.
   * Used internally rather than {@link #newInstance(CacheSupplier)} which sets the key to {@code name}
   * so the root node will be correct for traversing the id.
   */
  private static <V> QueryIndex<V> empty(CacheSupplier<V> cacheSupplier) {
    return new QueryIndex<>(cacheSupplier, null);
  }

  /**
   * Compare the strings and put {@code name} first and then normally sort the other keys.
   * This allows the {@link Id} to be traversed in order while performing the lookup.
   */
  private static int compare(String k1, String k2) {
    if ("name".equals(k1) && "name".equals(k2)) {
      return 0;
    } else if ("name".equals(k1)) {
      return -1;
    } else if ("name".equals(k2)) {
      return 1;
    } else {
      return k1.compareTo(k2);
    }
  }

  private final CacheSupplier<T> cacheSupplier;

  // Tag key checked at this level of the tree. Null until the first query is added
  // to a node created via empty().
  private volatile String key;

  // Sub-indexes for equality conditions on this level's key, keyed by the value to match.
  private final ConcurrentHashMap<String, QueryIndex<T>> equalChecks;

  // Sub-indexes for non-equality conditions (regex, ranges, etc.) on this level's key.
  private final ConcurrentHashMap<Query.KeyQuery, QueryIndex<T>> otherChecks;

  // Prefix tree used to narrow the set of other checks that need to be evaluated
  // for a given value.
  private final PrefixTree<Query.KeyQuery> otherChecksTree;

  // Caches the sub-indexes from otherChecks that match a given value so the scan can
  // be skipped for values that are seen repeatedly. Cleared on any index mutation.
  private final Cache<String, List<QueryIndex<T>>> otherChecksCache;

  // Sub-index for queries that only require this level's key to be present.
  private volatile QueryIndex<T> hasKeyIdx;

  // Sub-index for queries whose next condition is on a different key than this level's key.
  private volatile QueryIndex<T> otherKeysIdx;

  // Sub-index for inverted queries that should also match when this level's key is
  // missing from the id.
  private volatile QueryIndex<T> missingKeysIdx;

  // Values whose queries are fully satisfied once this node is reached.
  private final Set<T> matches;

  /** Create a new instance. */
  private QueryIndex(CacheSupplier<T> cacheSupplier, String key) {
    this.cacheSupplier = cacheSupplier;
    this.key = key;
    this.equalChecks = new ConcurrentHashMap<>();
    this.otherChecks = new ConcurrentHashMap<>();
    this.otherChecksTree = new PrefixTree<>();
    this.otherChecksCache = cacheSupplier.get();
    this.hasKeyIdx = null;
    this.otherKeysIdx = null;
    this.missingKeysIdx = null;
    this.matches = new CopyOnWriteArraySet<>();
  }

  // Convert a conjunction into a list of key queries ordered with "name" first and
  // the remaining keys sorted, matching the traversal order of an Id.
  private List<Query.KeyQuery> sort(Query query) {
    List<Query.KeyQuery> result = new ArrayList<>();
    for (Query q : query.andList()) {
      result.add((Query.KeyQuery) q);
    }
    result.sort((q1, q2) -> compare(q1.key(), q2.key()));
    return result;
  }

  /**
   * Add a value that should match for the specified query.
   *
   * @param query
   *     Query that corresponds to the value.
   * @param value
   *     Value to return for ids that match the query.
   * @return
   *     This index so it can be used in a fluent manner.
   */
  public QueryIndex<T> add(Query query, T value) {
    for (Query q : query.dnfList()) {
      if (q == Query.TRUE) {
        matches.add(value);
      } else if (q == Query.FALSE) {
        break;
      } else {
        add(sort(q), 0, value);
      }
    }
    return this;
  }

  private void add(List<Query.KeyQuery> queries, int i, T value) {
    if (i < queries.size()) {
      Query.KeyQuery kq = queries.get(i);

      // Check for additional queries based on the same key and combine into a
      // composite if needed
      Query.CompositeKeyQuery composite = null;
      int j = i + 1;
      while (j < queries.size()) {
        Query.KeyQuery q = queries.get(j);
        if (kq.key().equals(q.key())) {
          if (composite == null) {
            composite = new Query.CompositeKeyQuery(kq);
            kq = composite;
          }
          composite.add(q);
          ++j;
        } else {
          break;
        }
      }

      if (key == null) {
        key = kq.key();
      }

      if (key.equals(kq.key())) {
        if (kq instanceof Query.Equal) {
          String v = ((Query.Equal) kq).value();
          QueryIndex<T> idx = equalChecks.computeIfAbsent(v, id -> QueryIndex.empty(cacheSupplier));
          idx.add(queries, j, value);
        } else if (kq instanceof Query.Has) {
          if (hasKeyIdx == null) {
            hasKeyIdx = QueryIndex.empty(cacheSupplier);
          }
          hasKeyIdx.add(queries, j, value);
        } else {
          QueryIndex<T> idx = otherChecks.computeIfAbsent(kq, id -> QueryIndex.empty(cacheSupplier));
          idx.add(queries, j, value);
          if (kq instanceof Query.Regex) {
            Query.Regex re = (Query.Regex) kq;
            otherChecksTree.put(re.pattern().prefix(), kq);
          } else {
            otherChecksTree.put(null, kq);
          }
          otherChecksCache.clear();

          // Not queries should match if the key is missing from the id, so they need to
          // be included in the other keys sub-tree as well. Check this by seeing if it will
          // match an empty map as there could be a variety of inverted types.
          if (kq.matches(Collections.emptyMap())) {
            if (missingKeysIdx == null) {
              missingKeysIdx = QueryIndex.empty(cacheSupplier);
            }
            missingKeysIdx.add(queries, j, value);
          }
        }
      } else {
        if (otherKeysIdx == null) {
          otherKeysIdx = QueryIndex.empty(cacheSupplier);
        }
        otherKeysIdx.add(queries, i, value);
      }
    } else {
      matches.add(value);
    }
  }

  /**
   * Remove the specified value associated with a specific query from the index. Returns
   * true if a value was successfully removed.
   */
  public boolean remove(Query query, T value) {
    boolean result = false;
    for (Query q : query.dnfList()) {
      if (q == Query.TRUE) {
        result |= matches.remove(value);
      } else if (q == Query.FALSE) {
        break;
      } else {
        result |= remove(sort(q), 0, value);
      }
    }
    return result;
  }

  private boolean remove(List<Query.KeyQuery> queries, int i, T value) {
    boolean result = false;
    if (i < queries.size()) {
      Query.KeyQuery kq = queries.get(i);

      // Check for additional queries based on the same key and combine into a
      // composite if needed
      Query.CompositeKeyQuery composite = null;
      int j = i + 1;
      while (j < queries.size()) {
        Query.KeyQuery q = queries.get(j);
        if (kq.key().equals(q.key())) {
          if (composite == null) {
            composite = new Query.CompositeKeyQuery(kq);
            kq = composite;
          }
          composite.add(q);
          ++j;
        } else {
          break;
        }
      }

      if (key != null && key.equals(kq.key())) {
        if (kq instanceof Query.Equal) {
          String v = ((Query.Equal) kq).value();
          QueryIndex<T> idx = equalChecks.get(v);
          if (idx != null) {
            result |= idx.remove(queries, j, value);
            // Prune empty sub-indexes so they can be garbage collected.
            if (idx.isEmpty())
              equalChecks.remove(v);
          }
        } else if (kq instanceof Query.Has) {
          if (hasKeyIdx != null) {
            result |= hasKeyIdx.remove(queries, j, value);
            if (hasKeyIdx.isEmpty())
              hasKeyIdx = null;
          }
        } else {
          QueryIndex<T> idx = otherChecks.get(kq);
          if (idx != null && idx.remove(queries, j, value)) {
            result = true;
            otherChecksCache.clear();
            if (idx.isEmpty()) {
              otherChecks.remove(kq);
              if (kq instanceof Query.Regex) {
                Query.Regex re = (Query.Regex) kq;
                otherChecksTree.remove(re.pattern().prefix(), kq);
              } else {
                otherChecksTree.remove(null, kq);
              }
            }
          }

          // Not queries should match if the key is missing from the id, so they need to
          // be included in the other keys sub-tree as well. Check this by seeing if it will
          // match an empty map as there could be a variety of inverted types.
          if (kq.matches(Collections.emptyMap()) && missingKeysIdx != null) {
            result |= missingKeysIdx.remove(queries, j, value);
            if (missingKeysIdx.isEmpty())
              missingKeysIdx = null;
          }
        }
      } else if (otherKeysIdx != null) {
        result |= otherKeysIdx.remove(queries, i, value);
        if (otherKeysIdx.isEmpty())
          otherKeysIdx = null;
      }
    } else {
      result |= matches.remove(value);
    }
    return result;
  }

  /**
   * Returns true if this index is empty and wouldn't match any ids.
   */
  public boolean isEmpty() {
    return matches.isEmpty()
        && equalChecks.values().stream().allMatch(QueryIndex::isEmpty)
        && otherChecks.values().stream().allMatch(QueryIndex::isEmpty)
        && (hasKeyIdx == null || hasKeyIdx.isEmpty())
        && (otherKeysIdx == null || otherKeysIdx.isEmpty())
        && (missingKeysIdx == null || missingKeysIdx.isEmpty());
  }

  /**
   * Find all values where the corresponding queries match the specified id.
   *
   * @param id
   *     Id to check against the queries.
   * @return
   *     List of all matching values for the id.
   */
  public List<T> findMatches(Id id) {
    List<T> result = new ArrayList<>();
    forEachMatch(id, result::add);
    return result;
  }

  /**
   * Invoke the consumer for all values where the corresponding queries match the specified id.
   *
   * @param id
   *     Id to check against the queries.
   * @param consumer
   *     Function to invoke for values associated with a query that matches the id.
   */
  public void forEachMatch(Id id, Consumer<T> consumer) {
    forEachMatch(id, 0, consumer);
  }

  @SuppressWarnings("PMD.NPathComplexity")
  private void forEachMatch(Id tags, int i, Consumer<T> consumer) {
    // Matches for this level
    matches.forEach(consumer);

    if (key != null) {

      boolean keyPresent = false;

      // The id tags are assumed to be sorted in the same order as the index keys,
      // so scan forward until this level's key is found or passed.
      for (int j = i; j < tags.size(); ++j) {
        String k = tags.getKey(j);
        String v = tags.getValue(j);
        int cmp = compare(k, key);
        if (cmp == 0) {
          keyPresent = true;

          // Find exact matches
          QueryIndex<T> eqIdx = equalChecks.get(v);
          if (eqIdx != null) {
            eqIdx.forEachMatch(tags, i + 1, consumer);
          }

          // Scan for matches with other conditions
          List<QueryIndex<T>> otherMatches = otherChecksCache.get(v);
          if (otherMatches == null) {
            // Avoid the list and cache allocations if there are no other checks at
            // this level
            if (!otherChecks.isEmpty()) {
              List<QueryIndex<T>> tmp = new ArrayList<>();
              otherChecksTree.forEach(v, kq -> {
                if (kq.matches(v)) {
                  QueryIndex<T> idx = otherChecks.get(kq);
                  if (idx != null) {
                    tmp.add(idx);
                    idx.forEachMatch(tags, i + 1, consumer);
                  }
                }
              });
              otherChecksCache.put(v, tmp);
            }
          } else {
            // Enhanced for loop typically results in iterator being allocated. Using
            // size/get avoids the allocation and has better throughput.
            int n = otherMatches.size();
            for (int p = 0; p < n; ++p) {
              otherMatches.get(p).forEachMatch(tags, i + 1, consumer);
            }
          }

          // Check matches for has key
          if (hasKeyIdx != null) {
            hasKeyIdx.forEachMatch(tags, i, consumer);
          }
        }

        // Quit loop if the key was found or not present
        if (cmp >= 0) {
          break;
        }
      }

      // Check matches with other keys
      if (otherKeysIdx != null) {
        otherKeysIdx.forEachMatch(tags, i, consumer);
      }

      // Check matches with missing keys
      if (missingKeysIdx != null && !keyPresent) {
        missingKeysIdx.forEachMatch(tags, i, consumer);
      }
    }
  }

  /**
   * Find all values where the corresponding queries match the specified tags. This can be
   * used if the tags are not already structured as a spectator Id.
   *
   * @param tags
   *     Function to look up the value for a given tag key. The function should return
   *     {@code null} if there is no value for the key.
   * @return
   *     List of all matching values for the id.
   */
  public List<T> findMatches(Function<String, String> tags) {
    List<T> result = new ArrayList<>();
    forEachMatch(tags, result::add);
    return result;
  }

  /**
   * Invoke the consumer for all values where the corresponding queries match the specified tags.
   * This can be used if the tags are not already structured as a spectator Id.
   *
   * @param tags
   *     Function to look up the value for a given tag key. The function should return
   *     {@code null} if there is no value for the key.
   * @param consumer
   *     Function to invoke for values associated with a query that matches the id.
   */
  public void forEachMatch(Function<String, String> tags, Consumer<T> consumer) {
    // Matches for this level
    matches.forEach(consumer);

    boolean keyPresent = false;
    if (key != null) {
      String v = tags.apply(key);
      if (v != null) {
        keyPresent = true;

        // Find exact matches
        QueryIndex<T> eqIdx = equalChecks.get(v);
        if (eqIdx != null) {
          eqIdx.forEachMatch(tags, consumer);
        }

        // Scan for matches with other conditions
        List<QueryIndex<T>> otherMatches = otherChecksCache.get(v);
        if (otherMatches == null) {
          // Avoid the list and cache allocations if there are no other checks at
          // this level
          if (!otherChecks.isEmpty()) {
            List<QueryIndex<T>> tmp = new ArrayList<>();
            otherChecksTree.forEach(v, kq -> {
              if (kq.matches(v)) {
                QueryIndex<T> idx = otherChecks.get(kq);
                if (idx != null) {
                  tmp.add(idx);
                  idx.forEachMatch(tags, consumer);
                }
              }
            });
            otherChecksCache.put(v, tmp);
          }
        } else {
          // Enhanced for loop typically results in iterator being allocated. Using
          // size/get avoids the allocation and has better throughput.
          int n = otherMatches.size();
          for (int p = 0; p < n; ++p) {
            otherMatches.get(p).forEachMatch(tags, consumer);
          }
        }

        // Check matches for has key
        if (hasKeyIdx != null) {
          hasKeyIdx.forEachMatch(tags, consumer);
        }
      }
    }

    // Check matches with other keys
    if (otherKeysIdx != null) {
      otherKeysIdx.forEachMatch(tags, consumer);
    }

    // Check matches with missing keys
    if (missingKeysIdx != null && !keyPresent) {
      missingKeysIdx.forEachMatch(tags, consumer);
    }
  }

  @Override public String toString() {
    StringBuilder builder = new StringBuilder();
    buildString(builder, 0);
    return builder.toString();
  }

  private StringBuilder indent(StringBuilder builder, int n) {
    for (int i = 0; i < n * 4; ++i) {
      builder.append(' ');
    }
    return builder;
  }

  // Recursively render the tree structure, used by toString() for debugging.
  private void buildString(StringBuilder builder, int n) {
    if (key != null) {
      indent(builder, n).append("key: [").append(key).append("]\n");
    }
    if (!equalChecks.isEmpty()) {
      indent(builder, n).append("equal checks:\n");
      equalChecks.forEach((v, idx) -> {
        indent(builder, n).append("- [").append(v).append("]\n");
        idx.buildString(builder, n + 1);
      });
    }
    if (!otherChecks.isEmpty()) {
      indent(builder, n).append("other checks:\n");
      otherChecks.forEach((kq, idx) -> {
        indent(builder, n).append("- [").append(kq).append("]\n");
        idx.buildString(builder, n + 1);
      });
    }
    if (hasKeyIdx != null) {
      indent(builder, n).append("has key:\n");
      hasKeyIdx.buildString(builder, n + 1);
    }
    if (otherKeysIdx != null) {
      indent(builder, n).append("other keys:\n");
      otherKeysIdx.buildString(builder, n + 1);
    }
    if (missingKeysIdx != null) {
      indent(builder, n).append("missing keys:\n");
      missingKeysIdx.buildString(builder, n + 1);
    }
    if (!matches.isEmpty()) {
      indent(builder, n).append("matches:\n");
      for (T value : matches) {
        indent(builder, n).append("- [").append(value).append("]\n");
      }
    }
  }
}
| 6,073 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/impl/PublishPayload.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import com.netflix.spectator.api.Measurement;
import java.util.List;
import java.util.Map;
/**
* Wraps a list of measurements with a set of common tags. The common tags are
* typically used for things like the application and instance id.
*
* <b>Classes in this package are only intended for use internally within spectator. They may
* change at any time and without notice.</b>
*/
public final class PublishPayload {

  private final Map<String, String> tags;
  private final List<Measurement> metrics;

  /**
   * Create a new instance.
   *
   * @param tags
   *     Common tags applied to all measurements in this payload.
   * @param metrics
   *     Measurements to publish.
   */
  public PublishPayload(Map<String, String> tags, List<Measurement> metrics) {
    this.tags = tags;
    this.metrics = metrics;
  }

  /** Return the common tags. Needs to be public for Jackson bean mapper. */
  public Map<String, String> getTags() {
    return tags;
  }

  /** Return the list of measurements. Needs to be public for Jackson bean mapper. */
  public List<Measurement> getMetrics() {
    return metrics;
  }

  @Override public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    PublishPayload other = (PublishPayload) o;
    return tags.equals(other.tags) && metrics.equals(other.metrics);
  }

  @Override public int hashCode() {
    int h = tags.hashCode();
    h = 31 * h + metrics.hashCode();
    return h;
  }

  @Override public String toString() {
    return "PublishPayload(tags=" + tags + ", metrics=" + metrics + ")";
  }
}
| 6,074 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/impl/ValidationResponse.java | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import java.util.List;
/**
* Validation failure response from Atlas publish endpoint.
*/
@SuppressWarnings("PMD.DataClass")
final class ValidationResponse {

  private String type;
  private int errorCount;
  private List<String> message; // singular to match server response

  /** Type string reported by the server. */
  public String getType() {
    return type;
  }

  public void setType(String type) {
    this.type = type;
  }

  /** Error count reported by the server. */
  public int getErrorCount() {
    return errorCount;
  }

  public void setErrorCount(int errorCount) {
    this.errorCount = errorCount;
  }

  /** Individual failure messages returned by the server. */
  public List<String> getMessage() {
    return message;
  }

  public void setMessage(List<String> message) {
    this.message = message;
  }

  /** Collapse the failure messages into a single summary string for logging. */
  String errorSummary() {
    if (message == null || message.isEmpty()) {
      return "unknown cause";
    }
    return String.join("; ", message);
  }
}
| 6,075 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/impl/Subscriptions.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import com.netflix.spectator.impl.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
/**
* Model object for subscriptions payload coming from LWC service.
*
* <b>Classes in this package are only intended for use internally within spectator. They may
* change at any time and without notice.</b>
*/
public final class Subscriptions {

  private static final Logger LOGGER = LoggerFactory.getLogger(Subscriptions.class);

  private List<Subscription> expressions = Collections.emptyList();

  /** Create a new instance. */
  public Subscriptions() {
    // Will get filled in with set methods
  }

  /**
   * Merge the subscriptions from this update into a map from subscriptions to
   * expiration times.
   *
   * @param subs
   *     Existing subscriptions. The map value is the expiration time in millis since
   *     the epoch.
   * @param currentTime
   *     Current time to use for checking if entries are expired.
   * @param expirationTime
   *     Expiration time used for new and updated entries.
   */
  public void update(Map<Subscription, Long> subs, long currentTime, long expirationTime) {
    // Refresh the expiration time for everything in this update; a null previous
    // value means the subscription was not seen before.
    for (Subscription subscription : expressions) {
      Long previous = subs.put(subscription, expirationTime);
      if (previous == null) {
        LOGGER.debug("new subscription: {}", subscription);
      }
    }

    // Drop any entries whose expiration time has already passed
    subs.entrySet().removeIf(entry -> {
      if (entry.getValue() < currentTime) {
        LOGGER.debug("expired: {}", entry.getKey());
        return true;
      }
      return false;
    });
  }

  /** Return the available subscriptions. */
  public List<Subscription> getExpressions() {
    return expressions;
  }

  /** Set the available subscriptions. */
  public void setExpressions(List<Subscription> expressions) {
    this.expressions = Preconditions.checkNotNull(expressions, "expressions");
  }

  /** Set the available subscriptions. */
  public Subscriptions withExpressions(List<Subscription> expressions) {
    setExpressions(expressions);
    return this;
  }

  @Override public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    Subscriptions other = (Subscriptions) o;
    return expressions.equals(other.expressions);
  }

  @Override public int hashCode() {
    return expressions.hashCode();
  }

  @Override public String toString() {
    return "Subscriptions(" + expressions + ")";
  }
}
| 6,076 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/impl/TagsValuePair.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import java.util.Collections;
import java.util.Map;
/**
* Pair consisting of a set of tags and a double value.
*
* <b>Classes in this package are only intended for use internally within spectator. They may
* change at any time and without notice.</b>
*/
public final class TagsValuePair {
private final Map<String, String> tags;
private final double value;
/** Create a new instance. */
public TagsValuePair(Map<String, String> tags, double value) {
this.tags = Collections.unmodifiableMap(tags);
this.value = value;
}
/** Return the tags from the pair. */
public Map<String, String> tags() {
return tags;
}
/** Return the value from the pair. */
public double value() {
return value;
}
@Override public String toString() {
return "TagsValuePair(" + tags + "," + value + ")";
}
@Override public boolean equals(Object obj) {
if (this == obj) return true;
if (!(obj instanceof TagsValuePair)) return false;
TagsValuePair other = (TagsValuePair) obj;
return tags.equals(other.tags) && Double.compare(value, other.value) == 0;
}
@Override public int hashCode() {
int result = tags.hashCode();
result = 31 * result + Double.hashCode(value);
return result;
}
}
| 6,077 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/impl/EvalPayload.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
/**
* Wraps a list of measurements with a set of common tags. The common tags are
* typically used for things like the application and instance id.
*
* <b>Classes in this package are only intended for use internally within spectator. They may
* change at any time and without notice.</b>
*/
public final class EvalPayload {

  private final long timestamp;
  private final List<Metric> metrics;
  private final List<Message> messages;

  /**
   * Create a new instance.
   *
   * @param timestamp
   *     Timestamp in milliseconds since the epoch that applies to all metrics.
   * @param metrics
   *     Metric values for this payload.
   * @param messages
   *     Diagnostic messages to send back to the user.
   */
  public EvalPayload(long timestamp, List<Metric> metrics, List<Message> messages) {
    this.timestamp = timestamp;
    this.metrics = metrics;
    this.messages = messages;
  }

  /** Create a new instance with no diagnostic messages. */
  public EvalPayload(long timestamp, List<Metric> metrics) {
    this(timestamp, metrics, Collections.emptyList());
  }

  /** Return the timestamp for metrics in this payload. */
  public long getTimestamp() {
    return timestamp;
  }

  /** Return the metric values for the data in this payload. */
  public List<Metric> getMetrics() {
    return metrics;
  }

  /** Return any diagnostic messages that should be sent back to the user. */
  public List<Message> getMessages() {
    return messages;
  }

  /**
   * Break the payload down to a set of batches to limit the size of requests going to the
   * service.
   *
   * @param batchSize
   *     Size of the metric batches to create.
   * @return
   *     List of payloads that have at most {@code batchSize} metrics per payload.
   */
  public List<EvalPayload> toBatches(int batchSize) {
    int size = metrics.size();
    if (size <= batchSize) {
      // Common case: everything fits into a single batch, avoid copying
      return Collections.singletonList(this);
    } else {
      List<EvalPayload> payloads = new ArrayList<>(size / batchSize + 1);
      for (int i = 0; i < size; i += batchSize) {
        List<Metric> batch = metrics.subList(i, Math.min(size, i + batchSize));
        // There shouldn't be many messages, stick in the first batch
        List<Message> msgs = (i == 0) ? messages : Collections.emptyList();
        payloads.add(new EvalPayload(timestamp, batch, msgs));
      }
      return payloads;
    }
  }

  @Override public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    EvalPayload payload = (EvalPayload) o;
    return timestamp == payload.timestamp
        && metrics.equals(payload.metrics)
        && messages.equals(payload.messages);
  }

  @Override public int hashCode() {
    // Long.hashCode produces the same value as the manual (t ^ (t >>> 32)) fold
    int result = Long.hashCode(timestamp);
    result = 31 * result + metrics.hashCode();
    result = 31 * result + messages.hashCode();
    return result;
  }

  @Override public String toString() {
    return "EvalPayload(timestamp=" + timestamp
        + ", metrics=" + metrics
        + ", messages=" + messages + ")";
  }

  /** Metric value. */
  public static final class Metric {
    private final String id;
    private final Map<String, String> tags;
    private final double value;

    /** Create a new instance. */
    public Metric(String id, Map<String, String> tags, double value) {
      this.id = id;
      this.tags = tags;
      this.value = value;
    }

    /** Id for the expression that this data corresponds with. */
    public String getId() {
      return id;
    }

    /** Tags for identifying the metric. */
    public Map<String, String> getTags() {
      return tags;
    }

    /** Value for the metric. */
    public double getValue() {
      return value;
    }

    @Override public boolean equals(Object o) {
      if (this == o) return true;
      if (o == null || getClass() != o.getClass()) return false;
      Metric metric = (Metric) o;
      return Double.compare(metric.value, value) == 0
          && id.equals(metric.id)
          && tags.equals(metric.tags);
    }

    @Override public int hashCode() {
      // Double.hashCode is equivalent to folding doubleToLongBits manually
      int result = id.hashCode();
      result = 31 * result + tags.hashCode();
      result = 31 * result + Double.hashCode(value);
      return result;
    }

    @Override public String toString() {
      return "Metric(id=" + id + ", tags=" + tags + ", value=" + value + ")";
    }
  }

  /** Message. */
  @SuppressWarnings("PMD.AvoidFieldNameMatchingTypeName")
  public static final class Message {
    private final String id;
    private final DiagnosticMessage message;

    /** Create a new instance. */
    public Message(String id, DiagnosticMessage message) {
      this.id = id;
      this.message = message;
    }

    /** Id for the expression that resulted in this message. */
    public String getId() {
      return id;
    }

    /** Message to send back to the user. */
    public DiagnosticMessage getMessage() {
      return message;
    }

    @Override public boolean equals(Object o) {
      if (this == o) return true;
      if (o == null || getClass() != o.getClass()) return false;
      Message msg = (Message) o;
      return id.equals(msg.id) && message.equals(msg.message);
    }

    @Override public int hashCode() {
      int result = id.hashCode();
      result = 31 * result + message.hashCode();
      return result;
    }

    @Override public String toString() {
      return "Message(id=" + id + ", message=" + message + ")";
    }
  }

  /** Diagnostic message. */
  public static final class DiagnosticMessage {
    private final MessageType type;
    private final String message;

    /** Create a new instance. */
    public DiagnosticMessage(MessageType type, String message) {
      this.type = type;
      this.message = message;
    }

    /**
     * Type of the message. Indicates whether it is purely informational or if there was
     * a problem the user needs to handle.
     */
    public MessageType getType() {
      return type;
    }

    /** Description of the problem. */
    public String getMessage() {
      return message;
    }

    @Override public boolean equals(Object o) {
      if (this == o) return true;
      if (o == null || getClass() != o.getClass()) return false;
      DiagnosticMessage msg = (DiagnosticMessage) o;
      return type == msg.type && message.equals(msg.message);
    }

    @Override public int hashCode() {
      int result = type.hashCode();
      result = 31 * result + message.hashCode();
      return result;
    }

    @Override public String toString() {
      return "DiagnosticMessage(type=" + type + ", message=" + message + ")";
    }
  }

  /** Message type. */
  public enum MessageType {
    /** Informational notices that are primarily to aide in debugging. */
    info,

    /**
     * Notifies the user of something that went wrong or that they should change, but that
     * is not causing an immediate problem.
     */
    warn,

    /** Expression cannot be handled will be rejected. */
    error
  }
}
| 6,078 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/impl/MeasurementSerializer.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonSerializer;
import com.fasterxml.jackson.databind.SerializerProvider;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Measurement;
import java.io.IOException;
import java.util.function.Function;
/**
* Jackson serializer for measurements. Values will be converted to a
* valid set as they are written out by replacing invalid characters with
* an '_'.
*
* <b>Classes in this package are only intended for use internally within spectator. They may
* change at any time and without notice.</b>
*/
public class MeasurementSerializer extends JsonSerializer<Measurement> {

  private final Function<String, String> fixTagString;

  /**
   * Create a new instance of the serializer.
   *
   * @param fixTagString
   *     Function that fixes characters used for tag keys and values.
   */
  public MeasurementSerializer(Function<String, String> fixTagString) {
    super();
    this.fixTagString = fixTagString;
  }

  @Override
  public void serialize(
      Measurement value,
      JsonGenerator gen,
      SerializerProvider serializers) throws IOException {
    Id id = value.id();
    gen.writeStartObject();
    gen.writeObjectFieldStart("tags");
    gen.writeStringField("name", fixTagString.apply(id.name()));
    boolean explicitDsType = false;
    final int size = id.size();
    // Start at 1, position 0 is the name which was already written above
    for (int i = 1; i < size; ++i) {
      final String key = fixTagString.apply(id.getKey(i));
      if ("name".equals(key)) {
        // Skip, the name written from id.name() takes precedence
        continue;
      }
      explicitDsType = explicitDsType || "atlas.dstype".equals(key);
      gen.writeStringField(key, fixTagString.apply(id.getValue(i)));
    }
    // If the dstype has not been explicitly set, then the value must be coming in
    // as a gauge. Go ahead and explicitly mark it as such because the backend will
    // default to a rate.
    if (!explicitDsType) {
      gen.writeStringField("atlas.dstype", "gauge");
    }
    gen.writeEndObject();
    gen.writeNumberField("timestamp", value.timestamp());
    gen.writeNumberField("value", value.value());
    gen.writeEndObject();
  }
}
| 6,079 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/impl/IdMapper.java | /*
* Copyright 2014-2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import com.netflix.spectator.api.Id;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;
/**
* Default mapping function from an Id to a Map.
*
* <p><b>Classes in this package are only intended for use internally within spectator. They may
* change at any time and without notice.</b>
*/
public final class IdMapper implements Function<Id, Map<String, String>> {

  private final Function<String, String> fixTagString;

  /** Create a new instance using the provided function to fix invalid characters in the tags. */
  public IdMapper(Function<String, String> fixTagString) {
    this.fixTagString = fixTagString;
  }

  /**
   * Convert {@code id} into a map of tag key/value pairs, sanitizing each key and value
   * with the configured function. The name of the id is stored under the "name" key and
   * takes precedence over any explicit tag using that key.
   */
  @Override
  public Map<String, String> apply(Id id) {
    int size = id.size();
    // Presize so the map will not need to rehash: HashMap resizes once the number of
    // entries exceeds capacity * loadFactor (0.75 by default), so `new HashMap<>(size)`
    // would still trigger a resize for maps with more than 3/4 * size entries.
    Map<String, String> tags = new HashMap<>((int) (size / 0.75f) + 1);
    // Start at 1 as name will be added last
    for (int i = 1; i < size; ++i) {
      String k = fixTagString.apply(id.getKey(i));
      String v = fixTagString.apply(id.getValue(i));
      tags.put(k, v);
    }
    // Add the name, it is added last so it will have precedence if the user tried to
    // use a tag key of "name".
    String name = fixTagString.apply(id.name());
    tags.put("name", name);
    return tags;
  }
}
| 6,080 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/impl/EvaluatorConfig.java | /*
* Copyright 2014-2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.NoopRegistry;
import com.netflix.spectator.atlas.AtlasConfig;
import java.util.Map;
import java.util.function.Function;
/**
* Additional interface that can be implemented by the AtlasConfig instance providing knobs
* for internal registry details.
*
* <p><b>Classes in this package are only intended for use internally within spectator. They may
* change at any time and without notice.</b>
*/
public interface EvaluatorConfig {

  /** Create a new instance from an AtlasConfig. */
  static EvaluatorConfig fromAtlasConfig(AtlasConfig config) {
    if (config instanceof EvaluatorConfig) {
      // Config already provides the evaluator knobs, use it directly
      return (EvaluatorConfig) config;
    } else {
      // Adapt the AtlasConfig; methods not overridden here fall back to the
      // interface defaults below
      return new EvaluatorConfig() {
        @Override public long evaluatorStepSize() {
          return config.lwcStep().toMillis();
        }

        @Override public Map<String, String> commonTags() {
          return config.commonTags();
        }

        @Override public Function<Id, Map<String, String>> idMapper() {
          return new IdMapper(JsonUtils.createReplacementFunction(config.validTagCharacters()));
        }
      };
    }
  }

  /** Step size used for the raw measurements. */
  long evaluatorStepSize();

  /** Returns the common tags to apply to all metrics reported to Atlas. */
  Map<String, String> commonTags();

  /** Function to convert an id to a map of key/value pairs. */
  default Function<Id, Map<String, String>> idMapper() {
    // Default performs no sanitization of tag characters
    return new IdMapper(Function.identity());
  }

  /** Supplier for cache to use within the evaluator query index. */
  default <T> QueryIndex.CacheSupplier<T> indexCacheSupplier() {
    // NoopRegistry: cache metrics from the default supplier are discarded
    return new QueryIndex.DefaultCacheSupplier<>(new NoopRegistry());
  }

  /**
   * Returns true if the measurements should be polled in parallel using the default
   * common fork join pool. For apps that are mostly just reporting metrics this can be
   * useful to more quickly process them. Default is false.
   */
  default boolean parallelMeasurementPolling() {
    return false;
  }

  /**
   * Returns true if gauge aggregation should be delayed until downstream in the final eval
   * step. In some cases such as running with an inline aggregator, the same gauge value may
   * be reported to multiple instances resulting in it contributing to the aggregate multiple
   * times. When delayed, the downstream eval step will receive all datapoints and dedup based
   * on a hash to mimic all datapoints being at a single aggregator.
   */
  default boolean delayGaugeAggregation() {
    return false;
  }
}
| 6,081 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/impl/Query.java | /*
* Copyright 2014-2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Tag;
import com.netflix.spectator.impl.PatternMatcher;
import com.netflix.spectator.impl.Preconditions;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
/**
* Query for matching based on tags. For more information see
* <a href="https://netflix.github.io/atlas-docs/asl/tutorial/">Atlas docs</a>.
*
* <p><b>Classes in this package are only intended for use internally within spectator.
* They may change at any time and without notice.</b>
*/
public interface Query {
/** Convert {@code id} to a map of tag key/value pairs. */
static Map<String, String> toMap(Id id) {
  Map<String, String> result = new HashMap<>();
  for (Tag tag : id.tags()) {
    result.put(tag.key(), tag.value());
  }
  // Written last so the id name wins over an explicit "name" tag
  result.put("name", id.name());
  return result;
}
/**
 * Check to see if this query matches a set of tags. Common tags or changes to fix
 * invalid characters should be performed prior to checking for a match.
 *
 * @param tags
 *     Tags to use when checking for a match.
 * @return
 *     True if the query expression matches the tag map.
 */
boolean matches(Map<String, String> tags);

/**
 * Check to see if this query matches an id. Equivalent to calling {@link #matches(Map)}
 * with the result of {@link #toMap(Id)}.
 *
 * @param id
 *     Id to use when checking for a match.
 * @return
 *     True if the query expression matches the id.
 */
default boolean matches(Id id) {
  return matches(toMap(id));
}

/**
 * Extract the tags from the query that have an exact match for a given value. That
 * is, tags that are specified using an {@link Equal} clause.
 *
 * @return
 *     Tags that are exactly matched as part of the query.
 */
default Map<String, String> exactTags() {
  return Collections.emptyMap();
}

/** Returns a new query: {@code this AND q}. */
default Query and(Query q) {
  // TRUE and FALSE can collapse the expression, so delegate to their implementations
  return (q == TRUE || q == FALSE) ? q.and(this) : new And(this, q);
}

/** Returns a new query: {@code this OR q}. */
default Query or(Query q) {
  // TRUE and FALSE can collapse the expression, so delegate to their implementations
  return (q == TRUE || q == FALSE) ? q.or(this) : new Or(this, q);
}

/** Returns an inverted version of this query. */
default Query not() {
  // Inverting a single-key query yields another KeyQuery so it can still be
  // organized by key
  return (this instanceof KeyQuery)
      ? new InvertedKeyQuery((KeyQuery) this)
      : new Not(this);
}

/**
 * Converts this query into disjunctive normal form. The return value is a list of
 * sub-queries that should be ORd together.
 */
default List<Query> dnfList() {
  return Collections.singletonList(this);
}

/**
 * Converts this query into a list of sub-queries that can be ANDd together. The query will
 * not be normalized first, it will only expand top-level AND clauses.
 */
default List<Query> andList() {
  return Collections.singletonList(this);
}

/**
 * Return a new query that has been simplified by pre-evaluating the conditions for a set
 * of tags that are common to all metrics.
 */
default Query simplify(Map<String, String> tags) {
  return this;
}

/** Query that always matches. */
Query TRUE = new Query() {
  @Override public Query and(Query q) {
    // TRUE AND q == q
    return q;
  }

  @Override public Query or(Query q) {
    // TRUE OR q == TRUE
    return TRUE;
  }

  @Override public Query not() {
    return FALSE;
  }

  @Override public boolean matches(Map<String, String> tags) {
    return true;
  }

  @Override public String toString() {
    return ":true";
  }
};

/** Query that never matches. */
Query FALSE = new Query() {
  @Override public Query and(Query q) {
    // FALSE AND q == FALSE
    return FALSE;
  }

  @Override public Query or(Query q) {
    // FALSE OR q == q
    return q;
  }

  @Override public Query not() {
    return TRUE;
  }

  @Override public boolean matches(Map<String, String> tags) {
    return false;
  }

  @Override public String toString() {
    return ":false";
  }
};
/** Query that matches if both sub-queries match. */
final class And implements Query {
  private final Query q1;
  private final Query q2;

  /** Create a new instance. */
  And(Query q1, Query q2) {
    this.q1 = Preconditions.checkNotNull(q1, "q1");
    this.q2 = Preconditions.checkNotNull(q2, "q2");
  }

  @Override public Query not() {
    // De Morgan: !(a AND b) == !a OR !b
    Query nq1 = q1.not();
    Query nq2 = q2.not();
    return nq1.or(nq2);
  }

  @Override public List<Query> dnfList() {
    // Distribute AND over the OR terms of both sides: cross product of the
    // two normalized lists
    return crossAnd(q1.dnfList(), q2.dnfList());
  }

  @Override public List<Query> andList() {
    List<Query> tmp = new ArrayList<>(q1.andList());
    tmp.addAll(q2.andList());
    return tmp;
  }

  // Combines every query of qs1 with every query of qs2 via AND
  private List<Query> crossAnd(List<Query> qs1, List<Query> qs2) {
    List<Query> tmp = new ArrayList<>();
    for (Query q1 : qs1) {
      for (Query q2 : qs2) {
        tmp.add(q1.and(q2));
      }
    }
    return tmp;
  }

  @Override public boolean matches(Map<String, String> tags) {
    return q1.matches(tags) && q2.matches(tags);
  }

  @Override public Map<String, String> exactTags() {
    // Union of both sides; if both sides constrain the same key, q2 wins
    Map<String, String> tags = new HashMap<>();
    tags.putAll(q1.exactTags());
    tags.putAll(q2.exactTags());
    return tags;
  }

  @Override public Query simplify(Map<String, String> tags) {
    Query sq1 = q1.simplify(tags);
    Query sq2 = q2.simplify(tags);
    // Reference comparison: only rebuild when a sub-query actually changed
    return (sq1 != q1 || sq2 != q2) ? sq1.and(sq2) : this;
  }

  @Override public String toString() {
    return q1 + "," + q2 + ",:and";
  }

  @Override public boolean equals(Object obj) {
    if (this == obj) return true;
    if (!(obj instanceof And)) return false;
    And other = (And) obj;
    return q1.equals(other.q1) && q2.equals(other.q2);
  }

  @Override public int hashCode() {
    int result = q1.hashCode();
    result = 31 * result + q2.hashCode();
    return result;
  }
}
/** Query that matches if either sub-query matches. */
final class Or implements Query {
  private final Query q1;
  private final Query q2;

  /** Create a new instance. */
  Or(Query q1, Query q2) {
    this.q1 = Preconditions.checkNotNull(q1, "q1");
    this.q2 = Preconditions.checkNotNull(q2, "q2");
  }

  @Override public Query not() {
    // De Morgan: !(a OR b) == !a AND !b
    Query nq1 = q1.not();
    Query nq2 = q2.not();
    return nq1.and(nq2);
  }

  @Override public List<Query> dnfList() {
    // OR terms concatenate directly in disjunctive normal form
    List<Query> qs = new ArrayList<>(q1.dnfList());
    qs.addAll(q2.dnfList());
    return qs;
  }

  @Override public boolean matches(Map<String, String> tags) {
    return q1.matches(tags) || q2.matches(tags);
  }

  @Override public Query simplify(Map<String, String> tags) {
    Query sq1 = q1.simplify(tags);
    Query sq2 = q2.simplify(tags);
    // Reference comparison: only rebuild when a sub-query actually changed
    return (sq1 != q1 || sq2 != q2) ? sq1.or(sq2) : this;
  }

  @Override public String toString() {
    return q1 + "," + q2 + ",:or";
  }

  @Override public boolean equals(Object obj) {
    if (this == obj) return true;
    if (!(obj instanceof Or)) return false;
    Or other = (Or) obj;
    return q1.equals(other.q1) && q2.equals(other.q2);
  }

  @Override public int hashCode() {
    int result = q1.hashCode();
    result = 31 * result + q2.hashCode();
    return result;
  }
}
/** Query that matches if the sub-query does not match. */
final class Not implements Query {
  private final Query q;

  /** Create a new instance. */
  Not(Query q) {
    this.q = Preconditions.checkNotNull(q, "q");
  }

  @Override public Query not() {
    // Double negation cancels out
    return q;
  }

  @Override public List<Query> dnfList() {
    // Push the negation down with De Morgan's laws so that NOT only wraps
    // simple (non-composite) queries in the normalized form
    if (q instanceof And) {
      // !(a AND b) == !a OR !b; OR terms concatenate in DNF
      And query = (And) q;
      List<Query> qs = new ArrayList<>(query.q1.not().dnfList());
      qs.addAll(query.q2.not().dnfList());
      return qs;
    } else if (q instanceof Or) {
      // !(a OR b) == !a AND !b; re-normalize the resulting AND
      Or query = (Or) q;
      Query q1 = query.q1.not();
      Query q2 = query.q2.not();
      return q1.and(q2).dnfList();
    } else {
      // Sub-query is not composite, already in normal form
      return Collections.singletonList(this);
    }
  }

  @Override public boolean matches(Map<String, String> tags) {
    return !q.matches(tags);
  }

  @Override public Query simplify(Map<String, String> tags) {
    Query sq = q.simplify(tags);
    // Reference comparison: only rebuild when the sub-query actually changed
    return (sq != q) ? sq.not() : this;
  }

  @Override public String toString() {
    return q + ",:not";
  }

  @Override public boolean equals(Object obj) {
    if (this == obj) return true;
    if (!(obj instanceof Not)) return false;
    Not other = (Not) obj;
    return q.equals(other.q);
  }

  @Override public int hashCode() {
    return q.hashCode();
  }
}
/** Base interface for simple queries that check the value associated with a single key. */
interface KeyQuery extends Query {
  /** Key checked by this query. */
  String key();

  /**
   * Returns true if the value matches for this query clause. The value may be null
   * when the key is not present in the tag map; implementations must handle null.
   */
  boolean matches(String value);

  @Override default boolean matches(Map<String, String> tags) {
    // tags.get may return null if the key is absent, see matches(String)
    return matches(tags.get(key()));
  }

  @Override default Query simplify(Map<String, String> tags) {
    String v = tags.get(key());
    if (v == null) {
      // Key not in the common tags, cannot pre-evaluate this clause
      return this;
    }
    return matches(v) ? Query.TRUE : Query.FALSE;
  }
}
/** Checks all of a set of conditions for the same key match the specified value. */
final class CompositeKeyQuery implements KeyQuery {
  private final String k;
  private final List<KeyQuery> queries;

  /** Create a new instance seeded with a single sub-query. */
  CompositeKeyQuery(KeyQuery query) {
    Preconditions.checkNotNull(query, "query");
    this.k = query.key();
    this.queries = new ArrayList<>();
    this.queries.add(query);
  }

  /** Add another query to the list. Must use the same key as the existing queries. */
  void add(KeyQuery query) {
    Preconditions.checkArg(k.equals(query.key()), "key mismatch: " + k + " != " + query.key());
    queries.add(query);
  }

  @Override public String key() {
    return k;
  }

  @Override public boolean matches(String value) {
    // Every clause must match for the composite to match
    for (KeyQuery query : queries) {
      if (!query.matches(value)) {
        return false;
      }
    }
    return true;
  }

  @Override public String toString() {
    // The list is never empty, the constructor always adds one query
    StringBuilder builder = new StringBuilder();
    builder.append(queries.get(0));
    for (int i = 1; i < queries.size(); ++i) {
      builder.append(',').append(queries.get(i)).append(",:and");
    }
    return builder.toString();
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    CompositeKeyQuery other = (CompositeKeyQuery) o;
    return Objects.equals(k, other.k) && Objects.equals(queries, other.queries);
  }

  @Override
  public int hashCode() {
    return Objects.hash(queries, k);
  }
}
/** Query that matches if the underlying key query does not match. */
final class InvertedKeyQuery implements KeyQuery {

  private final KeyQuery q;

  /** Create a new instance. */
  InvertedKeyQuery(KeyQuery q) {
    this.q = Preconditions.checkNotNull(q, "q");
  }

  @Override public Query not() {
    // Double negation unwraps to the original sub-query.
    return q;
  }

  @Override public String key() {
    return q.key();
  }

  @Override public boolean matches(String value) {
    return !q.matches(value);
  }

  @Override public Query simplify(Map<String, String> tags) {
    // Only allocate a new query if the sub-query actually simplified.
    Query sq = q.simplify(tags);
    return (sq != q) ? sq.not() : this;
  }

  @Override public String toString() {
    return q + ",:not";
  }

  @Override public boolean equals(Object obj) {
    if (this == obj) return true;
    if (!(obj instanceof InvertedKeyQuery)) return false;
    InvertedKeyQuery other = (InvertedKeyQuery) obj;
    return q.equals(other.q);
  }

  @Override public int hashCode() {
    return q.hashCode();
  }
}
/** Query that matches if the tag map contains a specified key. */
final class Has implements KeyQuery {

  private final String k;

  /** Create a new instance. */
  Has(String k) {
    this.k = Preconditions.checkNotNull(k, "k");
  }

  @Override public String key() {
    return k;
  }

  @Override public boolean matches(String value) {
    // Any non-null value indicates the key was present.
    return value != null;
  }

  @Override public boolean matches(Map<String, String> tags) {
    // Containment check avoids an unnecessary value lookup.
    return tags.containsKey(k);
  }

  @Override public String toString() {
    return k + ",:has";
  }

  @Override public boolean equals(Object obj) {
    if (obj == this) {
      return true;
    }
    if (obj instanceof Has) {
      return k.equals(((Has) obj).k);
    }
    return false;
  }

  @Override public int hashCode() {
    return k.hashCode();
  }
}
/** Query that matches if the tag map contains key {@code k} with value {@code v}. */
final class Equal implements KeyQuery {

  private final String k;
  private final String v;

  /** Create a new instance. */
  Equal(String k, String v) {
    this.k = Preconditions.checkNotNull(k, "k");
    this.v = Preconditions.checkNotNull(v, "v");
  }

  @Override public String key() {
    return k;
  }

  /** Value that must be associated with the key for this query to match. */
  public String value() {
    return v;
  }

  @Override public boolean matches(String value) {
    // A null value, i.e. a missing key, never matches.
    return value != null && value.equals(v);
  }

  @Override public Map<String, String> exactTags() {
    // Single key/value pair known to be present on any matching id.
    Map<String, String> tags = new HashMap<>();
    tags.put(k, v);
    return tags;
  }

  @Override public String toString() {
    return k + "," + v + ",:eq";
  }

  @Override public boolean equals(Object obj) {
    if (obj == this) {
      return true;
    }
    if (obj instanceof Equal) {
      Equal other = (Equal) obj;
      return k.equals(other.k) && v.equals(other.v);
    }
    return false;
  }

  @Override public int hashCode() {
    // Same value as the conventional 31-based accumulation over k then v.
    return 31 * k.hashCode() + v.hashCode();
  }
}
/**
 * Query that matches if the tag map contains key {@code k} with a value in the set
 * {@code vs}.
 */
final class In implements KeyQuery {

  private final String k;
  private final Set<String> vs;

  /**
   * Create a new instance.
   *
   * @param k
   *     Key to check.
   * @param vs
   *     Non-empty set of values to match against.
   */
  In(String k, Set<String> vs) {
    // Null checks must run before the isEmpty() check so a null set produces the
    // informative NPE from checkNotNull rather than a bare NPE from vs.isEmpty().
    this.k = Preconditions.checkNotNull(k, "k");
    this.vs = Preconditions.checkNotNull(vs, "vs");
    Preconditions.checkArg(!vs.isEmpty(), "list of values for :in cannot be empty");
  }

  @Override public String key() {
    return k;
  }

  /** Set of values that will match for the key. */
  public Set<String> values() {
    return vs;
  }

  @Override public boolean matches(String value) {
    return value != null && vs.contains(value);
  }

  @Override public List<Query> dnfList() {
    // For smaller sets expand to a disjunction of equal clauses. This allows them
    // to be indexed more efficiently. The size is limited because if there are
    // multiple large in clauses in an expression the cross product can become really
    // large.
    //
    // The name key is always expanded as it is used as the root of the QueryIndex. Early
    // filtering at the root has a big impact on matching performance.
    if ("name".equals(k) || vs.size() <= 5) {
      List<Query> queries = new ArrayList<>(vs.size());
      for (String v : vs) {
        queries.add(new Query.Equal(k, v));
      }
      return queries;
    } else {
      return Collections.singletonList(this);
    }
  }

  @Override public String toString() {
    // Postfix stack-language list form: k,(,v1,v2,...,),:in
    String values = String.join(",", vs);
    return k + ",(," + values + ",),:in";
  }

  @Override public boolean equals(Object obj) {
    if (this == obj) return true;
    if (!(obj instanceof In)) return false;
    In other = (In) obj;
    return k.equals(other.k) && vs.equals(other.vs);
  }

  @Override public int hashCode() {
    int result = k.hashCode();
    result = 31 * result + vs.hashCode();
    return result;
  }
}
/**
 * Query that matches if the tag map contains key {@code k} with a value that is lexically
 * less than {@code v}.
 */
final class LessThan implements KeyQuery {

  private final String k;
  private final String v;

  /** Create a new instance. */
  LessThan(String k, String v) {
    this.k = Preconditions.checkNotNull(k, "k");
    this.v = Preconditions.checkNotNull(v, "v");
  }

  @Override public String key() {
    return k;
  }

  @Override public boolean matches(String value) {
    // Missing values never match; otherwise the value must sort strictly before v.
    if (value == null) {
      return false;
    }
    return v.compareTo(value) > 0;
  }

  @Override public String toString() {
    return k + "," + v + ",:lt";
  }

  @Override public boolean equals(Object obj) {
    if (obj == this) {
      return true;
    }
    if (obj instanceof LessThan) {
      LessThan other = (LessThan) obj;
      return k.equals(other.k) && v.equals(other.v);
    }
    return false;
  }

  @Override public int hashCode() {
    // Same value as the conventional 31-based accumulation over k then v.
    return 31 * k.hashCode() + v.hashCode();
  }
}
/**
 * Query that matches if the tag map contains key {@code k} with a value that is lexically
 * less than or equal to {@code v}.
 */
final class LessThanEqual implements KeyQuery {

  private final String k;
  private final String v;

  /** Create a new instance. */
  LessThanEqual(String k, String v) {
    this.k = Preconditions.checkNotNull(k, "k");
    this.v = Preconditions.checkNotNull(v, "v");
  }

  @Override public String key() {
    return k;
  }

  @Override public boolean matches(String value) {
    // A null value, i.e. a missing key, never matches.
    return value != null && value.compareTo(v) <= 0;
  }

  @Override public String toString() {
    return k + "," + v + ",:le";
  }

  @Override public boolean equals(Object obj) {
    if (this == obj) return true;
    if (!(obj instanceof LessThanEqual)) return false;
    LessThanEqual other = (LessThanEqual) obj;
    return k.equals(other.k) && v.equals(other.v);
  }

  @Override public int hashCode() {
    int result = k.hashCode();
    result = 31 * result + v.hashCode();
    return result;
  }
}
/**
 * Query that matches if the tag map contains key {@code k} with a value that is lexically
 * greater than {@code v}.
 */
final class GreaterThan implements KeyQuery {

  private final String k;
  private final String v;

  /** Create a new instance. */
  GreaterThan(String k, String v) {
    this.k = Preconditions.checkNotNull(k, "k");
    this.v = Preconditions.checkNotNull(v, "v");
  }

  @Override public String key() {
    return k;
  }

  @Override public boolean matches(String value) {
    // A null value, i.e. a missing key, never matches.
    return value != null && value.compareTo(v) > 0;
  }

  @Override public String toString() {
    return k + "," + v + ",:gt";
  }

  @Override public boolean equals(Object obj) {
    if (this == obj) return true;
    if (!(obj instanceof GreaterThan)) return false;
    GreaterThan other = (GreaterThan) obj;
    return k.equals(other.k) && v.equals(other.v);
  }

  @Override public int hashCode() {
    int result = k.hashCode();
    result = 31 * result + v.hashCode();
    return result;
  }
}
/**
 * Query that matches if the tag map contains key {@code k} with a value that is lexically
 * greater than or equal to {@code v}.
 */
final class GreaterThanEqual implements KeyQuery {

  private final String k;
  private final String v;

  /** Create a new instance. */
  GreaterThanEqual(String k, String v) {
    this.k = Preconditions.checkNotNull(k, "k");
    this.v = Preconditions.checkNotNull(v, "v");
  }

  @Override public String key() {
    return k;
  }

  @Override public boolean matches(String value) {
    // A null value, i.e. a missing key, never matches.
    return value != null && value.compareTo(v) >= 0;
  }

  @Override public String toString() {
    return k + "," + v + ",:ge";
  }

  @Override public boolean equals(Object obj) {
    if (this == obj) return true;
    if (!(obj instanceof GreaterThanEqual)) return false;
    GreaterThanEqual other = (GreaterThanEqual) obj;
    return k.equals(other.k) && v.equals(other.v);
  }

  @Override public int hashCode() {
    int result = k.hashCode();
    result = 31 * result + v.hashCode();
    return result;
  }
}
/**
 * Query that matches if the tag map contains key {@code k} with a value that matches the
 * regex in {@code v}. The expression will be automatically anchored to the start to encourage
 * prefix matches.
 *
 * <p><b>Warning:</b> regular expressions are often expensive and can add a lot of overhead.
 * Use them sparingly.</p>
 */
final class Regex implements KeyQuery {

  private final String k;
  private final String v;
  private final PatternMatcher pattern;
  private final String name;

  /** Create a new case-sensitive instance using the {@code :re} operator name. */
  Regex(String k, String v) {
    this(k, v, false, ":re");
  }

  /**
   * Create a new instance.
   *
   * @param k
   *     Key to check.
   * @param v
   *     Regex for the value. It is anchored to the start of the value before compiling.
   * @param ignoreCase
   *     If true, matching is case-insensitive.
   * @param name
   *     Operator name used in {@link #toString()}, e.g. {@code :re}.
   */
  Regex(String k, String v, boolean ignoreCase, String name) {
    this.k = Preconditions.checkNotNull(k, "k");
    this.v = Preconditions.checkNotNull(v, "v");
    if (ignoreCase) {
      this.pattern = PatternMatcher.compile("^" + v).ignoreCase();
    } else {
      this.pattern = PatternMatcher.compile("^" + v);
    }
    this.name = Preconditions.checkNotNull(name, "name");
  }

  @Override public String key() {
    return k;
  }

  /** Returns the pattern matcher for checking the values. */
  public PatternMatcher pattern() {
    return pattern;
  }

  @Override public boolean matches(String value) {
    // A null value, i.e. a missing key, never matches.
    return value != null && pattern.matches(value);
  }

  /** Returns true if the pattern will always match. */
  public boolean alwaysMatches() {
    return pattern.alwaysMatches();
  }

  @Override public String toString() {
    // Uses the original pattern string and operator name, not the compiled form.
    return k + "," + v + "," + name;
  }

  @Override public boolean equals(Object obj) {
    if (this == obj) return true;
    if (!(obj instanceof Regex)) return false;
    Regex other = (Regex) obj;
    return k.equals(other.k)
        && v.equals(other.v)
        && pattern.equals(other.pattern)
        && name.equals(other.name);
  }

  @Override public int hashCode() {
    int result = k.hashCode();
    result = 31 * result + v.hashCode();
    result = 31 * result + pattern.hashCode();
    result = 31 * result + name.hashCode();
    return result;
  }
}
}
| 6,082 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/impl/Evaluator.java | /*
* Copyright 2014-2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.api.Utils;
import com.netflix.spectator.impl.Hash64;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;
import java.util.function.Function;
/**
 * Evaluates all the expressions for a set of subscriptions.
 *
 * <p><b>Classes in this package are only intended for use internally within spectator. They may
 * change at any time and without notice.</b>
 */
public class Evaluator {

  private static final Logger LOGGER = LoggerFactory.getLogger(Evaluator.class);

  // Guards structural updates to the subscription map and query index during sync.
  private final Lock lock = new ReentrantLock();

  // Tags applied to all datapoints; used to pre-simplify subscription queries and
  // merged into the result tags during evaluation.
  private final Map<String, String> commonTags;
  // Converts an Id into a mutable tag map during evaluation.
  private final Function<Id, Map<String, String>> idMapper;
  // Base step size for the evaluator; subscription frequencies are multiples of this.
  private final long step;
  // When true, gauge aggregation for accumulating expressions is deferred and
  // partial values are emitted with an "atlas.aggr" tag instead.
  private final boolean delayGaugeAggregation;
  private final QueryIndex<SubscriptionEntry> index;
  private final Map<Subscription, SubscriptionEntry> subscriptions;
  // Reused per-thread consumer to avoid allocating a closure per datapoint.
  private final ThreadLocal<SubscriptionEntryConsumer> consumers;

  /**
   * Create a new instance.
   *
   * @param config
   *     Config settings to tune the evaluation behavior.
   */
  public Evaluator(EvaluatorConfig config) {
    this.commonTags = new TreeMap<>(config.commonTags());
    this.idMapper = config.idMapper();
    this.step = config.evaluatorStepSize();
    this.delayGaugeAggregation = config.delayGaugeAggregation();
    this.index = QueryIndex.newInstance(config.indexCacheSupplier());
    this.subscriptions = new ConcurrentHashMap<>();
    this.consumers = new ThreadLocal<>();
  }

  /**
   * Synchronize the set of subscriptions for this evaluator with the provided set.
   * New subscriptions are added, missing ones are removed, and existing entries are
   * left untouched so their accumulated state is preserved.
   */
  public void sync(List<Subscription> subs) {
    lock.lock();
    try {
      // Start from the current set; anything still present after processing the new
      // list is a subscription that should be removed.
      Set<Subscription> removed = new HashSet<>(subscriptions.keySet());
      for (Subscription sub : subs) {
        boolean alreadyPresent = removed.remove(sub);
        if (!alreadyPresent) {
          try {
            // Parse and simplify query
            Query q = sub.dataExpr().query().simplify(commonTags);
            LOGGER.trace("query pre-eval: original [{}], simplified [{}], common tags {}",
                sub.dataExpr().query(), q, commonTags);

            // Update index
            int multiple = (int) (sub.getFrequency() / step);
            SubscriptionEntry entry = new SubscriptionEntry(sub, multiple);
            subscriptions.put(sub, entry);
            index.add(q, entry);
            LOGGER.debug("subscription added: {}", sub);
          } catch (Exception e) {
            // A bad expression should not prevent the remaining subscriptions from
            // being processed.
            LOGGER.warn("failed to add subscription: {}", sub, e);
          }
        } else {
          LOGGER.trace("subscription already present: {}", sub);
        }
      }

      for (Subscription sub : removed) {
        SubscriptionEntry entry = subscriptions.remove(sub);
        // The simplified query must be recomputed to match the key used in add().
        Query q = sub.dataExpr().query().simplify(commonTags);
        index.remove(q, entry);
        LOGGER.debug("subscription removed: {}", sub);
      }
    } finally {
      lock.unlock();
    }
  }

  /**
   * Update the state. See {@link #update(Id, long, double)} for more information.
   */
  public void update(Measurement m) {
    index.forEachMatch(m.id(), entry -> entry.update(m));
  }

  /**
   * Update the state for the expressions to be evaluated with the provided datapoint.
   *
   * @param id
   *     Id for the datapoint. The value will be collected for each expression where the
   *     id satisfies the query constraints.
   * @param t
   *     Timestamp for the datapoint. It should be on a boundary for the step interval.
   * @param v
   *     Value for the datapoint.
   */
  public void update(Id id, long t, double v) {
    // Using a simple lambda with forEachMatch results in a lot of allocations. The
    // SubscriptionEntryConsumer can be updated with the datapoint and reused across
    // invocations
    SubscriptionEntryConsumer consumer = consumers.get();
    if (consumer == null) {
      consumer = new SubscriptionEntryConsumer();
      consumers.set(consumer);
    }
    consumer.updateMeasurement(id, t, v);
    index.forEachMatch(id, consumer);
  }

  /**
   * Evaluate the expressions for all subscriptions against the data available for the provided
   * timestamp. The data must be populated by calling {@link #update(Id, long, double)} prior to
   * performing the evaluation.
   *
   * @param timestamp
   *     Timestamp for the interval to evaluate.
   * @return
   *     Payload representing the results of the evaluation.
   */
  public EvalPayload eval(long timestamp) {
    List<EvalPayload.Metric> metrics = new ArrayList<>();
    subscriptions.values().forEach(subEntry -> {
      final String subId = subEntry.subscription.getId();
      // NOTE(review): this local intentionally uses the subscription frequency and
      // shadows the evaluator-level "step" field; confirm before renaming either.
      final long step = subEntry.subscription.getFrequency();
      if (timestamp % step == 0) {
        // Only evaluate subscriptions whose step boundary lines up with the timestamp.
        LOGGER.debug("evaluating subscription: {}: {}", timestamp, subEntry.subscription);
        DataExpr expr = subEntry.subscription.dataExpr();
        final boolean delayGaugeAggr = delayGaugeAggregation && expr.isAccumulating();
        DataExpr.Aggregator aggregator = expr.aggregator(false);
        Iterator<Map.Entry<Id, Consolidator>> it = subEntry.measurements.entrySet().iterator();
        while (it.hasNext()) {
          Map.Entry<Id, Consolidator> entry = it.next();
          final Consolidator consolidator = entry.getValue();
          // Push a NaN to flush the consolidator forward to this timestamp.
          consolidator.update(timestamp, Double.NaN);

          final double v = consolidator.value(timestamp);
          if (!Double.isNaN(v)) {
            Map<String, String> tags = idMapper.apply(entry.getKey());
            tags.putAll(commonTags);
            if (delayGaugeAggr && consolidator.isGauge()) {
              // When performing a group by, datapoints missing tag used for the grouping
              // should be ignored
              Map<String, String> rs = expr.resultTags(tags);
              if (rs != null) {
                // Emit the raw value with an "atlas.aggr" hash tag so the backend can
                // finish the aggregation; counts contribute 1.0 per datapoint.
                Map<String, String> resultTags = new HashMap<>(rs);
                resultTags.put("atlas.aggr", idHash(entry.getKey()));
                double acc = expr.isCount() ? 1.0 : v;
                metrics.add(new EvalPayload.Metric(subId, resultTags, acc));
              }
            } else {
              TagsValuePair p = new TagsValuePair(tags, v);
              aggregator.update(p);
              LOGGER.trace("aggregating: {}: {}", timestamp, p);
            }
          }

          // Drop consolidators that have no remaining data to avoid unbounded growth.
          if (consolidator.isEmpty()) {
            it.remove();
          }
        }
        for (TagsValuePair pair : aggregator.result()) {
          LOGGER.trace("result: {}: {}", timestamp, pair);
          metrics.add(new EvalPayload.Metric(subId, pair.tags(), pair.value()));
        }
      }
    });

    return new EvalPayload(timestamp, metrics);
  }

  /** Returns a stable 64-bit hash of the id, hex-encoded, for the "atlas.aggr" tag. */
  private String idHash(Id id) {
    Hash64 hasher = new Hash64();
    hasher.updateString(id.name());
    final int size = id.size();
    // Start at 1 because position 0 is the name, which was hashed above.
    for (int i = 1; i < size; ++i) {
      hasher.updateByte((byte) ',');
      hasher.updateString(id.getKey(i));
      hasher.updateByte((byte) '=');
      hasher.updateString(id.getValue(i));
    }
    return Long.toHexString(hasher.compute());
  }

  /**
   * Helper function that evaluates the data for a given time after updating with the
   * provided list of measurements.
   *
   * @param t
   *     Timestamp for the interval to evaluate.
   * @param ms
   *     List of measurements to include before performing the evaluation.
   * @return
   *     Payload representing the results of the evaluation.
   */
  public EvalPayload eval(long t, List<Measurement> ms) {
    ms.forEach(this::update);
    return eval(t);
  }

  /** Used for tests to ensure expected number of subscriptions in the evaluator. */
  int subscriptionCount() {
    return subscriptions.size();
  }

  /** Pairs a subscription with the per-id consolidated measurement state. */
  private static class SubscriptionEntry {

    private final Subscription subscription;
    // Number of base steps per subscription step, used for consolidation.
    private final int multiple;
    private final ConcurrentHashMap<Id, Consolidator> measurements;

    SubscriptionEntry(Subscription subscription, int multiple) {
      this.subscription = subscription;
      this.multiple = multiple;
      this.measurements = new ConcurrentHashMap<>();
    }

    void update(Measurement m) {
      update(m.id(), m.timestamp(), m.value());
    }

    void update(Id id, long t, double v) {
      // Lazily create a consolidator per distinct id.
      Consolidator consolidator = Utils.computeIfAbsent(
          measurements,
          id,
          k -> Consolidator.create(k, subscription.getFrequency(), multiple)
      );
      consolidator.update(t, v);
    }
  }

  /**
   * Consumer that allows the measurement data to be mutated. This can be used to avoid
   * allocating instances of the closure when calling forEachMatch on the query index.
   */
  private static class SubscriptionEntryConsumer implements Consumer<SubscriptionEntry> {
    private Id id;
    private long timestamp;
    private double value;

    public void updateMeasurement(Id id, long timestamp, double value) {
      this.id = id;
      this.timestamp = timestamp;
      this.value = value;
    }

    @Override
    public void accept(SubscriptionEntry entry) {
      entry.update(id, timestamp, value);
    }
  }
}
| 6,083 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/impl/PrefixTree.java | /*
* Copyright 2014-2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;
/**
 * Simple tree for finding all values associated with a prefix that matches the search
 * key. The prefix is a simple ascii string. If unsupported characters are used in the
 * prefix or search key, then the prefix match will only check up to the unsupported
 * character and the caller will need to perform further checks on the returned value.
 */
final class PrefixTree<T> {

  // Supported character range: printable ascii from ' ' (0x20) through '~' (0x7e).
  private static final int FIRST_CHAR = ' ';
  private static final int LAST_CHAR = '~';
  private static final int TABLE_SIZE = LAST_CHAR - FIRST_CHAR + 1;

  // Returns a negative value for characters outside the supported range. Characters
  // above '~' are mapped to -1; characters below ' ' fall through as a negative diff.
  private static int indexOf(char c) {
    int i = c - FIRST_CHAR;
    return (i >= TABLE_SIZE) ? -1 : i;
  }

  // Guards child creation and removal; reads of the children array are lock-free.
  private final Lock lock = new ReentrantLock();
  private final AtomicReferenceArray<PrefixTree<T>> children;
  private final Set<T> values;

  /** Create a new instance. */
  PrefixTree() {
    children = new AtomicReferenceArray<>(TABLE_SIZE);
    values = ConcurrentHashMap.newKeySet();
  }

  // Get or create the child for slot i using double-checked locking: the unlocked
  // read is the fast path, the locked re-check prevents duplicate children.
  private PrefixTree<T> computeIfAbsent(int i) {
    PrefixTree<T> child = children.get(i);
    if (child == null) {
      lock.lock();
      try {
        child = children.get(i);
        if (child == null) {
          child = new PrefixTree<>();
          children.set(i, child);
        }
      } finally {
        lock.unlock();
      }
    }
    return child;
  }

  /**
   * Put a value into the tree.
   *
   * @param prefix
   *     ASCII string that represents a prefix for the search key. A null prefix
   *     associates the value with the root so it matches every key.
   * @param value
   *     Value to associate with the prefix.
   */
  void put(String prefix, T value) {
    if (prefix == null)
      values.add(value);
    else
      put(prefix, 0, value);
  }

  // Recursive helper; pos is the index of the prefix character for this level.
  private void put(String prefix, int pos, T value) {
    if (pos == prefix.length()) {
      values.add(value);
    } else {
      int i = indexOf(prefix.charAt(pos));
      if (i < 0) {
        // Unsupported character: stop descending and store at the current node.
        // Callers must re-check matches on lookup.
        values.add(value);
      } else {
        PrefixTree<T> child = computeIfAbsent(i);
        child.put(prefix, pos + 1, value);
      }
    }
  }

  /**
   * Remove a value from the tree with the associated prefix.
   *
   * @param prefix
   *     ASCII string that represents a prefix for the search key.
   * @param value
   *     Value to associate with the prefix.
   * @return
   *     Returns true if a value was removed from the tree.
   */
  boolean remove(String prefix, T value) {
    if (prefix == null)
      return values.remove(value);
    else
      return remove(prefix, 0, value);
  }

  private boolean remove(String prefix, int pos, T value) {
    if (pos == prefix.length()) {
      return values.remove(value);
    } else {
      int i = indexOf(prefix.charAt(pos));
      if (i < 0) {
        // Unsupported character: the value would have been stored at this node.
        return values.remove(value);
      } else {
        PrefixTree<T> child = children.get(i);
        if (child == null) {
          return false;
        } else {
          boolean result = child.remove(prefix, pos + 1, value);
          if (result && child.isEmpty()) {
            // Prune empty children so the tree does not grow without bound.
            lock.lock();
            try {
              // Check that the children array still has the reference to the
              // same child object. The entry may have been replaced by another
              // thread.
              if (child == children.get(i) && child.isEmpty()) {
                children.set(i, null);
              }
            } finally {
              lock.unlock();
            }
          }
          return result;
        }
      }
    }
  }

  /**
   * Get a list of values associated with a prefix of the search key.
   *
   * @param key
   *     Key to compare against the prefixes.
   * @return
   *     Values associated with a matching prefix.
   */
  List<T> get(String key) {
    List<T> results = new ArrayList<>();
    forEach(key, results::add);
    return results;
  }

  /**
   * Invokes the consumer function for each value associated with a prefix of the search key.
   *
   * @param key
   *     Key to compare against the prefixes.
   * @param consumer
   *     Function to call for matching values.
   */
  void forEach(String key, Consumer<T> consumer) {
    forEach(key, 0, consumer);
  }

  private void forEach(String key, int pos, Consumer<T> consumer) {
    // Values at every node along the path are matches: a value stored at depth d
    // has a prefix equal to the first d characters of the key.
    values.forEach(consumer);
    if (pos < key.length()) {
      int i = indexOf(key.charAt(pos));
      if (i >= 0) {
        PrefixTree<T> child = children.get(i);
        if (child != null) {
          child.forEach(key, pos + 1, consumer);
        }
      }
    }
  }

  /**
   * Returns true if the tree is empty.
   */
  boolean isEmpty() {
    if (values.isEmpty()) {
      for (int i = 0; i < TABLE_SIZE; ++i) {
        if (children.get(i) != null) {
          return false;
        }
      }
      return true;
    } else {
      return false;
    }
  }

  /**
   * Returns the overall number of values in the tree. The size is computed on demand
   * by traversing the tree, so this call may be expensive.
   */
  int size() {
    int sz = values.size();
    for (int i = 0; i < TABLE_SIZE; ++i) {
      PrefixTree<T> child = children.get(i);
      if (child != null) {
        sz += child.size();
      }
    }
    return sz;
  }
}
| 6,084 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/impl/ValidationHelper.java | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.ipc.http.HttpResponse;
import org.slf4j.Logger;
/**
 * Helper for updating the {@code spectator.measurements} counters based on the
 * validation results returned when publishing to the backend.
 */
final class ValidationHelper {

  private final Logger logger;
  private final ObjectMapper jsonMapper;

  // Counters broken down by outcome: sent, dropped for validation errors, dropped
  // for http errors, and dropped for other reasons.
  private final Counter measurementsSent;
  private final Counter measurementsDroppedInvalid;
  private final Counter measurementsDroppedHttp;
  private final Counter measurementsDroppedOther;

  ValidationHelper(Logger logger, ObjectMapper jsonMapper, Registry registry) {
    this.logger = logger;
    this.jsonMapper = jsonMapper;
    Id baseId = registry.createId("spectator.measurements");
    Id droppedId = baseId.withTag("id", "dropped");
    this.measurementsSent = registry.counter(baseId.withTag("id", "sent"));
    this.measurementsDroppedHttp = registry.counter(droppedId.withTag("error", "http-error"));
    this.measurementsDroppedInvalid = registry.counter(droppedId.withTag("error", "validation"));
    this.measurementsDroppedOther = registry.counter(droppedId.withTag("error", "other"));
  }

  /** Record measurements that were dropped because of an HTTP level failure. */
  void incrementDroppedHttp(int amount) {
    measurementsDroppedHttp.increment(amount);
  }

  /**
   * Report metrics and do basic logging of validation results to help the user with
   * debugging.
   */
  void recordResults(int numMeasurements, HttpResponse res) {
    if (res.status() == 200) {
      // Everything was accepted.
      measurementsSent.increment(numMeasurements);
    } else if (res.status() < 500) {
      // For validation:
      // 202 - partial failure
      // 400 - all failed, could also be some other sort of failure
      try {
        ValidationResponse vres = jsonMapper.readValue(res.entity(), ValidationResponse.class);
        measurementsDroppedInvalid.increment(vres.getErrorCount());
        measurementsSent.increment(numMeasurements - vres.getErrorCount());
        logger.warn("{} measurement(s) dropped due to validation errors: {}",
            vres.getErrorCount(), vres.errorSummary());
      } catch (Exception e) {
        // Likely some other 400 error. Log at trace level in case the cause is really needed.
        logger.trace("failed to parse response", e);
        logger.warn("{} measurement(s) dropped. Http status: {}", numMeasurements, res.status());
        measurementsDroppedOther.increment(numMeasurements);
      }
    } else {
      // Some sort of server side failure
      measurementsDroppedHttp.increment(numMeasurements);
    }
  }
}
| 6,085 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/impl/DataExpr.java | /*
* Copyright 2014-2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import com.netflix.spectator.impl.Preconditions;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
/**
* Data expressions for defining how to aggregate values. For more information see
* <a href="https://netflix.github.io/atlas-docs/asl/tutorial/">Atlas docs</a>.
*
* <p><b>Classes in this package are only intended for use internally within spectator.
* They may change at any time and without notice.</b>
*/
public interface DataExpr {
/** Query for selecting the input measurements that should be aggregated. */
Query query();
/** Returns true if the aggregation type is accumulating (sum or count). */
boolean isAccumulating();
/** Returns true if the aggregation type is count. */
default boolean isCount() {
return false;
}
/**
* Get the set of result tags for a particular datapoint. The result tags will include
* everything with an exact match in the query clause and keys used in a group by
* clause.
*
* @param tags
* Full set of tags for a datapoint.
* @return
* Result tags for a datapoint.
*/
Map<String, String> resultTags(Map<String, String> tags);
/**
* Get an aggregator that can be incrementally fed values. See {@link #eval(Iterable)} if
* you already have the completed list of values.
*
* @param tags
* The set of tags for the final aggregate.
* @param shouldCheckQuery
* If true, then values will be checked against the query before applying to the
* aggregate. Otherwise, it is assumed that the user has already verified that the
* datapoint matches before passing it in.
* @return
* Aggregator for this data expression.
*/
Aggregator aggregator(Map<String, String> tags, boolean shouldCheckQuery);
/**
* Get an aggregator that can be incrementally fed values. See {@link #eval(Iterable)} if
* you already have the completed list of values.
*
* @param shouldCheckQuery
* If true, then values will be checked against the query before applying to the
* aggregate. Otherwise, it is assumed that the user has already verified that the
* datapoint matches before passing it in.
* @return
* Aggregator for this data expression.
*/
default Aggregator aggregator(boolean shouldCheckQuery) {
return aggregator(resultTags(Collections.emptyMap()), shouldCheckQuery);
}
/**
 * Evaluate the data expression over the input.
 *
 * @param input
 *     Set of data values. The data will get filtered based on the query, that does
 *     not need to be done in advance.
 * @return
 *     Aggregated data values.
 */
default Iterable<TagsValuePair> eval(Iterable<TagsValuePair> input) {
  // Query checking is enabled because the input has not been pre-filtered.
  Aggregator aggr = aggregator(true);
  for (TagsValuePair p : input) {
    aggr.update(p);
  }
  return aggr.result();
}
/** Helper for incrementally computing an aggregate of a set of tag values. */
interface Aggregator {
  /** Update the aggregate with the provided value. */
  void update(TagsValuePair p);

  /** Returns the aggregated data values. May be called after all updates are applied. */
  Iterable<TagsValuePair> result();
}
/**
* Includes all datapoints that match the query expression.
*/
final class All implements DataExpr {
private final Query query;
/** Create a new instance. */
All(Query query) {
this.query = query;
}
@Override public Query query() {
return query;
}
@Override public boolean isAccumulating() {
return false;
}
@Override public Map<String, String> resultTags(Map<String, String> tags) {
return tags;
}
@Override public Aggregator aggregator(Map<String, String> tags, boolean shouldCheckQuery) {
return new Aggregator() {
private final List<TagsValuePair> pairs = new ArrayList<>();
@Override public void update(TagsValuePair p) {
if (!shouldCheckQuery || query.matches(p.tags())) {
pairs.add(new TagsValuePair(tags, p.value()));
}
}
@Override public Iterable<TagsValuePair> result() {
return pairs;
}
};
}
@Override public String toString() {
return query.toString() + ",:all";
}
@Override public boolean equals(Object obj) {
if (this == obj) return true;
if (!(obj instanceof All)) return false;
All other = (All) obj;
return query.equals(other.query);
}
@Override public int hashCode() {
int result = query.hashCode();
result = 31 * result + ":all".hashCode();
return result;
}
}
  /**
   * Base type for simple aggregate functions. Used as the per-group aggregation for
   * {@link GroupBy} and the rollup operators.
   */
  interface AggregateFunction extends DataExpr {
  }
/**
* Aggregates all datapoints that match the query to a single datapoint that is the
* sum of the input values. See <a href="https://netflix.github.io/atlas-docs/asl/ref/sum/">docs</a>
* for more information.
*/
final class Sum implements AggregateFunction {
private final Query query;
private final Map<String, String> queryTags;
/** Create a new instance. */
Sum(Query query) {
this.query = query;
this.queryTags = Collections.unmodifiableMap(query.exactTags());
}
@Override public Query query() {
return query;
}
@Override public boolean isAccumulating() {
return true;
}
@Override public Map<String, String> resultTags(Map<String, String> tags) {
return queryTags;
}
@Override public Aggregator aggregator(Map<String, String> tags, boolean shouldCheckQuery) {
return new Aggregator() {
private double aggr = 0.0;
private int count = 0;
@Override public void update(TagsValuePair p) {
if (!shouldCheckQuery || query.matches(p.tags())) {
aggr += p.value();
++count;
}
}
@Override public Iterable<TagsValuePair> result() {
return (count > 0)
? Collections.singletonList(new TagsValuePair(tags, aggr))
: Collections.emptyList();
}
};
}
@Override public String toString() {
return query.toString() + ",:sum";
}
@Override public boolean equals(Object obj) {
if (this == obj) return true;
if (!(obj instanceof Sum)) return false;
Sum other = (Sum) obj;
return query.equals(other.query) && queryTags.equals(other.queryTags);
}
@Override public int hashCode() {
int result = query.hashCode();
result = 31 * result + queryTags.hashCode();
result = 31 * result + ":sum".hashCode();
return result;
}
}
/**
* Aggregates all datapoints that match the query to a single datapoint that is the
* minimum of the input values. See <a href="https://netflix.github.io/atlas-docs/asl/ref/min/">docs</a>
* for more information.
*/
final class Min implements AggregateFunction {
private final Query query;
private final Map<String, String> queryTags;
/** Create a new instance. */
Min(Query query) {
this.query = query;
this.queryTags = Collections.unmodifiableMap(query.exactTags());
}
@Override public Query query() {
return query;
}
@Override public boolean isAccumulating() {
return false;
}
@Override public Map<String, String> resultTags(Map<String, String> tags) {
return queryTags;
}
@Override public Aggregator aggregator(Map<String, String> tags, boolean shouldCheckQuery) {
return new Aggregator() {
private double aggr = Double.MAX_VALUE;
private int count = 0;
@Override public void update(TagsValuePair p) {
if ((!shouldCheckQuery || query.matches(p.tags())) && p.value() < aggr) {
aggr = p.value();
++count;
}
}
@Override public Iterable<TagsValuePair> result() {
return (count > 0)
? Collections.singletonList(new TagsValuePair(tags, aggr))
: Collections.emptyList();
}
};
}
@Override public String toString() {
return query.toString() + ",:min";
}
@Override public boolean equals(Object obj) {
if (this == obj) return true;
if (!(obj instanceof Min)) return false;
Min other = (Min) obj;
return query.equals(other.query) && queryTags.equals(other.queryTags);
}
@Override public int hashCode() {
int result = query.hashCode();
result = 31 * result + queryTags.hashCode();
result = 31 * result + ":min".hashCode();
return result;
}
}
/**
* Aggregates all datapoints that match the query to a single datapoint that is the
* maximum of the input values. See <a href="https://netflix.github.io/atlas-docs/asl/ref/max/">docs</a>
* for more information.
*/
final class Max implements AggregateFunction {
private final Query query;
private final Map<String, String> queryTags;
/** Create a new instance. */
Max(Query query) {
this.query = query;
this.queryTags = Collections.unmodifiableMap(query.exactTags());
}
@Override public Query query() {
return query;
}
@Override public boolean isAccumulating() {
return false;
}
@Override public Map<String, String> resultTags(Map<String, String> tags) {
return queryTags;
}
@Override public Aggregator aggregator(Map<String, String> tags, boolean shouldCheckQuery) {
return new Aggregator() {
private double aggr = -Double.MAX_VALUE;
private int count = 0;
@Override public void update(TagsValuePair p) {
if ((!shouldCheckQuery || query.matches(p.tags())) && p.value() > aggr) {
aggr = p.value();
++count;
}
}
@Override public Iterable<TagsValuePair> result() {
return (count > 0)
? Collections.singletonList(new TagsValuePair(tags, aggr))
: Collections.emptyList();
}
};
}
@Override public String toString() {
return query.toString() + ",:max";
}
@Override public boolean equals(Object obj) {
if (this == obj) return true;
if (!(obj instanceof Max)) return false;
Max other = (Max) obj;
return query.equals(other.query) && queryTags.equals(other.queryTags);
}
@Override public int hashCode() {
int result = query.hashCode();
result = 31 * result + queryTags.hashCode();
result = 31 * result + ":max".hashCode();
return result;
}
}
/**
* Aggregates all datapoints that match the query to a single datapoint that is the
* number of input values. See <a href="https://netflix.github.io/atlas-docs/asl/ref/count/">docs</a>
* for more information.
*/
final class Count implements AggregateFunction {
private final Query query;
private final Map<String, String> queryTags;
/** Create a new instance. */
Count(Query query) {
this.query = query;
this.queryTags = Collections.unmodifiableMap(query.exactTags());
}
@Override public Query query() {
return query;
}
@Override public boolean isAccumulating() {
return true;
}
@Override public boolean isCount() {
return true;
}
@Override public Map<String, String> resultTags(Map<String, String> tags) {
return queryTags;
}
@Override public Aggregator aggregator(Map<String, String> tags, boolean shouldCheckQuery) {
return new Aggregator() {
private int aggr = 0;
@Override public void update(TagsValuePair p) {
if (!shouldCheckQuery || query.matches(p.tags())) {
++aggr;
}
}
@Override public Iterable<TagsValuePair> result() {
return (aggr > 0)
? Collections.singletonList(new TagsValuePair(tags, aggr))
: Collections.emptyList();
}
};
}
@Override public String toString() {
return query.toString() + ",:count";
}
@Override public boolean equals(Object obj) {
if (this == obj) return true;
if (!(obj instanceof Count)) return false;
Count other = (Count) obj;
return query.equals(other.query) && queryTags.equals(other.queryTags);
}
@Override public int hashCode() {
int result = query.hashCode();
result = 31 * result + queryTags.hashCode();
result = 31 * result + ":count".hashCode();
return result;
}
}
/**
* Compute a set of time series matching the query and grouped by the specified keys.
* See <a href="https://netflix.github.io/atlas-docs/asl/ref/by/">docs</a> for more
* information.
*/
final class GroupBy implements DataExpr {
private final AggregateFunction af;
private final List<String> keys;
/** Create a new instance. */
GroupBy(AggregateFunction af, List<String> keys) {
Preconditions.checkArg(!keys.isEmpty(), "key list for group by cannot be empty");
this.af = af;
this.keys = keys;
}
@SuppressWarnings("PMD.ReturnEmptyCollectionRatherThanNull")
private Map<String, String> keyTags(Map<String, String> tags) {
Map<String, String> result = new HashMap<>();
for (String k : keys) {
String v = tags.get(k);
if (v == null) {
return null;
}
result.put(k, v);
}
return result;
}
@Override public Query query() {
return af.query();
}
@Override public boolean isAccumulating() {
return af.isAccumulating();
}
@Override public boolean isCount() {
return af.isCount();
}
@SuppressWarnings("PMD.ReturnEmptyCollectionRatherThanNull")
@Override public Map<String, String> resultTags(Map<String, String> tags) {
Map<String, String> resultTags = keyTags(tags);
if (resultTags == null) {
return null;
} else {
resultTags.putAll(af.resultTags(tags));
return resultTags;
}
}
@Override public Aggregator aggregator(Map<String, String> ignored, boolean shouldCheckQuery) {
return new Aggregator() {
private final Map<Map<String, String>, Aggregator> aggrs = new HashMap<>();
@Override public void update(TagsValuePair p) {
Map<String, String> tags = p.tags();
if (!shouldCheckQuery || af.query().matches(tags)) {
Map<String, String> k = resultTags(tags);
if (k != null) {
aggrs.computeIfAbsent(k, ks -> af.aggregator(ks, false)).update(p);
}
}
}
@Override public Iterable<TagsValuePair> result() {
return aggrs.values().stream()
.flatMap(a -> StreamSupport.stream(a.result().spliterator(), false))
.collect(Collectors.toList());
}
};
}
@Override public String toString() {
final String keyList = String.join(",", keys);
return af.toString() + ",(," + keyList + ",),:by";
}
@Override public boolean equals(Object obj) {
if (this == obj) return true;
if (!(obj instanceof GroupBy)) return false;
GroupBy other = (GroupBy) obj;
return af.equals(other.af) && keys.equals(other.keys);
}
@Override public int hashCode() {
int result = af.hashCode();
result = 31 * result + keys.hashCode();
result = 31 * result + ":by".hashCode();
return result;
}
}
/**
* Rollup inputs by dropping the specified keys. This is typically used with
* a rollup config to reduce the amount of data going out. If a whitelist
* of keys is needed, then see {@link KeepRollup}.
*/
final class DropRollup implements DataExpr {
private final AggregateFunction af;
private final List<String> keys;
/** Create a new instance. */
DropRollup(AggregateFunction af, List<String> keys) {
Preconditions.checkArg(!keys.contains("name"), "name is required and cannot be dropped");
this.af = af;
this.keys = keys;
}
@Override public Query query() {
return af.query();
}
@Override public boolean isAccumulating() {
return af.isAccumulating();
}
@Override public boolean isCount() {
return af.isCount();
}
@Override public Map<String, String> resultTags(Map<String, String> tags) {
Map<String, String> resultTags = new HashMap<>(tags);
for (String k : keys) {
resultTags.remove(k);
}
return resultTags;
}
@Override public Aggregator aggregator(Map<String, String> tags, boolean shouldCheckQuery) {
return new Aggregator() {
private final Map<Map<String, String>, Aggregator> aggrs = new HashMap<>();
@Override public void update(TagsValuePair p) {
if (!shouldCheckQuery || af.query().matches(p.tags())) {
Map<String, String> k = resultTags(p.tags());
aggrs.computeIfAbsent(k, ks -> af.aggregator(ks, false)).update(p);
}
}
@Override public Iterable<TagsValuePair> result() {
return aggrs.values().stream()
.flatMap(a -> StreamSupport.stream(a.result().spliterator(), false))
.collect(Collectors.toList());
}
};
}
@Override public String toString() {
final String keyList = String.join(",", keys);
return af.toString() + ",(," + keyList + ",),:rollup-drop";
}
@Override public boolean equals(Object obj) {
if (this == obj) return true;
if (!(obj instanceof DropRollup)) return false;
DropRollup other = (DropRollup) obj;
return af.equals(other.af) && keys.equals(other.keys);
}
@Override public int hashCode() {
int result = af.hashCode();
result = 31 * result + keys.hashCode();
result = 31 * result + ":by".hashCode();
return result;
}
}
/**
* Rollup inputs by only keeping the specified keys. This is typically used with
* a rollup config to reduce the amount of data going out. If a blacklist of
* keys is needed, then see {@link DropRollup}.
*/
final class KeepRollup implements DataExpr {
private final AggregateFunction af;
private final Set<String> keys;
/** Create a new instance. */
KeepRollup(AggregateFunction af, List<String> keys) {
this.af = af;
this.keys = new HashSet<>(keys);
this.keys.add("name");
}
@Override public Query query() {
return af.query();
}
@Override public boolean isAccumulating() {
return af.isAccumulating();
}
@Override public boolean isCount() {
return af.isCount();
}
@Override public Map<String, String> resultTags(Map<String, String> tags) {
return tags.entrySet().stream()
.filter(e -> keys.contains(e.getKey()))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
@Override public Aggregator aggregator(Map<String, String> ignored, boolean shouldCheckQuery) {
return new Aggregator() {
private final Map<Map<String, String>, Aggregator> aggrs = new HashMap<>();
@Override public void update(TagsValuePair p) {
if (!shouldCheckQuery || af.query().matches(p.tags())) {
Map<String, String> k = resultTags(p.tags());
aggrs.computeIfAbsent(k, ks -> af.aggregator(ks, false)).update(p);
}
}
@Override public Iterable<TagsValuePair> result() {
return aggrs.values().stream()
.flatMap(a -> StreamSupport.stream(a.result().spliterator(), false))
.collect(Collectors.toList());
}
};
}
@Override public String toString() {
final String keyList = String.join(",", keys);
return af.toString() + ",(," + keyList + ",),:rollup-keep";
}
@Override public boolean equals(Object obj) {
if (this == obj) return true;
if (!(obj instanceof KeepRollup)) return false;
KeepRollup other = (KeepRollup) obj;
return af.equals(other.af) && keys.equals(other.keys);
}
@Override public int hashCode() {
int result = af.hashCode();
result = 31 * result + keys.hashCode();
result = 31 * result + ":by".hashCode();
return result;
}
}
}
| 6,086 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/impl/Subscription.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
/**
 * Model object for an individual subscription coming from LWC.
 *
 * <b>Classes in this package are only intended for use internally within spectator. They may
 * change at any time and without notice.</b>
 */
public final class Subscription {

  private String id;
  private String expression;
  private long frequency;

  // Parsed form of the expression, computed lazily by dataExpr() and invalidated
  // whenever the expression is updated.
  private DataExpr expr;

  /** Create a new instance. */
  public Subscription() {
    // Will get filled in with set methods
  }

  /** Return the data expression for this subscription, parsing it on first use. */
  public DataExpr dataExpr() {
    if (expr == null) {
      expr = Parser.parseDataExpr(expression);
    }
    return expr;
  }

  /** Id for a subscription. */
  public String getId() {
    return id;
  }

  /** Set the subscription id. */
  public void setId(String id) {
    this.id = id;
  }

  /** Set the subscription id. */
  public Subscription withId(String id) {
    this.id = id;
    return this;
  }

  /** Expression for the subscription. */
  public String getExpression() {
    return expression;
  }

  /** Set the expression for the subscription. */
  public void setExpression(String expression) {
    this.expression = expression;
    // Invalidate the cached parse result so dataExpr() re-parses.
    this.expr = null;
  }

  /** Set the expression for the subscription. */
  public Subscription withExpression(String expression) {
    setExpression(expression);
    return this;
  }

  /** Requested frequency to send data for the subscription. */
  public long getFrequency() {
    return frequency;
  }

  /** Set the requested frequency to send data for the subscription. */
  public void setFrequency(long frequency) {
    this.frequency = frequency;
  }

  /** Set the requested frequency to send data for the subscription. */
  public Subscription withFrequency(long frequency) {
    this.frequency = frequency;
    return this;
  }

  @Override public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    Subscription that = (Subscription) o;
    return frequency == that.frequency
        && equalsOrNull(id, that.id)
        && equalsOrNull(expression, that.expression);
  }

  /** Null-safe equality check for the nullable fields. */
  private boolean equalsOrNull(Object a, Object b) {
    return (a == null && b == null) || (a != null && a.equals(b));
  }

  @Override public int hashCode() {
    int result = hashCodeOrZero(id);
    result = 31 * result + hashCodeOrZero(expression);
    // Long.hashCode folds the long the same way as the previous manual
    // (v ^ (v >>> 32)) expression, but is clearer.
    result = 31 * result + Long.hashCode(frequency);
    return result;
  }

  /** Null-safe hash code for the nullable fields. */
  private int hashCodeOrZero(Object o) {
    return (o == null) ? 0 : o.hashCode();
  }

  @Override public String toString() {
    return "Subscription(" + id + ",[" + expression + "]," + frequency + ")";
  }
}
| 6,087 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/impl/JsonUtils.java | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;
import com.netflix.spectator.api.Measurement;
import com.netflix.spectator.impl.AsciiSet;
import java.util.function.Function;
/**
 * Helper functions for creating the mappers used to encode Atlas payloads.
 */
public final class JsonUtils {

  private JsonUtils() {
    // static helpers only
  }

  /**
   * Return a mapping function that will replace characters that are not matched by the
   * pattern with an underscore. A null pattern results in the identity function.
   */
  public static Function<String, String> createReplacementFunction(String pattern) {
    if (pattern == null) {
      return Function.identity();
    }
    AsciiSet allowed = AsciiSet.fromPattern(pattern);
    return s -> allowed.replaceNonMembers(s, '_');
  }

  /** Create an object mapper with a custom serializer for measurements. */
  public static ObjectMapper createMapper(JsonFactory factory, Function<String, String> fixTag) {
    MeasurementSerializer serializer = new MeasurementSerializer(fixTag);
    SimpleModule module = new SimpleModule().addSerializer(Measurement.class, serializer);
    return new ObjectMapper(factory).registerModule(module);
  }
}
| 6,088 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/impl/DefaultPublisher.java | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas.impl;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.smile.SmileFactory;
import com.netflix.spectator.api.NoopRegistry;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.atlas.AtlasConfig;
import com.netflix.spectator.atlas.AtlasRegistry;
import com.netflix.spectator.atlas.Publisher;
import com.netflix.spectator.impl.StreamHelper;
import com.netflix.spectator.ipc.http.HttpClient;
import com.netflix.spectator.ipc.http.HttpResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.net.URI;
import java.time.Instant;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
/**
 * Default implementation of {@link Publisher} that encodes payloads with Jackson and
 * posts them to the configured Atlas publish and LWC eval endpoints over HTTP. Encoding
 * and sending run asynchronously on a thread pool created by {@link #init()}.
 */
public final class DefaultPublisher implements Publisher {

  // Timer name used to track the apparent clock skew between client and server.
  private static final String CLOCK_SKEW_TIMER = "spectator.atlas.clockSkew";

  // NOTE(review): logs under AtlasRegistry's logger name rather than this class,
  // presumably so all registry output is grouped under one logger -- confirm.
  private final Logger logger = LoggerFactory.getLogger(AtlasRegistry.class);

  // Reuses byte array output streams to reduce allocations when encoding batches.
  private final StreamHelper streamHelper = new StreamHelper();

  // Endpoint for publishing measurement batches.
  private final URI uri;

  // Endpoint for publishing data for LWC subscriptions.
  private final URI evalUri;

  // HTTP timeouts in milliseconds, taken from the config durations.
  private final int connectTimeout;
  private final int readTimeout;

  // Number of threads for the encode/send pool created by init().
  private final int numThreads;

  // Registry used for tracking metrics about publishing itself (skew, validation).
  private final Registry debugRegistry;

  private final HttpClient client;

  // JSON is used for trace logging and the eval payload; Smile for the publish payload.
  private final ObjectMapper jsonMapper;
  private final ObjectMapper smileMapper;

  private final ValidationHelper validationHelper;

  // Created by init() and torn down by close(). NOTE(review): publish() before
  // init() or after close() will fail because the pool is null; callers are
  // expected to respect the lifecycle -- confirm.
  private ExecutorService senderPool;

  /** Create a new instance using a default HTTP client. */
  public DefaultPublisher(final AtlasConfig config) {
    this(config, null);
  }

  /** Create a new instance using the debug registry from the config. */
  public DefaultPublisher(final AtlasConfig config, final HttpClient client) {
    this(config, client, config.debugRegistry());
  }

  /**
   * Create a new instance.
   *
   * @param config
   *     Settings for the endpoints, timeouts, thread count, and valid tag characters.
   * @param client
   *     HTTP client for sending payloads. If null, a default client based on the
   *     debug registry is created.
   * @param registry
   *     Registry for tracking metrics about publishing. If null, a no-op registry
   *     is used.
   */
  public DefaultPublisher(
      final AtlasConfig config, final HttpClient client, final Registry registry) {
    this.uri = URI.create(config.uri());
    this.evalUri = URI.create(config.evalUri());
    this.connectTimeout = (int) config.connectTimeout().toMillis();
    this.readTimeout = (int) config.readTimeout().toMillis();
    this.numThreads = config.numThreads();
    this.debugRegistry = Optional.ofNullable(registry).orElse(new NoopRegistry());
    this.client = client != null ? client : HttpClient.create(debugRegistry);
    // Characters outside the configured set are replaced with '_' when serializing.
    Function<String, String> replacementFunc =
        JsonUtils.createReplacementFunction(config.validTagCharacters());
    this.jsonMapper = JsonUtils.createMapper(new JsonFactory(), replacementFunc);
    this.smileMapper = JsonUtils.createMapper(new SmileFactory(), replacementFunc);
    this.validationHelper = new ValidationHelper(logger, jsonMapper, debugRegistry);
  }

  @Override
  public void init() {
    // Thread pool for encoding the requests and sending
    ThreadFactory factory = new ThreadFactory() {
      private final AtomicInteger next = new AtomicInteger();

      @Override public Thread newThread(Runnable r) {
        final String name = "spectator-atlas-publish-" + next.getAndIncrement();
        final Thread t = new Thread(r, name);
        // Daemon threads so the pool does not prevent JVM shutdown.
        t.setDaemon(true);
        return t;
      }
    };
    senderPool = Executors.newFixedThreadPool(numThreads, factory);
  }

  /**
   * Optimization to reduce the allocations for encoding the payload. The ByteArrayOutputStreams
   * get reused to avoid the allocations for growing the buffer. In addition, the data is gzip
   * compressed inline rather than relying on the HTTP client to do it. This reduces the buffer
   * sizes and avoids another copy step and allocation for creating the compressed buffer.
   */
  private byte[] encodeBatch(PublishPayload payload) throws IOException {
    ByteArrayOutputStream baos = streamHelper.getOrCreateStream();
    try (GzipLevelOutputStream out = new GzipLevelOutputStream(baos)) {
      smileMapper.writeValue(out, payload);
    }
    return baos.toByteArray();
  }

  /**
   * Record the difference between the date response time and the local time on the server.
   * This is used to get a rough idea of the amount of skew in the environment. Ideally it
   * should be fairly small. The date header will only have seconds so we expect to regularly
   * have differences of up to 1 second. Note, that it is a rough estimate and could be
   * elevated because of unrelated problems like GC or network delays.
   *
   * @param responseTimestamp
   *     Epoch millis from the response Date header, or 0 if the header was missing.
   */
  private void recordClockSkew(long responseTimestamp) {
    if (responseTimestamp == 0L) {
      logger.debug("no date timestamp on response, cannot record skew");
    } else {
      final long delta = debugRegistry.clock().wallTime() - responseTimestamp;
      if (delta >= 0L) {
        // Local clock is running fast compared to the server. Note this should also be the
        // common case for if the clocks are in sync as there will be some delay for the server
        // response to reach this node.
        debugRegistry.timer(CLOCK_SKEW_TIMER, "id", "fast").record(delta, TimeUnit.MILLISECONDS);
      } else {
        // Local clock is running slow compared to the server. This means the response timestamp
        // appears to be after the current time on this node. The timer will ignore negative
        // values so we negate and record it with a different id.
        debugRegistry.timer(CLOCK_SKEW_TIMER, "id", "slow").record(-delta, TimeUnit.MILLISECONDS);
      }
      logger.debug("clock skew between client and server: {}ms", delta);
    }
  }

  /**
   * Asynchronously send a batch of measurements to the publish endpoint. Failures are
   * logged and counted as dropped via the validation helper rather than propagated.
   */
  @Override
  public CompletableFuture<Void> publish(PublishPayload payload) {
    Runnable task = () -> {
      try {
        if (logger.isTraceEnabled()) {
          logger.trace("publish payload: {}", jsonMapper.writeValueAsString(payload));
        }
        HttpResponse res = client.post(uri)
            .withConnectTimeout(connectTimeout)
            .withReadTimeout(readTimeout)
            .addHeader("Content-Encoding", "gzip")
            .withContent("application/x-jackson-smile", encodeBatch(payload))
            .send();
        Instant date = res.dateHeader("Date");
        recordClockSkew((date == null) ? 0L : date.toEpochMilli());
        validationHelper.recordResults(payload.getMetrics().size(), res);
      } catch (Exception e) {
        logger.warn("failed to send metrics (uri={})", uri, e);
        validationHelper.incrementDroppedHttp(payload.getMetrics().size());
      }
    };
    return CompletableFuture.runAsync(task, senderPool);
  }

  /**
   * Asynchronously send subscription data to the LWC eval endpoint. Failures are
   * logged and otherwise ignored; eval data is best effort.
   */
  @Override
  public CompletableFuture<Void> publish(EvalPayload payload) {
    Runnable task = () -> {
      try {
        String json = jsonMapper.writeValueAsString(payload);
        if (logger.isTraceEnabled()) {
          logger.trace("eval payload: {}", json);
        }
        client.post(evalUri)
            .withConnectTimeout(connectTimeout)
            .withReadTimeout(readTimeout)
            .withJsonContent(json)
            .send();
      } catch (Exception e) {
        logger.warn("failed to send metrics for subscriptions (uri={})", evalUri, e);
      }
    };
    return CompletableFuture.runAsync(task, senderPool);
  }

  /** Shut down the sender pool. Safe to call multiple times. */
  @Override
  public void close() throws IOException {
    if (senderPool != null) {
      senderPool.shutdown();
      senderPool = null;
    }
  }
}
| 6,089 |
0 | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas | Create_ds/spectator/spectator-reg-atlas/src/main/java/com/netflix/spectator/atlas/impl/package-info.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Classes in this package are only intended for use internally within spectator. They may change
* at any time and without notice.
*/
package com.netflix.spectator.atlas.impl;
| 6,090 |
0 | Create_ds/spectator/spectator-reg-atlas/src/jmh/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/jmh/java/com/netflix/spectator/atlas/BatchRecordBench.java | /*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Clock;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.infra.Blackhole;
import java.util.Random;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
@State(Scope.Thread)
public class BatchRecordBench {

  /**
   * This benchmark captures the relative difference between the batch and the iterative
   * record() loop when using something like the DistributionSummary.
   *
   * This also contains a prototype FastClock to compare this against the system
   * clocksource for timing performance. Results below.
   *
   * For both of these tests, the TSC clocksource is used, verified with:
   * (nfsuper) ~ $ cat /sys/devices/system/clocksource/clocksource0/current_clocksource
   * tsc
   *
   * Sample results as follows:
   * =======
   * For an m5.xlarge
   * Base AMI: bionic-classicbase-x86_64-202112020555-ebs
   *
   * The baseline result as of commit 0d09722b8adc5403f767e0d1bbd827c76c2794e0 on m5.xlarge:
   * BatchRecordBench.distributionOneAtATime  thrpt   25  43.896 ± 0.789  ops/s
   *
   * As of this commit:
   * Benchmark                                (batchSize)    (clock)   Mode  Cnt         Score        Error  Units
   * BatchRecordBench.distributionBatch                 1  fastclock  thrpt   25  10653757.154 ±  28278.785  ops/s
   * BatchRecordBench.distributionBatch                 1     system  thrpt   25  10698329.757 ±  32951.571  ops/s
   * BatchRecordBench.distributionBatch                10  fastclock  thrpt   25   9522092.321 ±  28014.036  ops/s
   * BatchRecordBench.distributionBatch                10     system  thrpt   25   9559843.855 ±   6779.719  ops/s
   * BatchRecordBench.distributionBatch               100  fastclock  thrpt   25   2897853.712 ±   8733.913  ops/s
   * BatchRecordBench.distributionBatch               100     system  thrpt   25   2893252.879 ±   7187.279  ops/s
   * BatchRecordBench.distributionBatch              1000  fastclock  thrpt   25    366835.739 ±    208.298  ops/s
   * BatchRecordBench.distributionBatch              1000     system  thrpt   25    367808.344 ±    781.413  ops/s
   * BatchRecordBench.distributionBatch             10000  fastclock  thrpt   25     38245.067 ±     33.743  ops/s
   * BatchRecordBench.distributionBatch             10000     system  thrpt   25     38303.265 ±     33.823  ops/s
   * BatchRecordBench.distributionBatch            100000  fastclock  thrpt   25      3846.227 ±      3.359  ops/s
   * BatchRecordBench.distributionBatch            100000     system  thrpt   25      3847.573 ±      2.944  ops/s
   * BatchRecordBench.distributionOneAtATime            1  fastclock  thrpt   25  11087902.444 ±  18214.743  ops/s
   * BatchRecordBench.distributionOneAtATime            1     system  thrpt   25  11132286.157 ±  33009.169  ops/s
   * BatchRecordBench.distributionOneAtATime           10  fastclock  thrpt   25   1089841.847 ±   1166.172  ops/s
   * BatchRecordBench.distributionOneAtATime           10     system  thrpt   25   1090766.388 ±   1124.590  ops/s
   * BatchRecordBench.distributionOneAtATime          100  fastclock  thrpt   25    109084.262 ±    498.868  ops/s
   * BatchRecordBench.distributionOneAtATime          100     system  thrpt   25    108806.769 ±    109.836  ops/s
   * BatchRecordBench.distributionOneAtATime         1000  fastclock  thrpt   25     11547.783 ±     28.136  ops/s
   * BatchRecordBench.distributionOneAtATime         1000     system  thrpt   25     11548.784 ±     37.187  ops/s
   * BatchRecordBench.distributionOneAtATime        10000  fastclock  thrpt   25      1051.379 ±      1.347  ops/s
   * BatchRecordBench.distributionOneAtATime        10000     system  thrpt   25      1054.978 ±      2.069  ops/s
   * BatchRecordBench.distributionOneAtATime       100000  fastclock  thrpt   25       105.588 ±      0.366  ops/s
   * BatchRecordBench.distributionOneAtATime       100000     system  thrpt   25       105.453 ±      0.186  ops/s
   *
   * =======
   * For an r5.xlarge
   * Base AMI: bionic-classicbase-x86_64-202201262157-ebs
   *
   * The baseline result as of commit 0d09722b8adc5403f767e0d1bbd827c76c2794e0:
   * BatchRecordBench.distributionOneAtATime  thrpt   25  40.099 ± 0.539  ops/s
   *
   * As of this commit:
   * Benchmark                                (batchSize)    (clock)   Mode  Cnt        Score        Error  Units
   * BatchRecordBench.distributionBatch                 1  fastclock  thrpt   25  9385655.567 ± 340778.340  ops/s
   * BatchRecordBench.distributionBatch                 1     system  thrpt   25  9668568.661 ± 289469.212  ops/s
   * BatchRecordBench.distributionBatch                10  fastclock  thrpt   25  8364336.008 ± 379818.726  ops/s
   * BatchRecordBench.distributionBatch                10     system  thrpt   25  8291598.971 ± 335957.214  ops/s
   * BatchRecordBench.distributionBatch               100  fastclock  thrpt   25  2690204.344 ±  71589.264  ops/s
   * BatchRecordBench.distributionBatch               100     system  thrpt   25  2655837.607 ±  84584.223  ops/s
   * BatchRecordBench.distributionBatch              1000  fastclock  thrpt   25   337184.218 ±  10589.541  ops/s
   * BatchRecordBench.distributionBatch              1000     system  thrpt   25   338195.706 ±   8983.223  ops/s
   * BatchRecordBench.distributionBatch             10000  fastclock  thrpt   25    35384.994 ±    940.235  ops/s
   * BatchRecordBench.distributionBatch             10000     system  thrpt   25    35098.662 ±    898.807  ops/s
   * BatchRecordBench.distributionBatch            100000  fastclock  thrpt   25     3544.591 ±    106.967  ops/s
   * BatchRecordBench.distributionBatch            100000     system  thrpt   25     3481.677 ±     92.268  ops/s
   * BatchRecordBench.distributionOneAtATime            1  fastclock  thrpt   25  9101177.222 ± 327727.423  ops/s
   * BatchRecordBench.distributionOneAtATime            1     system  thrpt   25  9215129.213 ± 450134.957  ops/s
   * BatchRecordBench.distributionOneAtATime           10  fastclock  thrpt   25   898688.979 ±  47922.252  ops/s
   * BatchRecordBench.distributionOneAtATime           10     system  thrpt   25   928435.778 ±  29301.506  ops/s
   * BatchRecordBench.distributionOneAtATime          100  fastclock  thrpt   25    96247.965 ±   3416.122  ops/s
   * BatchRecordBench.distributionOneAtATime          100     system  thrpt   25    95073.152 ±   3232.757  ops/s
   * BatchRecordBench.distributionOneAtATime         1000  fastclock  thrpt   25     9977.180 ±    263.313  ops/s
   * BatchRecordBench.distributionOneAtATime         1000     system  thrpt   25    10139.411 ±    351.895  ops/s
   * BatchRecordBench.distributionOneAtATime        10000  fastclock  thrpt   25      921.888 ±     28.627  ops/s
   * BatchRecordBench.distributionOneAtATime        10000     system  thrpt   25      893.432 ±     27.858  ops/s
   * BatchRecordBench.distributionOneAtATime       100000  fastclock  thrpt   25       91.880 ±      3.205  ops/s
   * BatchRecordBench.distributionOneAtATime       100000     system  thrpt   25       92.206 ±      3.617  ops/s
   * =======
   */

  private AtlasRegistry registry;
  private AtlasDistributionSummary dist;
  private long[] amounts;

  /** Which clock implementation to drive the summary with. */
  @Param({ "fastclock", "system" })
  public String clock;

  /** Number of values recorded per benchmark invocation. */
  @Param({ "1", "10", "100", "1000", "10000", "100000" })
  public String batchSize;

  private Clock clockInstance;

  /** Resolve {@link #clockInstance} from the {@code clock} benchmark parameter. */
  void selectClock() {
    switch (clock) {
      case "fastclock": clockInstance = new FastClock(); return;
      case "system": clockInstance = Clock.SYSTEM; return;
      default: throw new UnsupportedOperationException("invalid clock type selected, should be 'fastclock' or 'system'");
    }
  }

  @Setup
  public void setup() {
    selectClock();
    registry = new AtlasRegistry(clockInstance, System::getProperty);
    // Fix: use the selected clock for the summary as well. Previously this was
    // hard-coded to Clock.SYSTEM, so the `clock` parameter had no effect on the
    // code path being measured (the fastclock vs system results above differ
    // only within the error bars, consistent with that bug).
    dist = new AtlasDistributionSummary(registry.createId("test"), clockInstance, 10_000, 10_000);
    amounts = new long[Integer.parseInt(batchSize)];
    // Fixed seed so that every run records the same sequence of values.
    Random r = new Random(42);
    for (int i = 0; i < amounts.length; i++) {
      amounts[i] = r.nextInt(2000);
    }
  }

  @TearDown
  public void tearDown() throws Exception {
    // Only FastClock owns a background thread that needs to be stopped.
    if (clockInstance instanceof FastClock) {
      ((AutoCloseable) clockInstance).close();
    }
  }

  /** Record the amounts one call at a time, the traditional usage pattern. */
  @Benchmark
  public void distributionOneAtATime(Blackhole bh) {
    for (long amount : amounts) {
      dist.record(amount);
    }
    bh.consume(dist);
  }

  /** Record all amounts with a single batch call. */
  @Benchmark
  public void distributionBatch(Blackhole bh) {
    dist.record(amounts, amounts.length);
    bh.consume(dist);
  }

  /**
   * Prototype clock that caches the wall time in an AtomicLong refreshed by a
   * background scheduler, trading up to ~1ms of accuracy for cheaper reads.
   */
  public final class FastClock implements Clock, AutoCloseable {

    private final AtomicLong now;
    private final ScheduledExecutorService exec;
    private final ScheduledFuture<?> future;

    public FastClock() {
      now = new AtomicLong(System.currentTimeMillis());
      exec = Executors.newSingleThreadScheduledExecutor();
      future = exec.scheduleWithFixedDelay(this::updateWallTime, 1, 1, TimeUnit.MILLISECONDS);
    }

    private void updateWallTime() {
      now.set(System.currentTimeMillis());
    }

    @Override
    public long wallTime() {
      return now.get();
    }

    @Override
    public long monotonicTime() {
      return System.nanoTime();
    }

    @Override
    public void close() throws Exception {
      future.cancel(true);
      exec.shutdownNow();
    }
  }
}
| 6,091 |
0 | Create_ds/spectator/spectator-reg-atlas/src/jmh/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/jmh/java/com/netflix/spectator/atlas/ListIteration.java | /*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.infra.Blackhole;
import java.util.ArrayList;
import java.util.List;
@State(Scope.Thread)
public class ListIteration {

  /** Backing list traversed by both benchmarks; holds the integers 0..99. */
  private List<Integer> values;

  /** Populate the list once per benchmark thread. */
  @Setup
  public void setup() {
    values = new ArrayList<>(100);
    for (int v = 0; v < 100; ++v) {
      values.add(v);
    }
  }

  /** Traverse with the enhanced for loop, i.e. via the list's Iterator. */
  @Benchmark
  public void forEach(Blackhole bh) {
    for (Integer value : values) {
      bh.consume(value);
    }
  }

  /** Traverse with an index-based loop calling {@link List#get(int)}. */
  @Benchmark
  public void forUsingGet(Blackhole bh) {
    final int size = values.size();
    for (int idx = 0; idx < size; ++idx) {
      bh.consume(values.get(idx));
    }
  }
}
| 6,092 |
0 | Create_ds/spectator/spectator-reg-atlas/src/jmh/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/jmh/java/com/netflix/spectator/atlas/BatchUpdates.java | /*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Clock;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.DistributionSummary;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Timer;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.infra.Blackhole;
import java.util.concurrent.TimeUnit;
/**
* <pre>
* Benchmark Mode Cnt Score Error Units
*
* noInstrumentation thrpt 5 3978.248 ± 136.863 ops/s
*
* counter thrpt 5 14.138 ± 0.229 ops/s
* counterBatch thrpt 5 464.445 ± 8.175 ops/s
*
* distSummary thrpt 5 9.383 ± 0.732 ops/s
* distSummaryBatch thrpt 5 353.769 ± 10.698 ops/s
*
* timer thrpt 5 10.505 ± 0.170 ops/s
* timerBatch thrpt 5 336.505 ± 3.538 ops/s
* </pre>
*/
@State(Scope.Thread)
public class BatchUpdates {

  private Registry registry;

  /** Create a fresh registry per thread so meters are not shared across trials. */
  @Setup
  public void setup() {
    registry = new AtlasRegistry(Clock.SYSTEM, System::getProperty);
  }

  @TearDown
  public void tearDown() {
    registry = null;
  }

  /** Baseline: the loop with no meter updates at all. */
  @Benchmark
  public void noInstrumentation(Blackhole bh) {
    long total = 0L;
    for (int idx = 0; idx < 1_000_000; ++idx) {
      total += idx;
    }
    bh.consume(total);
  }

  /** Update a counter directly on every iteration. */
  @Benchmark
  public void counter(Blackhole bh) {
    Counter counter = registry.counter("test");
    long total = 0L;
    for (int idx = 0; idx < 1_000_000; ++idx) {
      total += idx;
      counter.increment();
    }
    bh.consume(total);
  }

  /** Update a counter via a batch updater flushed every 100k increments. */
  @Benchmark
  public void counterBatch(Blackhole bh) throws Exception {
    Counter counter = registry.counter("test");
    try (Counter.BatchUpdater updater = counter.batchUpdater(100_000)) {
      long total = 0L;
      for (int idx = 0; idx < 1_000_000; ++idx) {
        total += idx;
        updater.increment();
      }
      bh.consume(total);
    }
  }

  /** Record into a timer directly on every iteration. */
  @Benchmark
  public void timer(Blackhole bh) {
    Timer timer = registry.timer("test");
    long total = 0L;
    for (int idx = 0; idx < 1_000_000; ++idx) {
      total += idx;
      timer.record(idx, TimeUnit.MILLISECONDS);
    }
    bh.consume(total);
  }

  /** Record into a timer via a batch updater flushed every 100k records. */
  @Benchmark
  public void timerBatch(Blackhole bh) throws Exception {
    Timer timer = registry.timer("test");
    try (Timer.BatchUpdater updater = timer.batchUpdater(100_000)) {
      long total = 0L;
      for (int idx = 0; idx < 1_000_000; ++idx) {
        total += idx;
        updater.record(idx, TimeUnit.MILLISECONDS);
      }
      bh.consume(total);
    }
  }

  /** Record into a distribution summary directly on every iteration. */
  @Benchmark
  public void distSummary(Blackhole bh) {
    DistributionSummary summary = registry.distributionSummary("test");
    long total = 0L;
    for (int idx = 0; idx < 1_000_000; ++idx) {
      total += idx;
      summary.record(idx);
    }
    bh.consume(total);
  }

  /** Record into a distribution summary via a batch updater. */
  @Benchmark
  public void distSummaryBatch(Blackhole bh) throws Exception {
    DistributionSummary summary = registry.distributionSummary("test");
    try (DistributionSummary.BatchUpdater updater = summary.batchUpdater(100_000)) {
      long total = 0L;
      for (int idx = 0; idx < 1_000_000; ++idx) {
        total += idx;
        updater.record(idx);
      }
      bh.consume(total);
    }
  }
}
| 6,093 |
0 | Create_ds/spectator/spectator-reg-atlas/src/jmh/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/jmh/java/com/netflix/spectator/atlas/PollMetersBench.java | /*
* Copyright 2014-2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.ManualClock;
import com.netflix.spectator.api.histogram.PercentileTimer;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.infra.Blackhole;
import java.util.Random;
import java.util.concurrent.TimeUnit;
@State(Scope.Thread)
public class PollMetersBench {

  private ManualClock clock;
  private AtlasRegistry registry;

  /**
   * Populate the registry with 100k meters of randomly chosen types and ids.
   * A fixed seed keeps the population identical across runs.
   */
  @Setup
  public void setup() {
    clock = new ManualClock();
    registry = new AtlasRegistry(clock, System::getProperty);
    Random random = new Random(42);
    for (int i = 0; i < 100_000; ++i) {
      switch (random.nextInt(8)) {
        case 0:
          registry.timer(randomId(random)).record(42, TimeUnit.MILLISECONDS);
          break;
        case 1:
          registry.distributionSummary(randomId(random)).record(42);
          break;
        case 2:
          registry.gauge(randomId(random)).set(42.0);
          break;
        case 3:
          registry.maxGauge(randomId(random)).set(42.0);
          break;
        case 4:
          PercentileTimer.builder(registry)
              .withId(randomId(random))
              .build()
              .record(random.nextInt(60_000), TimeUnit.MILLISECONDS);
          break;
        default:
          // remaining cases (5-7) all map to counters
          registry.counter(randomId(random)).increment();
          break;
      }
    }
  }

  /** Build an id with a random name and 0-19 random tag key/value pairs. */
  private Id randomId(Random random) {
    Id id = Id.create(randomString(random, 2 + random.nextInt(120)));
    int numTags = random.nextInt(20);
    for (int i = 0; i < numTags; ++i) {
      String key = randomString(random, 2 + random.nextInt(60));
      String value = randomString(random, 2 + random.nextInt(120));
      id = id.withTag(key, value);
    }
    return id;
  }

  /** Generate a random printable-ASCII string of the given length. */
  private String randomString(Random random, int len) {
    StringBuilder buf = new StringBuilder(len);
    for (int i = 0; i < len; ++i) {
      buf.append(randomChar(random));
    }
    return buf.toString();
  }

  /** Pick a random character in the printable ASCII range ['!', '~'). */
  private char randomChar(Random random) {
    return (char) ('!' + random.nextInt('~' - '!'));
  }

  /** Advance the clock one step and measure polling all meters into batches. */
  @Benchmark
  public void pollMeters(Blackhole bh) {
    long timestamp = clock.wallTime() + 1;
    clock.setWallTime(timestamp);
    registry.pollMeters(timestamp);
    bh.consume(registry.getBatches(timestamp));
  }
}
| 6,094 |
0 | Create_ds/spectator/spectator-reg-atlas/src/jmh/java/com/netflix/spectator | Create_ds/spectator/spectator-reg-atlas/src/jmh/java/com/netflix/spectator/atlas/EnsureIdTags.java | /*
* Copyright 2014-2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.atlas;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Statistic;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.infra.Blackhole;
/**
* <pre>
* Benchmark Mode Cnt Score Error Units
* checkMissing thrpt 5 7446459.299 ± 236506.306 ops/s
* checkMissing:·gc.alloc.rate thrpt 5 1875.207 ± 59.556 MB/sec
* checkMissing:·gc.alloc.rate.norm thrpt 5 264.062 ± 0.003 B/op
* checkMissing:·gc.count thrpt 5 313.000 counts
* checkMissing:·gc.time thrpt 5 153.000 ms
* checkMissing:·stack thrpt NaN ---
* checkPresent thrpt 5 48437646.563 ± 601766.743 ops/s
* checkPresent:·gc.alloc.rate thrpt 5 0.426 ± 0.006 MB/sec
* checkPresent:·gc.alloc.rate.norm thrpt 5 0.009 ± 0.001 B/op
* checkPresent:·gc.count thrpt 5 1.000 counts
* checkPresent:·gc.time thrpt 5 2.000 ms
* checkPresent:·stack thrpt NaN ---
* newIdMissing thrpt 5 6415187.273 ± 188476.396 ops/s
* newIdMissing:·gc.alloc.rate thrpt 5 2300.752 ± 67.568 MB/sec
* newIdMissing:·gc.alloc.rate.norm thrpt 5 376.071 ± 0.004 B/op
* newIdMissing:·gc.count thrpt 5 384.000 counts
* newIdMissing:·gc.time thrpt 5 194.000 ms
* newIdMissing:·stack thrpt NaN ---
* newIdPresent thrpt 5 7329062.490 ± 67842.114 ops/s
* newIdPresent:·gc.alloc.rate thrpt 5 2740.286 ± 25.401 MB/sec
* newIdPresent:·gc.alloc.rate.norm thrpt 5 392.062 ± 0.001 B/op
* newIdPresent:·gc.count thrpt 5 373.000 counts
* newIdPresent:·gc.time thrpt 5 190.000 ms
* newIdPresent:·stack thrpt NaN ---
* </pre>
*/
@State(Scope.Thread)
public class EnsureIdTags {

  /** Representative IPC id with a typical set of infrastructure tags. */
  private static final Id BASE_ID = Id.create("ipc.server.call")
      .withTag("nf.app", "www")
      .withTag("nf.cluster", "www-main")
      .withTag("nf.asg", "www-main-v001")
      .withTag("nf.stack", "main")
      .withTag("nf.node", "i-1234567890")
      .withTag("nf.region", "us-east-1")
      .withTag("nf.zone", "us-east-1c")
      .withTag("nf.vmtype", "m5.xlarge")
      .withTag("ipc.client.app", "db")
      .withTag("ipc.client.cluster", "db-main")
      .withTag("ipc.client.asg", "db-main-v042")
      .withTag("ipc.endpoint", "/query")
      .withTag("ipc.status", "success")
      .withTag("ipc.status.detail", "200")
      .withTag("ipc.result", "success");

  // Instance field (not static) per JMH state conventions; already carries the
  // statistic and ds type tags, i.e. the "present" case below.
  private final Id statId = BASE_ID.withTag(Statistic.count).withTag(DsType.rate);

  /** Always rebuild the id when the stat tags are absent. */
  @Benchmark
  public void newIdMissing(Blackhole bh) {
    Id id = Id.create(BASE_ID.name())
        .withTag(Statistic.count)
        .withTag(DsType.rate)
        .withTags(BASE_ID.tags());
    bh.consume(id);
  }

  /** Always rebuild the id even though the stat tags are already present. */
  @Benchmark
  public void newIdPresent(Blackhole bh) {
    Id id = Id.create(statId.name())
        .withTag(Statistic.count)
        .withTag(DsType.rate)
        .withTags(statId.tags());
    bh.consume(id);
  }

  /** Check-first approach when the stat tags are absent. */
  @Benchmark
  public void checkMissing(Blackhole bh) {
    Id id = AtlasMeter.addIfMissing(BASE_ID, Statistic.count, DsType.rate);
    bh.consume(id);
  }

  /** Check-first approach when the stat tags are already present. */
  @Benchmark
  public void checkPresent(Blackhole bh) {
    Id id = AtlasMeter.addIfMissing(statId, Statistic.count, DsType.rate);
    bh.consume(id);
  }
}
| 6,095 |
0 | Create_ds/spectator/spectator-perf/src/test/java/com/netflix/spectator | Create_ds/spectator/spectator-perf/src/test/java/com/netflix/spectator/perf/NoopMemoryUseTest.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.perf;
/**
 * Runs the memory use scenarios against the "noop" registry implementation.
 * Serves as a baseline, since a noop registry is expected to retain little state.
 */
public class NoopMemoryUseTest extends MemoryUseTest {
  /** Create an instance configured for the noop registry. */
  public NoopMemoryUseTest() {
    super("noop");
  }
}
| 6,096 |
0 | Create_ds/spectator/spectator-perf/src/test/java/com/netflix/spectator | Create_ds/spectator/spectator-perf/src/test/java/com/netflix/spectator/perf/ServoMemoryUseTest.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.perf;
import org.junit.jupiter.api.Disabled;
/**
 * Runs the memory use scenarios against the "servo" registry implementation.
 * Currently disabled — no reason given in the code; presumably slow or flaky,
 * TODO confirm before re-enabling.
 */
@Disabled
public class ServoMemoryUseTest extends MemoryUseTest {
  /** Create an instance configured for the servo-backed registry. */
  public ServoMemoryUseTest() {
    super("servo");
  }
}
| 6,097 |
0 | Create_ds/spectator/spectator-perf/src/test/java/com/netflix/spectator | Create_ds/spectator/spectator-perf/src/test/java/com/netflix/spectator/perf/DefaultMemoryUseTest.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.perf;
import org.junit.jupiter.api.Disabled;
/**
 * Runs the memory use scenarios against the "default" registry implementation.
 * Currently disabled — no reason given in the code; presumably slow or flaky,
 * TODO confirm before re-enabling.
 */
@Disabled
public class DefaultMemoryUseTest extends MemoryUseTest {
  /** Create an instance configured for the default registry. */
  public DefaultMemoryUseTest() {
    super("default");
  }
}
| 6,098 |
0 | Create_ds/spectator/spectator-perf/src/test/java/com/netflix/spectator | Create_ds/spectator/spectator-perf/src/test/java/com/netflix/spectator/perf/MemoryUseTest.java | /*
* Copyright 2014-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spectator.perf;
import com.netflix.spectator.api.Registry;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.Test;
import org.openjdk.jol.info.GraphLayout;
/**
 * Base class for checking the memory footprint of a registry implementation
 * after running several metric-cardinality stress scenarios via {@link Main#run}.
 * Subclasses select the implementation by passing its name to the constructor.
 */
public abstract class MemoryUseTest {

  /** Name of the registry implementation to exercise, e.g. "noop" or "servo". */
  private final String registryType;

  /**
   * Create a new instance.
   *
   * @param registryType
   *     name of the registry implementation, passed through to {@link Main#run}
   */
  public MemoryUseTest(String registryType) {
    this.registryType = registryType;
  }

  /** Returns the java specification version as a number, e.g. 1.8, 11.0, 17.0. */
  private double javaMajorVersion() {
    return Double.parseDouble(System.getProperty("java.specification.version"));
  }

  /**
   * Assert that the deep memory footprint of the registry's object graph does
   * not exceed {@code limit} bytes. The JOL footprint report is included in the
   * failure message to aid debugging.
   */
  private void checkMemoryUsage(Registry registry, long limit) {
    // JOL doesn't seem to work with jdk20, will investigate later
    Assumptions.assumeTrue(javaMajorVersion() < 20.0);
    GraphLayout graph = GraphLayout.parseInstance(registry);
    long size = graph.totalSize();
    String details = "memory use exceeds limit: " + size + " > " + limit + "\n\n" + graph.toFootprint();
    Assertions.assertTrue(size <= limit, details);
  }

  @Test
  public void manyTags() {
    Registry registry = Main.run(registryType, "many-tags");
    checkMemoryUsage(registry, 8_000_000);
  }

  @Test
  public void tagKeyExplosion() {
    Registry registry = Main.run(registryType, "tag-key-explosion");
    checkMemoryUsage(registry, 8_000_000);
  }

  @Test
  public void tagValueExplosion() {
    Registry registry = Main.run(registryType, "tag-value-explosion");
    checkMemoryUsage(registry, 8_000_000);
  }

  @Test
  public void nameExplosion() {
    Registry registry = Main.run(registryType, "name-explosion");
    checkMemoryUsage(registry, 8_000_000);
  }
}
| 6,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.